query: string, length 9 to 3.4k
document: string, length 9 to 87.4k
metadata: dict
negatives: sequence, 4 to 101 items
negative_scores: sequence, 4 to 101 items
document_score: string, length 3 to 10
document_rank: string, 102 distinct values
Changes the priority of the element elem to prio.
def change_priority(self, elem, prio):
    pos = self.pos[elem]
    currPrio = self.A[pos][1]
    self.A[pos] = (elem, prio)
    if self.cmpFn(prio, currPrio):
        self.insert_loop(pos, pos // 2)  # Up heapify
    else:
        self.combine(pos)  # Down heapify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPriority(self, p):\n self.priority = p", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)", "def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def set_priority(self, priority):\n self._priority = priority", "def priority(self, priority):\n self._priority = priority", "def setPriority(self, *args):\n return _libsbml.Event_setPriority(self, *args)", "def get_priority(self, elem):\n pos = self.pos[elem]\n return self.A[pos][1]", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def set_priority(self, priority):\n self.options[\"priority\"] = priority", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def set_priority(self, priority):\n self.options['priority'] = priority", "def _set_priority(self, args):\n if 'priority' in args:\n try:\n self._priority = float(args['priority'])\n except TypeError:\n raise InvalidPriority('Invalid priority: %s' % args['priority'])\n except ValueError:\n raise InvalidPriority()\n else:\n self._priority = None", "def enqueue(elem: Any, priority: int = 0) -> None:\n\tglobal queue\n\tqueue.append((priority, elem))\n\treturn None", "def change_priority(self, priority, key):\n index = self.__position[key]\n current = self.__heap[index][0]\n self.__heap[index][0] = priority\n\n if priority > current:\n self.__bubble_down(index)\n else:\n self.__bubble_up(index)", "def SetPriorityValue(self, *args, **kwargs):\n pass", "def set_thread_priority(self, priority: \"int\") -> \"int\":\n return _beamforming_swig.doaesprit_sptr_set_thread_priority(self, priority)", "def setpriority(pid=None, priority=1):\n\n #import win32api,win32process,win32con\n from ctypes import windll\n\n priorityclasses = [0x40, # IDLE_PRIORITY_CLASS,\n 0x4000, # BELOW_NORMAL_PRIORITY_CLASS,\n 0x20, # NORMAL_PRIORITY_CLASS,\n 0x8000, # ABOVE_NORMAL_PRIORITY_CLASS,\n 0x80, # HIGH_PRIORITY_CLASS,\n 0x100, # REALTIME_PRIORITY_CLASS\n ]\n if pid is None:\n pid = windll.kernel32.GetCurrentProcessId()\n handle = 
windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)\n windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])", "def decrease_priority(self):\n self._priority += 1", "def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])", "def set_priority(self, job_id, priority):\n job = Job.get_job_by_id(job_id)\n self.access_handler.check_set_priority(job)\n self.master.set_priority(job, priority)", "def _priority_changed(self, priority):\n if self.next is not None:\n self.next.priority = priority", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', 
yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 
'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def set_thread_priority(self, priority: \"int\") -> \"int\":\n return _beamforming_swig.beamformer_sptr_set_thread_priority(self, priority)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities): self.tree.val_update(i, float(p**self.alpha))", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p**self.alpha)", "def set_thread_priority(self, priority: \"int\") -> \"int\":\n return _beamforming_swig.phasedarray_sptr_set_thread_priority(self, priority)", "def change_elem(self, elem):\n return _coconut_tail_call(self.__class__, self.var, elem)", "def set_thread_priority(self, priority):\n return _add_vector_swig.add_vector_2_cpp_sptr_set_thread_priority(self, priority)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def set_io_priority(self, nVmIoPriority):\n\t\tcall_sdk_function('PrlVmCfg_SetIoPriority', self.handle, nVmIoPriority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.invert_bit_sptr_set_thread_priority(self, priority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.G3RUH_descramble_sptr_set_thread_priority(self, priority)", "def schedule_low_prio(self, job):\r\n assert(self.c.is_live())\r\n self.low_prio_jobs.put(job)", "def update(self, index, priority=-1):\n if (priority == -1):\n priority = self._max_priority\n elif (priority > self._max_priority):\n self._max_priority = priority\n\n # Search for index\n node = self.findIndex(index)\n\n # Replace with new priority\n diff = priority - node.priority\n node.priority = priority\n\n # Update value\n self._updateValue(node.parent, diff)", "def update(self, idx: int, new_priority: T.Union[int, float]):\n old_priority, item = self.__heap[idx]\n self.__heap[idx] = (new_priority, item)\n\n if new_priority < old_priority:\n self.__sift_up(idx)\n else:\n self.__sift_down(idx)", "def priority_option(args, run):\n try:\n priority = float(args)\n except ValueError:\n raise ValueError(\n \"The PRIORITY argument must be a number! 
(but was '{}')\".format(args)\n )\n run.meta_info[\"priority\"] = priority", "async def setIncident_priority(\n self,\n eventID: str,\n incidentNumber: int,\n priority: IncidentPriority,\n author: str,\n ) -> None:", "def schedule_update_priority(self, func, pri, *args, **kwargs):\n self.unschedule(func)\n new_item = _Item(func, pri, args, kwargs)\n for i,sched in enumerate(self.update_schedules):\n if sched.pri > new_item.pri:\n self.update_schedules.insert(i, new_item)\n return\n self.update_schedules.append(new_item)", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self._storage)\n delta = priority**self._alpha - self._it_sum[idx]\n self._prio_change_stats.push(delta)\n self._it_sum[idx] = priority**self._alpha\n self._it_min[idx] = priority**self._alpha\n\n self._max_priority = max(self._max_priority, priority)", "def priority(node):\n return node.priority", "def _update(self, priority, key):\n i = self._index[key]\n item = self._heap[i]\n old_priority = item.priority\n item.priority = priority\n if priority < old_priority:\n self._sift_up(i)\n else:\n self._sift_down(i)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.NRZI_sptr_set_thread_priority(self, priority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.general_burster_2_sptr_set_thread_priority(self, priority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.DeNRZI_sptr_set_thread_priority(self, priority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_set_thread_priority(self, priority)", "def set_sort_priority(self, priority):\n self.__sorting_priority = priority", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self._storage)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\n self._max_priority = max(self._max_priority, priority)", "def priority(self):\n pass # pragma: no cover", "def priority(self):\n pass # pragma: no cover", "def set_instruction_priority(kernel, insn_match, priority):\n\n def set_prio(insn):\n return insn.copy(priority=priority)\n\n return map_instructions(kernel, insn_match, set_prio)", "def set_priority(self, new_prio):\n if Priority.MIN_PRIORITY <= new_prio <= Priority.MAX_PRIORITY:\n self.__priority = new_prio\n LOG(msg='New priority value has been assigned. Priority=%d' % (self.__priority))\n return True\n\n LOG(msg='Given priority value is not within the range of [%d, %d].' 
% (Priority.MIN_PRIORITY, Priority.MAX_PRIORITY), log=Logs.ERROR)\n return False", "def set_thread_priority(self, priority):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_set_thread_priority(self, priority)", "def turn_priority(self):\n raise NotImplementedError(\"turn_priority() was not implemented in a subclass of TurnListItem.\")", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for ndx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= ndx < len(self.memory)\n self.iter_sum[ndx] = priority ** self.alpha\n self.iter_min[ndx] = priority ** self.alpha\n\n self.max_p = max(self.max_p, priority)", "def set_thread_priority(self, priority: \"int\") -> \"int\":\n return _beamforming_swig.randomsampler_sptr_set_thread_priority(self, priority)", "def SetPriority(self, priority=1, interruptMenuAfter=3, timeoutAfter=2):\n self.ListenToMenu(interruptMenuAfter) # listen to 'To sent with normal priority...'\n self.SipPhone.SendDTMF(str(priority))\n self.ListenToMenu(timeoutAfter) # listen to 'Message Sent'\n mailbox = self.getMailBoxDN()\n mailbox.SetPriority(int(priority))\n time.sleep(1)\n for owner in mailbox.owners:\n owner.CheckMWI()", "def set_thread_priority(self, priority):\n return _spacegrant_swig.hdlc_framer_sptr_set_thread_priority(self, priority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.ax25_pdu_packer_sptr_set_thread_priority(self, priority)", "def __init__(self, priority: Any, val: Any):\n self.priority = priority\n self.value = val", "def set_thread_priority(self, priority):\n return _spacegrant_swig.binary_sink_sptr_set_thread_priority(self, priority)", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.doaesprit_sptr_thread_priority(self)", "def set_normal_priority(self, cr, uid, ids, context=None):\n return self.set_priority(cr, uid, ids, '3')", "def insert(self, id, priority):\n self.n += 1\n i = self.n\n while i > 1:\n pIdx = int(i/2)\n p = self.elements[pIdx]\n\n if priority > p[PRIORITY]:\n break\n self.elements[i] = list(p)\n self.positions[p[ID]] = 1\n i = pIdx\n\n self.elements[i][ID] = id\n self.elements[i][PRIORITY] = priority\n self.positions[id] = i", "def set_thread_priority(self, priority):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_set_thread_priority(self, priority)", "def change_priority(self, index, new_key):\n # if index is within array\n if index < len(self):\n old_key = self._data[index].get_key()\n self._data[index].set_key(new_key)\n\n # if new key greater percolate down\n if new_key > old_key:\n self.percolate_down(index)\n # if new key is less than percolate up\n elif new_key < old_key:\n self.percolate_up(index)", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.hdlc_deframer_sptr_set_thread_priority(self, priority)", "def update_collection_priority(self, collid, prid):\n # cond = SQLBinaryExpr(collid, OP_EQ, collid)\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_EQ, collid)\n self.update_generic_data({COL_NAME_COLL_PRID: prid}, TABLE_NAME_COLL, cond)", "def thread_priority(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_thread_priority(self)", "def set_thread_priority(self, *args, **kwargs):\n 
return _TestA_swig.cleanslate_sptr_set_thread_priority(self, *args, **kwargs)", "def set_thread_priority(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr_set_thread_priority(self, *args, **kwargs)", "def delete_and_update_priority(self):\r\n for pbi in PBI.objects.filter(priority__gt=self.priority, project=self.project):\r\n pbi.priority -= 1\r\n pbi.save()\r\n\r\n self.delete()", "def set_thread_priority(self, priority):\n return _spacegrant_swig.message_debug_sptr_set_thread_priority(self, priority)", "def set_thread_priority(self, priority):\n return _spacegrant_swig.udp_debug_sptr_set_thread_priority(self, priority)", "def priority(name):\n try:\n manager = Actions()\n priority = Priority[name]\n ordered_tasks = manager.order_by_priority(priority)\n click.echo(\"Ordered by priority:\" + click.style(name, bg='red', fg='white'))\n click.echo()\n console_utils.format_print_ordered(ordered_tasks)\n except IndexError as e:\n click.echo(\"IndexError: \"+e)\n except Exception as e:\n click.echo(e)", "def schedule_frame_priority(self, func, pri, *args, **kwargs):\n self.unschedule(func)\n new_item = _Item(func, pri, args, kwargs)\n for i,sched in enumerate(self.frame_schedules):\n if sched.pri > new_item.pri:\n self.frame_schedules.insert(i, new_item)\n return\n self.frame_schedules.append(new_item)", "def set_thread_priority(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_thread_priority(self, *args, **kwargs)", "def getPriority(self):", "def set_thread_priority(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_thread_priority(self, *args, **kwargs)", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def put_elem(self, elem):\n serialized_elem = self.serialize_elem(elem)\n self.redis_client.lpush(self.buffer_name, serialized_elem)", "def updatePriority(self, xDest, yDest):\n self.priority = self.distance + self.estimate(xDest, yDest)", "def add_priority(self, entity_type, obj_list, comp_name=None, priority=3):\n i = priority\n objects = \", \".join(obj_list)\n args = [\"NAME:UpdatePriorityListData\"]\n if entity_type == 1:\n prio = [\n \"NAME:PriorityListParameters\",\n \"EntityType:=\",\n \"Object\",\n \"EntityList:=\",\n objects,\n \"PriorityNumber:=\",\n i,\n \"PriorityListType:=\",\n \"3D\",\n ]\n self._priorities_args.append(prio)\n args += self._priorities_args\n elif entity_type == 2:\n pcblist = self.modeler.oeditor.Get3DComponentInstanceNames(comp_name)\n prio = [\n \"NAME:PriorityListParameters\",\n \"EntityType:=\",\n \"Component\",\n \"EntityList:=\",\n pcblist[0],\n \"PriorityNumber:=\",\n i,\n \"PriorityListType:=\",\n \"3D\",\n ]\n self._priorities_args.append(prio)\n args += self._priorities_args\n self.modeler.oeditor.UpdatePriorityList([\"NAME:UpdatePriorityListData\"])\n self.modeler.oeditor.UpdatePriorityList(args)\n return True", "def set(self, value, priority):\n\t\tif priority >= self.priority:\n\t\t\tif isinstance(self.value, BaseSettings):\n\t\t\t\tvalue = BaseSettings(value, priority=priority)\n\t\t\tself.value = value\n\t\t\tself.priority = priority", "def _priority_order(id_priority_list, elem):\n assert isinstance(id_priority_list, list)\n # match id types with id priority\n for index, id_elem in enumerate(id_priority_list):\n if elem == id_elem:\n return index\n # the id type is not in id_priority_list so it will be placed last\n return len(id_priority_list) + 1", "def update_priorities(self,indexes, priorities):\r\n\r\n assert len(indexes) == len(priorities)\r\n for index, priority in 
zip(indexes, priorities):\r\n assert priority > 0 and 0 <= index < len(self.buffer)\r\n self._it_sum[index] = priority ** self._alpha\r\n self._it_min[index] = priority ** self._alpha\r\n self._max_priority = max(self._max_priority, priority)", "def set(self, name, value, priority=\"project\"):\n\t\tself._assert_mutability()\n\t\tpriority = get_settings_priority(priority)\n\t\t# print( name )\n\t\t# print( self )\n\t\t# sys.exit(3)\n\t\tif name not in self:\n\t\t\tif isinstance(value, SettingsAttribute):\n\t\t\t\tself.attributes[name] = value\n\t\t\telse:\n\t\t\t\tself.attributes[name] = SettingsAttribute(value, priority)\n\t\telse:\n\t\t\tself.attributes[name].set(value, priority)" ]
[ "0.67719734", "0.6752105", "0.6318923", "0.6283811", "0.6121634", "0.61172235", "0.6071931", "0.6042161", "0.5969836", "0.5969836", "0.5969836", "0.5935215", "0.5908808", "0.59068906", "0.5899444", "0.5884699", "0.5880411", "0.58484524", "0.58209383", "0.5818788", "0.5729039", "0.5682143", "0.5634272", "0.5613425", "0.5603264", "0.5583622", "0.5583622", "0.5583622", "0.55167353", "0.55167353", "0.55167353", "0.551516", "0.551516", "0.551516", "0.54652095", "0.54611635", "0.5424333", "0.5415442", "0.54026574", "0.5400078", "0.53904146", "0.53904146", "0.5372575", "0.53648674", "0.53411496", "0.5340203", "0.5334486", "0.53326964", "0.5262129", "0.52561134", "0.52502006", "0.5232322", "0.5200582", "0.51961535", "0.5187928", "0.5174766", "0.5142196", "0.50864506", "0.5080833", "0.5062957", "0.50548136", "0.50548136", "0.50520885", "0.50217307", "0.5019178", "0.50164104", "0.5012107", "0.5010751", "0.50084174", "0.5005865", "0.49906927", "0.49872777", "0.49651346", "0.4951656", "0.49472037", "0.49353698", "0.49181616", "0.49174163", "0.49173704", "0.48935446", "0.48885512", "0.48599005", "0.4835646", "0.48246694", "0.48102662", "0.48073426", "0.4785541", "0.47795755", "0.4772653", "0.4768282", "0.47574762", "0.47472134", "0.47457993", "0.474569", "0.4713451", "0.47125748", "0.47124615", "0.4676058", "0.4656486", "0.46559545" ]
0.81994
0
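The positive document in the record above is only a fragment: it calls `insert_loop`, `combine`, `self.pos`, `self.A`, and `self.cmpFn` without defining them. The sketch below shows one way those pieces could fit together as an indexed binary heap; the class name, the 1-based layout of `A`, and the bodies of `insert_loop` and `combine` are assumptions inferred from the fragment, not the implementation this row was drawn from.

```python
# A minimal, self-contained sketch of the indexed heap the fragment above
# appears to belong to. Helper names and the 1-based array layout are
# assumptions, not the dataset's original code.
class IndexedHeap:
    def __init__(self, cmpFn=lambda a, b: a < b):
        self.cmpFn = cmpFn   # comparison defining "higher priority"
        self.A = [None]      # 1-based array of (elem, prio) pairs
        self.pos = {}        # elem -> index into A
        self.n = 0

    def _swap(self, i, j):
        self.A[i], self.A[j] = self.A[j], self.A[i]
        self.pos[self.A[i][0]] = i
        self.pos[self.A[j][0]] = j

    def insert_loop(self, i, p):
        # Up-heapify: bubble index i toward the root while it beats its parent p.
        while i > 1 and self.cmpFn(self.A[i][1], self.A[p][1]):
            self._swap(i, p)
            i, p = p, p // 2

    def combine(self, i):
        # Down-heapify: push index i toward the leaves while a child beats it.
        while True:
            best, l, r = i, 2 * i, 2 * i + 1
            if l <= self.n and self.cmpFn(self.A[l][1], self.A[best][1]):
                best = l
            if r <= self.n and self.cmpFn(self.A[r][1], self.A[best][1]):
                best = r
            if best == i:
                return
            self._swap(i, best)
            i = best

    def insert(self, elem, prio):
        self.n += 1
        self.A.append((elem, prio))
        self.pos[elem] = self.n
        self.insert_loop(self.n, self.n // 2)

    def change_priority(self, elem, prio):
        pos = self.pos[elem]
        currPrio = self.A[pos][1]
        self.A[pos] = (elem, prio)
        if self.cmpFn(prio, currPrio):
            self.insert_loop(pos, pos // 2)  # Up heapify
        else:
            self.combine(pos)  # Down heapify
```

Under these assumptions, `change_priority` runs in O(log n): the updated element either bubbles up via `insert_loop` or sinks via `combine`, and the `pos` map keeps element lookup at O(1).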
Gets the priority of an element.
def get_priority(self, elem):
    pos = self.pos[elem]
    return self.A[pos][1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getpriority(self, name):\n\t\tif name not in self:\n\t\t\treturn None\n\t\treturn self.attributes[name].priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def get_priority(self):\n return self._priority", "def get_priority(self):\n return self._priority", "def priority(node):\n return node.priority", "def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")", "def priority(self):\n return self._pri", "def get_priority(self):\n return self.options[\"priority\"]", "def priority(self) -> int:\n return pulumi.get(self, \"priority\")", "def getPriority(self):\n return self.priority", "def get_priority(self):\n return self.options['priority']", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> str:\n return pulumi.get(self, \"priority\")", "def getPriority(self, *args):\n return _libsbml.Event_getPriority(self, *args)", "def getPriority(self):", "def get_priority(self):\n return str(self.priority)", "def get_priority(self):\n priorities = dict(PRIORITY_CHOICES)\n return priorities.get(self.priority, \"N/A\")", "def get_priority(self):\n priority_value = (self.__priority if Priority.IMPORTANCE_RANKING == PriorityRanking.DESCENDING else Priority.MAX_PRIORITY-self.__priority)\n return 2 * priority_value", "def find_priority(x):\n pat = r\"priority\\s*(\\d*)\"\n result = re.search(pat, str(x), flags=re.IGNORECASE)\n if result:\n return int(result.group(1))", "def fetch_operators_priority(self, operator):\n priority = self.operators_dict[operator]['priority']\n return priority", "def total_priority(self) -> int:\n return self.tree[0].item()", "def GetPriorityValue(self, *args, **kwargs):\n pass", "def _calc_priority(self) -> Union[None, int]:\n for priority, pattern in enumerate(TABLE_OF_PRECEDENCE):\n if re.match(pattern, self.id):\n return priority", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.doaesprit_sptr_thread_priority(self)", "def _total_priority(self):\n return self.nodes[0]", "def sampling_priority(self):\n # type: () -> Optional[NumericType]\n return self._metrics.get(SAMPLING_PRIORITY_KEY)", "def _get_fabric_priority(self):\n return self.__fabric_priority", "def _get_fabric_priority(self):\n return self.__fabric_priority", "def _get_fabric_priority(self):\n return self.__fabric_priority", "def _get_fabric_priority(self):\n return self.__fabric_priority", "def _get_fabric_priority(self):\n return self.__fabric_priority", "def _get_fabric_priority(self):\n return self.__fabric_priority", "def vm_priority(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vm_priority\")", "def effective_priority (self):\n return self.priority if 
self.match.is_wildcarded else (1<<16) + 1", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.beamformer_sptr_thread_priority(self)", "def getElementName(self):\n return _libsbml.Priority_getElementName(self)", "def priority(self):\n # type: () -> GroupPriority\n if self._priority is not None:\n return self._priority\n if self.group_name == get_constant(\"MAGPIE_ANONYMOUS_GROUP\"):\n self._priority = -1 # lowest of all for *special* public group\n elif self.group_name == get_constant(\"MAGPIE_ADMIN_GROUP\"):\n self._priority = math.inf # everything will be lower than admins\n else:\n self._priority = 0 # nothing can be lower/equal to anonymous, equal for any *generic* group\n return self._priority", "def VlanPriority(self):\n if self.force_auto_sync:\n self.get('VlanPriority')\n return self._VlanPriority", "def get_priority(self) -> str:\n if self.health >= 75 and self.food >= 75 and self.water >= 75:\n if min(self.food, self.water) == self.food:\n return 'food'\n else:\n return 'water'\n else:\n if self.food >= 75 and self.water >= 75:\n return 'monster'\n else:\n return 'food'", "def thread_priority(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_thread_priority(self)", "def get_foreground_priority(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetForegroundPriority', self.handle)", "def GetTaskPriority(cls, task):\n if not task: return None\n\n task = cls.TaskRelativeName(task)\n\n priority = ''\n parts = task.split(os.sep)\n for part in parts:\n priority_name = part.split('_', 1)\n if len(priority_name) < 2 or not priority_name[0].isdigit(): return None\n priority += priority_name[0]\n return priority", "def active_thread_priority(self) -> \"int\":\n return _beamforming_swig.doaesprit_sptr_active_thread_priority(self)", "def get_sort_priority(self):\n return self.__sorting_priority", "def getPriority(self, levelname):\n level = self.levelMap.get(levelname, syslog.LOG_INFO)\n priority = level | self.facility\n return priority", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.phasedarray_sptr_thread_priority(self)", "def _get_prio_gen ( self, event_name ):\n return self._prio_gen", "def get_priority(rules):\n\n priorities = [rule['Priority'] for rule in rules]\n i = 1\n rule_priority = None\n while not rule_priority: # increment from 1 onwards until we find a priority that is unused\n if not str(i) in priorities:\n return i\n else:\n i = i + 1", "def priority(s):\n if hasattr(s, '_priority') and isinstance( s._priority, int ):\n return s._priority\n if type(s) in (list, tuple, set, frozenset):\n return 6\n if type(s) is dict:\n return 5\n if hasattr(s, 'validate'):\n return 4\n if issubclass(type(s), type):\n return 3\n if callable(s):\n return 2\n else:\n return 1", "def get_io_priority(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetIoPriority', self.handle)", "def low_priority(self):\n\n return self.filter(priority='3')", "def high_priority(self):\n\n return self.filter(priority='1')", "def thread_priority(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_thread_priority(self)", "def turn_priority(self):\n raise NotImplementedError(\"turn_priority() was not implemented in a subclass of TurnListItem.\")", "def thread_priority(self):\n return _spacegrant_swig.general_burster_2_sptr_thread_priority(self)", "def priority(self):\n pass # pragma: no cover", "def priority(self):\n pass # pragma: no cover", "def get_background_priority(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetBackgroundPriority', self.handle)", "def thread_priority(self):\n 
return _spacegrant_swig.DeNRZI_sptr_thread_priority(self)", "def min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n item = self._data[0]\r\n return item._key", "def active_thread_priority(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_active_thread_priority(self)", "def min(self):\r\n if self.is_empty():\r\n raise Empty(\"Priority queue is empty.\")\r\n p = self.data.first()\r\n item = p.element()\r\n return item.key, item.value", "def get_settings_priority(priority):\n\tif isinstance(priority, six.string_types):\n\t\treturn SETTINGS_PRIORITIES[priority]\n\telse:\n\t\treturn priority", "def thread_priority(self):\n return _TestA_swig.cleanslate_sptr_thread_priority(self)", "def get_first_product_by_priority(self):\n products = self.products.filter(type=\"S\").order_by(\"billing_priority\")\n if products.exists():\n return products.first()\n else:\n return None", "def decode_priority_value(byte_iter):\n priorities = {128: 'Low', 129: 'Normal', 130: 'High'}\n\n byte = byte_iter.preview()\n if byte in priorities:\n byte = byte_iter.next()\n return priorities[byte]\n\n byte_iter.reset_preview()\n raise wsp_pdu.DecodeError('Error parsing Priority value '\n 'for byte: %s' % byte)", "def get_highest_priority(self):\n for i in self.query.index.values.tolist():\n if not int(self.query.loc[i,'in_%s'%self.program]):\n pick = self.query.loc[i]\n break\n return pick", "def _priority_order(id_priority_list, elem):\n assert isinstance(id_priority_list, list)\n # match id types with id priority\n for index, id_elem in enumerate(id_priority_list):\n if elem == id_elem:\n return index\n # the id type is not in id_priority_list so it will be placed last\n return len(id_priority_list) + 1", "def medium_priority(self):\n\n return self.filter(priority='2')", "def thread_priority(self):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_thread_priority(self)", "def thread_priority(self):\n return _spacegrant_swig.invert_bit_sptr_thread_priority(self)", "def thread_priority(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_thread_priority(self)", "def thread_priority(self):\n return _spacegrant_swig.NRZI_sptr_thread_priority(self)", "def active_thread_priority(self) -> \"int\":\n return _beamforming_swig.beamformer_sptr_active_thread_priority(self)", "def min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n p = self._data.first()\n item = p.element()\n return (item._key, item._value)", "def peek(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n return self.priority_queue[nextkey][0]\n else:\n raise IndexError(\"There's nothing in your queue\")", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_thread_priority(self)", "def get_priority(wordset):\n return -len(wordset)", "def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)", "def thread_priority(self):\n return _uhd_swig.usrp_sink_sptr_thread_priority(self)", "def get_priority(self):\n return False", "def createPriority(self):\n return 
_libsbml.Event_createPriority(self)", "def priorities(self):\n return self._get(\"priorities\").json()", "def thread_priority(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_thread_priority(self)", "def scale_set_priority(self) -> Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]]:\n return pulumi.get(self, \"scale_set_priority\")", "def thread_priority(self):\n return _spacegrant_swig.hdlc_framer_sptr_thread_priority(self)", "def active_thread_priority(self):\n return _spacegrant_swig.DeNRZI_sptr_active_thread_priority(self)", "def active_thread_priority(self):\n return _TestA_swig.cleanslate_sptr_active_thread_priority(self)", "def active_thread_priority(self):\n return _spacegrant_swig.general_burster_2_sptr_active_thread_priority(self)" ]
[ "0.7882315", "0.7649342", "0.7649342", "0.7649342", "0.7649342", "0.7647876", "0.7647876", "0.7504323", "0.74972016", "0.7453977", "0.74093306", "0.74017596", "0.73951834", "0.73866266", "0.7357312", "0.7357312", "0.7357312", "0.72419477", "0.72419477", "0.72419477", "0.72419477", "0.7177661", "0.7177661", "0.7129124", "0.70402867", "0.7033402", "0.6959805", "0.6786755", "0.6735806", "0.6654902", "0.6537961", "0.6511174", "0.64912164", "0.64216787", "0.6400391", "0.62842065", "0.6268135", "0.6262835", "0.6262835", "0.6262835", "0.6262835", "0.6262835", "0.6262835", "0.6172685", "0.6146422", "0.6127946", "0.6125679", "0.6105501", "0.60926837", "0.60375494", "0.60187703", "0.5982709", "0.5975791", "0.5966403", "0.59653157", "0.59491897", "0.59443295", "0.5942421", "0.5942323", "0.59055406", "0.5901415", "0.5875621", "0.5861292", "0.5854385", "0.58408654", "0.58250767", "0.58119804", "0.58119804", "0.5810094", "0.58018196", "0.57986706", "0.5791536", "0.5788248", "0.5778119", "0.57768023", "0.5775744", "0.577214", "0.57683057", "0.5752529", "0.57374865", "0.57045937", "0.569367", "0.56843996", "0.56725156", "0.5668458", "0.5658804", "0.56313956", "0.56106216", "0.5607007", "0.5589178", "0.55866534", "0.5583706", "0.5576278", "0.5576207", "0.5571753", "0.5555696", "0.5554244", "0.55516803", "0.5544557", "0.5536546" ]
0.8585831
0
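This record's positive document is the read-only counterpart of the fragment above: the `pos` index gives O(1) access to an element's slot, so no scan of `A` is needed. A hypothetical way to exercise it against the `IndexedHeap` sketch (the attachment and the test values are illustrative, not part of the dataset):

```python
# Reuses the IndexedHeap sketch above; purely illustrative.
def get_priority(self, elem):
    pos = self.pos[elem]
    return self.A[pos][1]

IndexedHeap.get_priority = get_priority

h = IndexedHeap()                  # min-heap by default
h.insert("job", 7)
h.change_priority("job", 3)
assert h.get_priority("job") == 3  # O(1) lookup through the pos index
```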
Gets the minimum element of the heap.
def min(self):
    return self.get_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findmin(self):\n return self.heap[0] if len(self.heap) > 0 else None", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def get_min(self) -> object:\n if not self.is_empty():\n return self.heap.get_at_index(0)\n else:\n raise MinHeapException", "def get_min(self) -> object:\n if self.is_empty()==True:\n return None\n return self.heap.get_at_index(0)", "def min(self):\n return self.heap[1]", "def extractMinimum(self):\n\n return self.heap[1]", "def get_min(h: Heap) -> Node:\n prev, curr = _min(h)\n return curr", "def extractMin(self):\n if not self.heap:\n raise IndexError(\"there is no root\")\n elif len(self.heap) < 2:\n return self.heap.pop()\n else:\n self.heap[0], oldMin = self.heap.pop(), self.heap[0]\n self._shiftDown()\n return oldMin", "def extract_min(self):\n if self.is_empty():\n raise ValueError(\"Priority queue is empty\")\n\n edge_tuple = heapq.heappop(self.__heap)\n ew = edge_tuple[1]\n return ew.edge()", "def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i", "def min(self):\n return tuple(self.__heap[0])", "def _find_min(self):\n if self.is_empty(): # is_empty inherited from base class\n raise Empty('Priority queue is empty')\n small = self._data.first()\n walk = self._data.after(small)\n while walk is not None:\n if walk.element() < small.element():\n small = walk\n walk = self._data.after(walk)\n return small", "def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem", "def find_min_in_max_heap(self):\n min_number = None\n last_parent = (self.size - 1) // 2\n first_leaf = last_parent + 1\n # Shortcut to find first_leaf:\n # (self.size - 1) //2 + 1 = (self.size + 1) // 2\n # But for simplicity, we will use variable first_leaf in steps\n for index in range(first_leaf, self.size):\n if min_number is None:\n min_number = self.heap[index]\n else:\n min_number = min(min_number, self.heap[index])\n\n return min_number", "def peek_min(self):\n if self.root:\n return 
self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")", "def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top", "def smallest (self):\n return self.pointers[0].smallest()", "def peek(self):\n heaps = self.priorities\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n if len(heap_list) == 0:\n self._remove_key()\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n return heap_list[0]", "def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value", "def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...", "def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest", "def peek(self):\n if self.is_empty():\n raise ValueError(\"Heap is empty\")\n return self._heap[0]", "def find_min(self):\n return min(self.nodes, key=int)", "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n item = self._data[0]\r\n return item._key", "def min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n p = self._data.first()\n item = p.element()\n return (item._key, item._value)", "def find_smallest(self, i):\n\n left_child = self.left_child(i)\n\n right_child = self.right_child(i)\n\n if left_child and (self.heap[left_child][1] < self.heap[i][1]):\n\n smallest = left_child\n\n else:\n\n smallest = i\n\n if right_child and (self.heap[right_child][1] < self.heap[smallest][1]):\n\n smallest = right_child\n\n return smallest", "def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def find_smallest(self):\n return self._find_smallest(self.root)", 
"def peek(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n return self.heap[0]", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def getMin(self) -> int:\n return self.minStack[-1]", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator", "def min(self):\r\n if self.is_empty():\r\n raise Empty(\"Priority queue is empty.\")\r\n p = self.data.first()\r\n item = p.element()\r\n return item.key, item.value", "def take_min(self):\n return self.get_first()", "def peek_first(self):\n if len(self._heap) == 0:\n return None\n else:\n return self._heap[0]", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr", "def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min", "def minchild(self, pos):\n minpos = minkey = None\n for c in self.children(pos):\n if minkey == None or self.heap[c].key < minkey:\n minkey, minpos = self.heap[c].key, c\n return minpos", "def remove_min(self) -> object:\n if self.is_empty() == True:\n raise MinHeapException\n\n # minimum value to be returned\n min_val = self.get_min()\n\n # get last index\n end = self.heap.length() - 1\n\n # root index\n root = 0\n\n # swap first and last nodes and remove last value\n self.heap.swap(root, end)\n self.heap.pop()\n\n # length\n length = self.heap.length()\n\n # left index and right index\n left_i = (2 * root) + 1\n right_i = (2 * root) + 2\n\n # if heap has only one value\n if left_i > length - 1:\n return min_val\n\n # if heap has only left child\n if right_i > length - 1:\n if self.heap.get_at_index(left_i) < self.heap.get_at_index(root):\n self.heap.swap(left_i, root)\n return min_val\n else:\n return min_val\n\n # percolate down heap\n while left_i < length and right_i < length:\n replace_val = self.heap.get_at_index(root)\n left_child = self.heap.get_at_index(left_i)\n right_child = self.heap.get_at_index(right_i)\n\n # find index to swap nodes and check that a node exists\n if self.find_replacement(left_i, right_i, left_child, right_child, replace_val):\n node = self.find_replacement(\n left_i, right_i, left_child, right_child, replace_val)\n\n # swap nodes, set new root and child indices\n self.heap.swap(root, node)\n root = node\n left_i = (node * 2) + 1\n right_i = (node * 2) + 2\n\n return min_val", "def top(heap):\n return heap[_root()]", "def min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n item = self._data[0]\n return (item._key, item._value)", "def min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n item = self._data[0]\n return (item._key, item._value)", "def remove_min(self) -> object:\n if self.is_empty():\n raise MinHeapException\n return\n parent_index=0\n parent=self.get_min()\n #parent=5\n #print(parent)\n #print(self)\n self.heap.swap(parent_index,self.heap.length()-1)\n self.heap.pop()\n if self.is_empty():\n return parent\n min_child=self.find_min_child(1,2)\n while min_child!=None:\n if self.heap.get_at_index(min_child)>self.heap.get_at_index(parent_index):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n if parent_index==None:\n break\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 
2) + 2)\n return parent", "def min(self):\n return min(self)", "def peek(self):\n if self.heap:\n return self.heap[0]\n else:\n raise IndexError(\"there is no root\")", "def min(self):\n return self._min(self.root)", "def getMinValue(self):\n if self.left is None:\n return self.data\n return self.left.getMinValue()", "def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value", "def get_smallest_element(self, n=1):\n\t\tstack = [self]\n\t\tsmallest = None\n\t\tcount = 0\n\t\twhile count < n and stack:\n\t\t\titem = stack.pop()\n\t\t\tif isinstance(item, BSTreeNode):\n\t\t\t\tif not item.right == None:\n\t\t\t\t\tstack.append(item.right)\n\t\t\t\tstack.append(item.value)\n\t\t\t\tif not item.left == None:\n\t\t\t\t\tstack.append(item.left)\n\t\t\t\tcontinue\n\t\t\tcount += 1\n\t\t\tsmallest = item\n\t\tif count < n:\n\t\t\treturn None\n\t\treturn smallest", "def top(self):\n if len(self.heap) > 0:\n return self.heap[0]\n\n return None", "def top(self):\n if len(self.heap) > 0:\n return self.heap[0]\n\n return None", "def min(self):\n node = self\n while node.left:\n node = node.left\n return node", "def _get_minimum(self):\n return self._minimum", "def _get_min_child(self, parent_idx):\n if 2 * parent_idx + 2 > len(self._heap) - 1:\n return 2 * parent_idx + 1\n if self._heap[2 * parent_idx + 1] < self._heap[2 * parent_idx + 2]:\n return 2 * parent_idx + 1\n return 2 * parent_idx + 2", "def min():\n return KeeperOfMinOrMax(int.__gt__)", "def min(self) -> \"Node\":\n current = self\n while current.left is not None:\n current = current.left\n return current", "def minChild(self, i):\n if i * 2 + 1 > self.currentSize:\n return i*2\n else:\n if self.heapList[i*2] < self.heapList[i*2+1]:\n return i*2\n else:\n return i*2+1", "def min(self):\n p = self._find_min()\n item = p.element()\n return (item._key, item._value)", "def get_left_child(self, index):\n return self.heap[self.get_left_child_index(index)]", "def remove_min(self):\n if self._size == 1: # Only root node in heap\n return self._delete_node(self.root())\n min_node = self._array[0] # Root node has min value\n last = self._array[self._size-1] # Bottom-right-most node\n self._swap(min_node, last) # Move last node to root\n element = self._delete_node(min_node) # Delete root\n self._downheap(last) # Down-heap bubble last node\n if self._size == self._N//4 and self._N > BinaryTree.DEFAULT_CAPACITY:\n self._resize_array(self._N // 2) # Halve size of array\n return element", "def test_extract_min(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n min_key = h.extract_min()\n self.assertEqual(min_key, 4, 'should extract the min value')\n self.assertTrue(Heap.is_heap(data), 'should still hold the heap property')\n\n min_key = h.extract_min()\n self.assertEqual(min_key, 4, 'should extract the min value')\n self.assertTrue(Heap.is_heap(data), 'should still hold the heap property')\n\n min_key = h.extract_min()\n self.assertEqual(min_key, 4, 'should extract the min value')\n self.assertTrue(Heap.is_heap(data), 'should still hold the heap property')", "def find_smallest(node):\n smallest = node.value\n\n while node.left is not None:\n node = node.left\n smallest = node.value\n\n return 
smallest", "def min(self):\n if not self.root:\n return None\n\n node, parent = Treap._traverse(self.root, 'left')\n return node.key", "def minimum(self):\n return min(self.numbers)", "def min(self):\n return self._min", "def min(self):\n return self._min", "def find_min(self):\n if self.is_empty():\n return None\n else:\n p = self.first()\n return (p.key(), p.value())", "def min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty')\n item = self._data[0]\n return (item._key, item._value)", "def min_child(self, i):\n # If the current node only has one child, return the index of the unique child\n if (i * 2) + 1 > self.current_size:\n return i * 2\n else:\n # Herein the current node has two children\n # Return the index of the min child according to their values\n if self.heap_list[i * 2][0] < self.heap_list[(i * 2) + 1][0]:\n return i * 2\n else:\n return (i * 2) + 1", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def min(self):\n no = self.root\n if no:\n no = self.__search_node_min_dir(no)\n if no:\n return no.valor\n return None", "def peek(self):\n return self.m * self.heap[0] if self.heap else None", "def min(self):\n return self.__min", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def min(self):\n least = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] < least:\n least = self.data[i]\n return least", "def getmin(self):\n\n return self.X", "def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode", "def min(self):\n return self.root.leftmost", "def peek_up(self):\n return self.heap[0]", "def get_min(self):\n if self.root is None: # BC1\n return float('+inf')\n\n current = self.root\n while current.left is not None: # Traverse like a linked-list\n current = current.left\n\n return current.key", "def min_child(self, index):\n if self.empty():\n return None\n if self._has_left(index):\n left = self._left(index)\n small_child = left\n if self._has_right(index):\n right = self._right(index)\n if self._data[right] < self._data[left]:\n small_child = right\n if self._data[right] == self._data[left]:\n small_child = right\n return small_child\n return None", "def first(self):\n if self.is_empty():\n raise Empty(\"Queue undeflow.\")\n return self._head._element", "def get_min(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def get_min(self):\n return self.serie.min()", "def remove_min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n 
return res", "def _find_smallest(node):\n if node.left:\n return BinarySearchTree._find_smallest(node.left)\n else:\n return node", "def remove_min(self):\r\n try:\r\n if self.is_empty():\r\n raise \"List is Empty\"\r\n \r\n self.swap(0,len(self._data)-1) \r\n element = self._data.pop() # remove the value from list.\r\n self._heapify_after_remove(0) # heapify the list\r\n return element._key, element._value\r\n \r\n except Exception, e:\r\n print \"Error occurred in HeapDistance: remove_min\", e\r\n print traceback.print_exc(e)", "def first(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.root().element().value()", "def test_extract_min_and_insert(self):\n data = [4, 5, 8, 9, 6, 12, 9, 11, 13]\n h = Heap(data)\n\n min_value = h.extract_min_and_insert(2)\n self.assertEqual(min_value, 4, 'should return the min value')\n expected = [2, 5, 8, 9, 6, 12, 9, 11, 13]\n self.assertEqual(h.data, expected, 'should remove the old min and '+\n 'add new value correctly')", "def min_value(root_elem):\r\n\tmin_val = root_elem.value\r\n\tstack = [root_elem]\r\n\twhile stack:\r\n\t\t# Capture the current element and add it to the result\r\n\t\tcurrent = stack.pop()\r\n\t\tif current.value < min_val:\r\n\t\t\tmin_val = current.value\r\n\t\t# if the current element has a left/right child, add it to the stack for checking\r\n\t\tif current.right:\r\n\t\t\tstack.append(current.right)\r\n\t\tif current.left:\r\n\t\t\tstack.append(current.left)\r\n\treturn min_val", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def get_min_value(self, dim):\n return self._min_values[dim]" ]
[ "0.86202234", "0.8542973", "0.8532992", "0.85131574", "0.8460315", "0.84360385", "0.8252153", "0.81792325", "0.8092698", "0.8078686", "0.80421853", "0.79581094", "0.7663294", "0.76157564", "0.75814164", "0.7493778", "0.74732834", "0.7461151", "0.7447867", "0.7397468", "0.73450094", "0.73435974", "0.73394644", "0.73330003", "0.72903585", "0.7249945", "0.71954393", "0.71705204", "0.71020305", "0.7098565", "0.70860296", "0.7078853", "0.7078853", "0.7069156", "0.7059136", "0.70514494", "0.7049476", "0.7048503", "0.70427936", "0.7040899", "0.7036295", "0.6998615", "0.69781923", "0.6972228", "0.6965694", "0.6931243", "0.6931243", "0.6928862", "0.69073594", "0.69072366", "0.6902423", "0.68875873", "0.68646234", "0.68596613", "0.6854005", "0.6854005", "0.6843242", "0.68234116", "0.6808797", "0.6796728", "0.6773602", "0.67578787", "0.67566395", "0.6751882", "0.67514527", "0.6742531", "0.67235726", "0.67215186", "0.67173207", "0.6715366", "0.6715366", "0.67148876", "0.6714507", "0.67053956", "0.6700318", "0.6677139", "0.6674701", "0.6672802", "0.66645676", "0.6649853", "0.66408837", "0.6632909", "0.66316587", "0.66281176", "0.66239065", "0.66017646", "0.65539014", "0.6530632", "0.6527628", "0.650273", "0.64913493", "0.64717066", "0.6466517", "0.6445418", "0.6432541", "0.64322823", "0.642346", "0.6416131", "0.6416131", "0.6409173" ]
0.7234268
26
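Editor's note (illustrative, not part of the dataset): the negatives and scores that close the record above all revolve around reading the minimum element of a heap- or tree-backed container. The invariant those snippets rely on is that an array-backed binary min-heap keeps its smallest key at index 0, so a peek is O(1). The class below is a minimal sketch under that assumption; the name `MinHeapPeek` and its methods are made up for illustration and do not appear in any record.

```python
import heapq


class MinHeapPeek:
    """Tiny array-backed min-heap used only to illustrate an O(1) peek."""

    def __init__(self, items=None):
        self._heap = list(items or [])
        heapq.heapify(self._heap)      # enforce the min-heap invariant

    def min(self):
        if not self._heap:
            raise IndexError("min of an empty heap")
        return self._heap[0]           # the root always holds the minimum


if __name__ == "__main__":
    h = MinHeapPeek([9, 4, 7, 1, 6])
    print(h.min())                     # -> 1, heap left untouched
```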
Gets the minimum element of the heap and removes it.
def take_min(self): return self.get_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top", "def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min", "def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest", "def remove_min(self):\n if self._size == 1: # Only root node in heap\n return self._delete_node(self.root())\n min_node = self._array[0] # Root node has min value\n last = self._array[self._size-1] # Bottom-right-most node\n self._swap(min_node, last) # Move last node to root\n element = self._delete_node(min_node) # Delete root\n self._downheap(last) # Down-heap bubble last node\n if self._size == self._N//4 and self._N > BinaryTree.DEFAULT_CAPACITY:\n self._resize_array(self._N // 2) # Halve size of array\n return element", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval", "def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value", "def remove_min(self) -> object:\n if self.is_empty():\n raise MinHeapException\n return\n parent_index=0\n parent=self.get_min()\n #parent=5\n #print(parent)\n #print(self)\n self.heap.swap(parent_index,self.heap.length()-1)\n self.heap.pop()\n if self.is_empty():\n return parent\n min_child=self.find_min_child(1,2)\n while min_child!=None:\n if self.heap.get_at_index(min_child)>self.heap.get_at_index(parent_index):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n if parent_index==None:\n break\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n return parent", 
"def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def remove_min(self):\r\n try:\r\n if self.is_empty():\r\n raise \"List is Empty\"\r\n \r\n self.swap(0,len(self._data)-1) \r\n element = self._data.pop() # remove the value from list.\r\n self._heapify_after_remove(0) # heapify the list\r\n return element._key, element._value\r\n \r\n except Exception, e:\r\n print \"Error occurred in HeapDistance: remove_min\", e\r\n print traceback.print_exc(e)", "def remove_min(self) -> object:\n if self.is_empty() == True:\n raise MinHeapException\n\n # minimum value to be returned\n min_val = self.get_min()\n\n # get last index\n end = self.heap.length() - 1\n\n # root index\n root = 0\n\n # swap first and last nodes and remove last value\n self.heap.swap(root, end)\n self.heap.pop()\n\n # length\n length = self.heap.length()\n\n # left index and right index\n left_i = (2 * root) + 1\n right_i = (2 * root) + 2\n\n # if heap has only one value\n if left_i > length - 1:\n return min_val\n\n # if heap has only left child\n if right_i > length - 1:\n if self.heap.get_at_index(left_i) < self.heap.get_at_index(root):\n self.heap.swap(left_i, root)\n return min_val\n else:\n return min_val\n\n # percolate down heap\n while left_i < length and right_i < length:\n replace_val = self.heap.get_at_index(root)\n left_child = self.heap.get_at_index(left_i)\n right_child = self.heap.get_at_index(right_i)\n\n # find index to swap nodes and check that a node exists\n if self.find_replacement(left_i, right_i, left_child, right_child, replace_val):\n node = self.find_replacement(\n left_i, right_i, left_child, right_child, replace_val)\n\n # swap nodes, set new root and child indices\n self.heap.swap(root, node)\n root = node\n left_i = (node * 2) + 1\n right_i = (node * 2) + 2\n\n return min_val", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def extractMin(self):\n if not self.heap:\n raise IndexError(\"there is no root\")\n elif len(self.heap) < 2:\n return self.heap.pop()\n else:\n self.heap[0], oldMin = self.heap.pop(), self.heap[0]\n self._shiftDown()\n return oldMin", "def remove(self):\n max_item = self.heaplist[1]\n self.heaplist[1] = self.heaplist[self.currentsize]\n self.currentsize -= 1\n self.heaplist.pop()\n self.shift_item_down(1)\n return max_item", "def deleteMin(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n # grab last node in PQ to root and sink it down appropriately\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key] # delete index from position dict\n return node.key, node.value", "def remove_min(self):\r\n if 
self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def delete_min(self):\n min_val = self.peek_min()\n self.remove(min_val)\n return min_val", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty')\n self._swap(0, len(self) - 1)\n item = self._data.pop()\n self._down_heap(0)\n return (item._key, item._value)", "def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i", "def remove(self): \n \n popped = self.Heap[self.FRONT] \n self.Heap[self.FRONT] = self.Heap[self.size] \n self.size-= 1\n self.min_heapify(self.FRONT) \n return popped", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')", "def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n if len(self.heap)==0:\n raise ValueError(\"Tried popping 
empty heap\")\n return heapq.heappop(self.heap)[1]", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def pop_min(self):\n if self.get_size() == 0:\n return None\n\n # put minimum item at the end\n self.swap(0, len(self.table) - 1)\n\n # and remove it from the list;\n item = self.table.pop()\n\n # then fix new root\n self.percolate_down(0)\n return item", "def remove(self):\n # non empty heap: get first element\n if len(self.heap) > 0:\n removed = self.heap[0]\n\n # empty heap: return None\n else:\n return None\n\n # heap with one element: remove it and return\n if len(self.heap) == 1:\n return self.heap.pop()\n\n # put last element on the begining of the heap\n self.heap[0] = self.heap.pop()\n\n # descend new root while needed\n index, leftChild, rightChild = self.getChilds(0)\n while (leftChild < self.size() and \\\n self.heap[index] < self.heap[leftChild]) or \\\n (rightChild < self.size() and \\\n self.heap[index] < self.heap[rightChild]):\n\n # swap smallest child with parent\n if rightChild == len(self.heap) or \\\n self.heap[leftChild] > self.heap[rightChild]:\n\n # swap with left child and set current node as left child\n self.swap(index, leftChild)\n index, leftChild, rightChild = self.getChilds(leftChild)\n\n else:\n # swap with right child and set current node as right child\n self.swap(index, rightChild)\n index, leftChild, rightChild = self.getChilds(rightChild)\n\n # return removed node\n return removed", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n item = self._data.delete(self._data.first())\n return (item._key, item._value)", "def findmin(self):\n return self.heap[0] if len(self.heap) > 0 else None", "def remove_min(self) -> Tuple[K, V]:\n while self.queue:\n # pylint: disable=unused-variable\n value, count, key = heapq.heappop(self.queue)\n if value is not REMOVED:\n del self.__key_map__[key]\n return (key, value)\n return None", "def get_min(self) -> object:\n if not self.is_empty():\n return self.heap.get_at_index(0)\n else:\n raise MinHeapException", "def get_min(self) -> object:\n if self.is_empty()==True:\n return None\n return self.heap.get_at_index(0)", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def peek(self):\n heaps = self.priorities\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n if len(heap_list) == 0:\n self._remove_key()\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n return heap_list[0]", "def removeMinimum(self, i = 1):\n\n # print(\"I\", i, self.heap[i], self.noOfRemovedElements)\n\n # Base cases\n if self.heap[i] == 'NaN' :\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n if 2 * i + 1 > self.noOfElements or 2 * i > self.noOfElements:\n self.heap[i] == \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n 
self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Initializing children element positions\n child1 = 2 * i\n child2 = ( 2 * i ) + 1\n # print(\"child 1\", child1, self.heap[child1])\n # print(\"child 2\", child2, self.heap[child2])\n\n # Case when there are no children\n if self.heap[child1] == 'NaN' and self.heap[child2] == 'NaN':\n self.heap[i] = 'NaN'\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Case when there is only one child\n elif self.heap[child2] == 'NaN':\n self.heap[i], self.heap[child1] = self.heap[child1], \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Case when there is only one child, same as above\n elif self.heap[child1] == 'NaN':\n self.heap[i], self.heap[child2] = self.heap[child2], \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Swapping parent with the smaller child\n # Bubbling down\n if self.heap[child1].dijkstraCriterion <= self.heap[child2].dijkstraCriterion:\n self.heap[i], self.heap[child1] = self.heap[child1], self.heap[i]\n self.removeMinimum( child1 )\n else:\n self.heap[i], self.heap[child2] = self.heap[child2], self.heap[i]\n self.removeMinimum( child2 )", "def pop(self):\n heap = self.heap\n if len(heap) < 1:\n return None\n\n ret_val = self.front()\n self.__delete(0)\n return ret_val", "def pop(self):\n temp = self.elements.pop(0)\n self._heapify()\n return temp", "def pop(self):\n try:\n result = self._heap_list.pop(0)\n except IndexError:\n raise IndexError(\"Cannot pop from an empty heap.\")\n self._build_heap()\n return result", "def remove(self):\n # non empty heap: get first element\n if len(self.heap) > 0:\n removed = self.heap[0]\n\n # empty heap: return None\n else:\n return None\n\n # heap with one element: remove it and return\n if len(self.heap) == 1:\n return self.heap.pop()\n\n # put last element on the begining of the heap\n self.heap[0] = self.heap.pop()\n\n # descend new root while needed\n index = 0\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n while (leftChild < len(self.heap) and \\\n self.heap[index][1] > self.heap[leftChild][1]) or \\\n (rightChild < len(self.heap) and \\\n self.heap[index][1] > self.heap[rightChild][1]):\n\n # swap smallest child with parent\n if rightChild == len(self.heap) or \\\n self.heap[leftChild][1] < self.heap[rightChild][1]:\n\n # swap with left child\n swap = self.heap[index]\n self.heap[index] = self.heap[leftChild]\n self.heap[leftChild] = swap\n\n # update indexes\n index = leftChild\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n\n else:\n\n # swap with right child\n swap = self.heap[index]\n self.heap[index] = self.heap[rightChild]\n self.heap[rightChild] = swap\n\n # update indexes\n 
index = rightChild\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n\n # return removed node\n return removed", "def min(self):\n return self.heap[1]", "def extract_min(self):\n if self.is_empty():\n raise ValueError(\"Priority queue is empty\")\n\n edge_tuple = heapq.heappop(self.__heap)\n ew = edge_tuple[1]\n return ew.edge()", "def pop(self):\n return heapq.heappop(self.heap)", "def pop(self):\n\n while self.heap:\n# #logger_cagada.debug(\"elem de heap %s\" % self.heap)\n priority, node = self.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n return priority, node\n raise KeyError('pop from an empty priority queue')", "def remove_min(self):\r\n if self.is_empty():\r\n raise Empty(\"Priority queue is empty.\")\r\n item = self.data.delete(self.data.first())\r\n return item.key, item.value", "def extractMinimum(self):\n\n return self.heap[1]", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # put last leaf to root\n self.rank[x] = 1\n self.down(1) # maintain heap order\n return root", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # move the last leaf\n self.rank[x] = 1 # to the root\n self.down(1) # maintain heap order\n return root", "def pop(self) -> Optional[T]:\n try:\n elem = heapq.heappop(self._heap).val\n self._unique_values.remove(elem)\n except IndexError:\n return None\n return elem", "def get_min(h: Heap) -> Node:\n prev, curr = _min(h)\n return curr", "def pop(self):\n _, _, obj = heapq.heappop(self._heap)\n return obj", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def pop(self):\n if self.n == 0:\n raise ValueError(\"Heap is empty\")\n value = self.ar[0]\n self.n -= 1\n self.ar[0] = self.ar[self.n]\n self.heapify(0)\n return value", "def pop(self):\n if self.size is 0:\n print \"Heap is empty\"\n return\n # Swap the top most element with the last one\n self._swap(0, self.size - 1)\n poppedKey = self.queue[self.size - 1]\n # Reduce the size of the queue\n self.size -= 1\n # Rebalance\n self._heapify(0)\n return poppedKey", "def min(self):\n return tuple(self.__heap[0])", "def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def pop(self):\n priority, value = heappop(self._heap)\n return (-1 * priority, value)", "def _find_min(self):\n if self.is_empty(): # is_empty inherited from base class\n raise Empty('Priority queue is empty')\n small = self._data.first()\n walk = self._data.after(small)\n while walk is not 
None:\n if walk.element() < small.element():\n small = walk\n walk = self._data.after(walk)\n return small", "def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val", "def pop(self):\n if not self.empty():\n self.size -= 1\n return heapq.heappop(self.queue)\n else:\n return None", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def delete_min(self):\n node = self.root \n if node is None:\n return \n # move to the leftmost \n while node.left is not None:\n node = node.left\n # promote the node's right subtree \n if node.parent is not None:\n node.parent.left = node.right \n # if node's parent is None, the root is the smallest element \n else: \n self.root = node.right \n if node.right is not None:\n node.right.parent = node.parent \n parent = node.parent \n node.parent = None \n node.left = None \n node.right = None \n return node, parent", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def pop(self):\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key", "def heappop(heap):\n lastelt = heap.pop()\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt", "def remove_min(self):\n p = self._find_min()\n item = self._data.delete(p)\n return (item._key, item._value)", "def heap_pop_max(heap):\n last = heap.pop()\n if heap:\n return_item = heap[0]\n heap[0] = last\n heapq._siftup_max(heap, 0)\n else:\n return_item = last\n return return_item", "def peek(self):\n if self.is_empty():\n raise ValueError(\"Heap is empty\")\n return self._heap[0]", "def main():\n heap = MinHeap()\n for i in range(10):\n heap.add(i)\n print(heap.peek())\n for i in range(4):\n heap.poll()\n print(heap.peek())", "def del_min(self):\n min_idx = self.__pq[1]\n self.__swap(1, self.__n)\n self.__n -= 1\n self.__sink(1)\n self.__keys[self.__pq[self.__n + 1]] = None\n self.__qp[self.__pq[self.__n + 1]] = -1\n return min_idx", "def pop(self):\r\n try:\r\n key = heapq.heappop(self.heap)\r\n return self.elements[key]\r\n except:\r\n raise StopIteration", "def pop(self) -> Article:\n return heapq.heappop(self.heap)", "def pop(self):\r\n while self.pq:\r\n priority, count, task = heapq.heappop(self.pq)\r\n if task is not self.REMOVED:\r\n del self.entry_finder[task]\r\n return task\r\n raise KeyError('pop from an empty priority queue')", "def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...", "def pop(self):\n # if not empty\n if not self.empty():\n # swap min element with last and pop from data\n popped = self._data[0]\n self.swap(0, len(self)-1) # 
swap elements\n self._data.pop()\n # move swapped node to correct place\n self.percolate_down(0)\n\n return popped.get_value()[1]\n # if empty\n return None", "def pop(self):\n return heapq.heappop(self.array)", "def pop(self) -> tuple:\n item = self.__heap.popleft()\n\n if len(self) > 1:\n self.__heap.appendleft(self.__heap.pop())\n self.__sift_down(0)\n\n return item", "def test_extract_min_and_insert(self):\n data = [4, 5, 8, 9, 6, 12, 9, 11, 13]\n h = Heap(data)\n\n min_value = h.extract_min_and_insert(2)\n self.assertEqual(min_value, 4, 'should return the min value')\n expected = [2, 5, 8, 9, 6, 12, 9, 11, 13]\n self.assertEqual(h.data, expected, 'should remove the old min and '+\n 'add new value correctly')", "def __heappop(heap, nodes, pos, stopPos = None):\n # Default stopping position to end of heap\n stopPos = stopPos if not None else len(heap) - 1\n \n # Swap target node with stopping position, re-order heap to stopping\n # position minus one, then pop the target node\n Graph.__swapHeapNodes(heap, nodes, pos, stopPos)\n Graph.__siftdown(heap, nodes, pos, stopPos - 1)\n node = heap.pop(stopPos)\n \n # Delete node from dictionary and return\n del nodes[node[1]]\n return node", "def pop(self):\n self.data[0], self.data[-1] = self.data[-1], self.data[0]\n result = self.data.pop()\n self.heapify_down(0)\n return result", "def pop(self):\n priority, key = self.__heap[0]\n self.__swap(0, len(self.__heap) - 1)\n del self.__position[key]\n del self.__heap[-1]\n\n if self:\n self.__bubble_down(0)\n\n return priority, key", "def extract_minOld(H):\n minDist = approxInf\n u = None\n for v in H:\n if v[1] <= minDist:\n minDist = v[1]\n u = v\n return(H.pop(u))", "def test_remove(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n h.remove(2)\n\n self.assertTrue(Heap.is_heap(data), 'should preserve heap property')\n self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')", "def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)", "def find_min_in_max_heap(self):\n min_number = None\n last_parent = (self.size - 1) // 2\n first_leaf = last_parent + 1\n # Shortcut to find first_leaf:\n # (self.size - 1) //2 + 1 = (self.size + 1) // 2\n # But for simplicity, we will use variable first_leaf in steps\n for index in range(first_leaf, self.size):\n if min_number is None:\n min_number = self.heap[index]\n else:\n min_number = min(min_number, self.heap[index])\n\n return min_number", "def pop(self):\n (cost, node) = heapq.heappop(self.heap)\n self.states.pop(node.state, None) # remove state\n return node", "def pop(self):\n (_,_,path) = heapq.heappop(self.frontierpq)\n return path", "def pop(self):\n return heappop(self.priority_queue)[1]", "def top(heap):\n return heap[_root()]", "def heapreplace(self, key, value):\n if self.is_empty():\n raise IndexError('Priority queue is empty')\n small = self.min()\n self._data[0]._key = key\n self._data[0]._value = value\n self._down_heap(0)\n return small", "def heap_pop(self, value):\n if value is None or self.get_size() == 0:\n return\n\n if self.find(value) is not None:\n # end of list\n position = self.find(value)\n last = self.get_size() - 1\n\n # pop element and percolate down\n self.swap(position, last)\n self.table.pop()\n self.percolate_down(position)\n return", "def minHeap(self):\n for pos in range(self.size // 2, 0, -1):\n self.minHeapify(pos)", "def pop(self):\n if not self._heap:\n log.debug(\"popped from an empty heap\")\n return\n\n popped_contact = 
heapq.heappop(self._heap)[1]\n del self._node_dict[popped_contact.getId()]\n\n return popped_contact", "def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")" ]
[ "0.8762053", "0.8528082", "0.85083675", "0.8469478", "0.83009934", "0.82481664", "0.8209636", "0.81854427", "0.8066061", "0.80339926", "0.80249083", "0.7980516", "0.788914", "0.7866211", "0.7851768", "0.78122234", "0.7769824", "0.77424836", "0.76917297", "0.7681458", "0.7667105", "0.7639529", "0.76127577", "0.75566995", "0.755009", "0.7523308", "0.7498252", "0.74303293", "0.7372537", "0.73620546", "0.73602974", "0.7355255", "0.7325804", "0.7297063", "0.7248754", "0.7232589", "0.7228921", "0.7221417", "0.72123283", "0.7198382", "0.7192382", "0.7183191", "0.71807665", "0.7176425", "0.71582365", "0.7150898", "0.71415794", "0.71292275", "0.7127623", "0.71055144", "0.70852005", "0.7058338", "0.70473856", "0.7040018", "0.70364696", "0.70222247", "0.7000884", "0.69789326", "0.6966014", "0.6958123", "0.6922458", "0.6906727", "0.690189", "0.6898315", "0.6895785", "0.68219525", "0.6816684", "0.68016416", "0.6768828", "0.67657495", "0.67633337", "0.67622507", "0.6756295", "0.6741656", "0.6737469", "0.6727622", "0.6718788", "0.6717154", "0.6716369", "0.67025656", "0.6702338", "0.6685706", "0.6678809", "0.6654489", "0.66342306", "0.65887463", "0.6572286", "0.6554171", "0.654632", "0.6545196", "0.6544172", "0.65406305", "0.6526247", "0.6518991", "0.65063685", "0.6500299", "0.64837486", "0.6480539", "0.6472331", "0.64407647", "0.64401567" ]
0.0
-1
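Editor's note (illustrative, not part of the dataset): the record above pairs the query "Gets the minimum element of the heap and removes it." with a one-line wrapper, `take_min`, that delegates to a `get_first()` helper whose body is not shown here. For context, the sketch below spells out the conventional extract-min on a list-backed binary min-heap: swap the root with the last slot, pop it off, then sift the new root down until the heap property holds again. The class and method names are assumptions chosen for the example, not the dataset's own implementation.

```python
class MinHeap:
    """List-backed binary min-heap; illustrative sketch of extract-min only."""

    def __init__(self, items=None):
        self._a = []
        for x in (items or []):
            self.push(x)

    def push(self, value):
        self._a.append(value)
        i = len(self._a) - 1
        while i > 0 and self._a[(i - 1) // 2] > self._a[i]:   # bubble up
            self._a[(i - 1) // 2], self._a[i] = self._a[i], self._a[(i - 1) // 2]
            i = (i - 1) // 2

    def take_min(self):
        if not self._a:
            raise IndexError("take_min from an empty heap")
        self._a[0], self._a[-1] = self._a[-1], self._a[0]     # move the minimum to the end
        smallest = self._a.pop()                              # remove it
        self._sift_down(0)                                    # restore the heap property
        return smallest

    def _sift_down(self, i):
        n = len(self._a)
        while True:
            left, right, best = 2 * i + 1, 2 * i + 2, i
            if left < n and self._a[left] < self._a[best]:
                best = left
            if right < n and self._a[right] < self._a[best]:
                best = right
            if best == i:
                return
            self._a[i], self._a[best] = self._a[best], self._a[i]
            i = best


if __name__ == "__main__":
    h = MinHeap([5, 3, 8, 1, 9])
    print(h.take_min(), h.take_min())   # -> 1 3
```

Running the example pops the two smallest values in order (1, then 3); extract-min costs O(log n) because the sift-down walks at most one root-to-leaf path.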
Gets the maximum element of the heap.
def max(self): return self.get_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heap_extract_max(self, A):\n max = A[0]\n A[0] = A[len(A)-1]\n del A[-1] #use del so it stays out of the function\n self.max_heapify(A, 0)\n return max", "def heap_pop_max(heap):\n last = heap.pop()\n if heap:\n return_item = heap[0]\n heap[0] = last\n heapq._siftup_max(heap, 0)\n else:\n return_item = last\n return return_item", "def heap_extract_max(self, A):\n maxi = A[0]\n A[0] = A[-1]\n del A[-1] #use del so it stays out of the function\n self.max_heapify(A, 0)\n return maxi", "def peek_max(self):\n if self.root:\n return self.root.max().value\n raise ValueError(\"cannot perform peek_max on an empty tree\")", "def get_max(self):\n return self.max[-1]", "def max(self):\n assert self.__stack\n return self.__max_values[-1]", "def get_max(self):\n # 0(1)\n return self.max_stack.peek()\n\n # Don't need find_max we returned max_stack.peek()", "def return_the_maximum(self):\n\n return self.__max_stack[-1]", "def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value", "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def max(self):\n no = self.root\n if no:\n no = self.__search_node_max_esq(no)\n if no:\n return no.valor\n return None", "def max(self):\n if self.right is None:\n return self.item\n else:\n return self.right.max()", "def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data", "def max():\n return KeeperOfMinOrMax(int.__lt__)", "def _get_maximum(self):\n return self._maximum", "def find_max(self):\n return max(self.nodes, key=int)", "def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data", "def find_min_in_max_heap(self):\n min_number = None\n last_parent = (self.size - 1) // 2\n first_leaf = last_parent + 1\n # Shortcut to find first_leaf:\n # (self.size - 1) //2 + 1 = (self.size + 1) // 2\n # But for simplicity, we will use variable first_leaf in steps\n for index in range(first_leaf, self.size):\n if min_number is None:\n min_number = self.heap[index]\n else:\n min_number = min(min_number, self.heap[index])\n\n return min_number", "def delete_max(self):\n max_val = self.peek_max()\n self.remove(max_val)\n return max_val", "def largest_element(a):\n\n return None", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def get_max(self):\n return self._max", "def max_child(self, i):\n if (i * 2) + 2 > len(self._heap):\n return (i * 2) + 1\n else:\n if self._heap[(i * 2) + 1] > self._heap[(i * 2) + 2]:\n return (i * 2) + 1\n else:\n return (i * 2) + 2", "def find_max(self):\r\n maxVal = self.items[1]\r\n if maxVal is None:\r\n return None\r\n \r\n for i in range(1,len(self.items)):\r\n if self.items[i] is not None:\r\n if self.items[i] > maxVal:\r\n maxVal = self.items[i]\r\n return maxVal", "def max(self):\n return self._max(self.root)", "def max_heapify(heap, i):\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, 
largest)", "def max_child(self, index):\n if self.empty():\n return None\n if self._has_left(index):\n left = self._left(index)\n large = left\n if self._has_right(index):\n right = self._right(index)\n if self._data[right] == self._data[left]:\n large = right\n if self._data[right] > self._data[left]:\n large = right\n return large\n return None", "def max(self):\n return max(self)", "def pop(self):\n if len(self.heap)==0:\n raise ValueError(\"Tried popping empty heap\")\n return heapq.heappop(self.heap)[1]", "def get_max(self):\n\n max_value = self.head.value\n current_node = self.head\n # while current_node.next is not None: # when current_node = current.tail, this will not iterate\n while current_node.next is not None: # when current_node = current.tail, this will not iterate\n current_node = current_node.next\n # checks if the value is larger than our max value so far\n if max_value < current_node.value:\n max_value = current_node.value\n return max_value", "def get_maximum ( self, object ):\n return self.maximum", "def remove(self):\n max_item = self.heaplist[1]\n self.heaplist[1] = self.heaplist[self.currentsize]\n self.currentsize -= 1\n self.heaplist.pop()\n self.shift_item_down(1)\n return max_item", "def getMaxValue(self):\n if self.right is None:\n return self.data\n return self.right.getMaxValue()", "def max(self):\n return self.__max", "def get_max(self):\n if not self:\n return None\n return self.right.get_max() if self.right else self.value #Ternarary Operator", "def max(self):\n return self._max", "def max(self):\n return self._max", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def get_max(self):\n if self.root is None: # BC1\n return float('-inf')\n\n current = self.root\n while current.right is not None: # Traverse like a linked-list\n current = current.right\n\n return current.key", "def max(self):\n most = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] > least:\n most = self.data[i]\n return most", "def max(self):\n return self.root.rightmost", "def max_stack(self):\n if self.maxx == []:\n return None\n return self.maxx[-1]", "def maximum(self):\n return max(self.numbers)", "def peek(self):\n if self.is_empty():\n raise ValueError(\"Heap is empty\")\n return self._heap[0]", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')", "def get_max_value(self):\n max_value = max(self.values)\n return max_value", "def _get_maximum_value(self):\n if hasattr(self, '_maximum_value'):\n return self._maximum_value\n return None", "def max(self):\n if not self.root:\n return None\n\n node, parent = Treap._traverse(self.root, 'right')\n return node.key", "def build_max_heap(heap):\n\tfor j in range(heap.len//2, -1, -1):\n\t\tmax_heapify(heap, j)", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def max(self):\n node = self\n while node.right:\n node = 
node.right\n return node", "def get_max(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def top(heap):\n return heap[_root()]", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def maximum_value(self):\n if not self.root:\n return \"the tree is empty!\"\n\n max_val = self.root.value\n\n def _max_value(node):\n nonlocal max_val\n if not node:\n return\n if node.value > max_val:\n max_val = node.value\n\n _max_value(node.left)\n _max_value(node.right)\n _max_value(self.root)\n return max_val", "def imax(self):\n return self.elem.index(max(self))", "def deep_max(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[-1]\r\n return node.keys[-1] if node.keys else None", "def largest_tile(self, board):\n return max(get_state(board))", "def build_max_heap(A):\r\n i = int((len(A)-2)//2)\r\n while i >= 0:\r\n max_heapify(A, i)\r\n i -= 1\r\n return A", "def max(self):\n\n maximum = -float('inf')\n\n for stack in self:\n stack_max = stack.max()\n if stack_max > maximum:\n maximum = stack_max\n\n return maximum", "def take_max(self):\n return self.delete_first()", "def max(self) -> \"Node\":\n current = self\n while current.right is not None:\n current = current.right\n return current", "def pop(self):\n if self.size is 0:\n print \"Heap is empty\"\n return\n # Swap the top most element with the last one\n self._swap(0, self.size - 1)\n poppedKey = self.queue[self.size - 1]\n # Reduce the size of the queue\n self.size -= 1\n # Rebalance\n self._heapify(0)\n return poppedKey", "def find_max(self):\n return self.root and self.root.find_max()", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def min(self):\n return self.heap[1]", "def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None", "def get_max(self):\n return self.serie.max()", "def findmaxnode(self):\n if not self._rightchild:\n return self\n return self._rightchild.findmaxnode()", "def largest_item(list):\n pass", "def top(self):\n if len(self.heap) > 0:\n return self.heap[0]\n\n return None", "def top(self):\n if len(self.heap) > 0:\n return self.heap[0]\n\n return None", "def find_max(self, root = None):\n return self._find_max(root if root else self.root)", "def get_max(self):\n current = self\n while current.hasRight(): # This is the belief that the max has to be to the right. If you can't go right either in the begining or any more\n # if current has a right this line will be set and will keep going from line 129 to 130 until there are no more rights.\n current = current.right\n # this line returns as soon there is no more rights. 
breaking out of the loop.\n return current.value", "def max(self):\n max = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i and a[i] > max:\n max = a[i]\n return max", "def get_min(self) -> object:\n if self.is_empty()==True:\n return None\n return self.heap.get_at_index(0)", "def top(self) -> int:\n top_element = self.queue.get()\n self.queue.put(top_element)\n for _ in range(self.queue.qsize()-1):\n tmp = self.queue.get()\n self.queue.put(tmp)\n return top_element", "def find_maximum_value(self):\n if self.root: \n self.max_val = self.root.value\n else:\n return 'No tree found'\n def inner(root):\n if root.left:\n inner(root.left)\n\n if root.right:\n inner(root.right)\n\n if self.max_val < root.value:\n self.max_val = root.value\n\n inner(self.root)\n return self.max_val", "def pop(self):\n priority, value = heappop(self._heap)\n return (-1 * priority, value)", "def peek(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n return self.heap[0]", "def maximum(self):\n return self.properties.get('maximum')", "def max_heapify(self, i):\n largest, left_index, right_index = i, 2*i+1, 2*i+2\n current_length = self.heap_size\n\n if (left_index < current_length) and (self.heap[left_index].priority_key > self.heap[largest].priority_key):\n largest = left_index\n\n if (right_index < current_length) and (self.heap[right_index].priority_key > self.heap[largest].priority_key):\n largest = right_index\n\n if largest != i:\n self.heap[largest], self.heap[i] = self.heap[i], self.heap[largest]\n self.max_heapify(largest)\n return self.heap", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def pop(self):\n item = self.stack.pop()\n\n if item == self.max[-1]: # pop if the same element\n self.max.pop()\n\n return item", "def top(self) -> int:\n return self.push_queue[-1]", "def get_heap(self):\n return self._heap", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def build_max_heap(a):\r\n for i in range(math.floor((len(a) - 1)/2), -1, -1):\r\n max_heapify(a, i)", "def get_max_value(self, dim):\n return self._max_values[dim]", "def get_value_at(self, i):\n return self.default_value if i > self.last_item else self.heap[i]", "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def get_greatest_probability(self):\n greatest = -1\n for i in range(self.dim):\n for j in range(self.dim):\n if self.kb[i][j] > greatest:\n greatest = self.kb[i][j]\n\n return greatest", "def last(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.last_node().element().value()", "def heap_push_max(heap, item):\n heap.append(item)\n heapq._siftdown_max(heap, 0, len(heap)-1)" ]
[ "0.7984895", "0.79142934", "0.77526826", "0.75617915", "0.75280285", "0.7499288", "0.74823093", "0.74128735", "0.7341171", "0.73030984", "0.7244219", "0.7195734", "0.71896243", "0.7186615", "0.71417284", "0.71403116", "0.71197176", "0.71066064", "0.7081008", "0.70789945", "0.7047889", "0.70186454", "0.70007277", "0.69863737", "0.6980601", "0.6977832", "0.697695", "0.6965406", "0.6962505", "0.6932741", "0.6928291", "0.6913982", "0.6912427", "0.6903181", "0.68980587", "0.6886488", "0.6886488", "0.67889774", "0.6770234", "0.6756439", "0.6753997", "0.6748933", "0.6742746", "0.6741396", "0.6720065", "0.67182535", "0.67057014", "0.6705665", "0.67009515", "0.668259", "0.6659962", "0.6659962", "0.6659962", "0.6659962", "0.6659962", "0.6659962", "0.66535276", "0.66477674", "0.664297", "0.6633584", "0.66287684", "0.66123044", "0.6611648", "0.65999544", "0.65993077", "0.65981525", "0.65911293", "0.65875024", "0.6586312", "0.6585303", "0.65783876", "0.6558483", "0.6551379", "0.6546004", "0.65436465", "0.6538879", "0.6535862", "0.6535862", "0.6531942", "0.6520528", "0.6509921", "0.65061724", "0.64989525", "0.6498798", "0.6495488", "0.64950395", "0.64912486", "0.6482937", "0.64802796", "0.6470064", "0.64641947", "0.64610827", "0.6452105", "0.645029", "0.64501834", "0.64477783", "0.6417701", "0.64045846", "0.6404028", "0.639976" ]
0.73672485
8
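Editor's note (illustrative, not part of the dataset): the record above ("Gets the maximum element of the heap.") and the one that follows ("…and removes it.") both delegate to helpers of a class that is not shown, so the front element is evidently the maximum, i.e. a max-heap. Python's standard heapq module only ships a min-heap, so a common workaround, sketched below under that assumption, is to store negated keys; `MaxHeap` and its methods are illustrative names only, not the dataset's API.

```python
import heapq


class MaxHeap:
    """Max-heap built on heapq by negating keys (illustrative sketch only)."""

    def __init__(self, items=None):
        self._neg = [-x for x in (items or [])]
        heapq.heapify(self._neg)           # min-heap of negated keys acts as a max-heap

    def max(self):
        if not self._neg:
            raise IndexError("max of an empty heap")
        return -self._neg[0]               # O(1) peek: root of the negated heap

    def take_max(self):
        if not self._neg:
            raise IndexError("take_max from an empty heap")
        return -heapq.heappop(self._neg)   # O(log n): pop the root, restore the invariant


if __name__ == "__main__":
    h = MaxHeap([2, 7, 4, 9])
    print(h.max())       # -> 9 (still in the heap)
    print(h.take_max())  # -> 9 (now removed)
    print(h.max())       # -> 7
```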
Gets the maximum element of the heap and removes it.
def take_max(self): return self.delete_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def remove(self):\n max_item = self.heaplist[1]\n self.heaplist[1] = self.heaplist[self.currentsize]\n self.currentsize -= 1\n self.heaplist.pop()\n self.shift_item_down(1)\n return max_item", "def heap_pop_max(heap):\n last = heap.pop()\n if heap:\n return_item = heap[0]\n heap[0] = last\n heapq._siftup_max(heap, 0)\n else:\n return_item = last\n return return_item", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def delete_max(self):\n max_val = self.peek_max()\n self.remove(max_val)\n return max_val", "def heap_extract_max(self, A):\n max = A[0]\n A[0] = A[len(A)-1]\n del A[-1] #use del so it stays out of the function\n self.max_heapify(A, 0)\n return max", "def pop(self):\n if len(self.heap)==0:\n raise ValueError(\"Tried popping empty heap\")\n return heapq.heappop(self.heap)[1]", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')", "def heap_extract_max(self, A):\n maxi = A[0]\n A[0] = A[-1]\n del A[-1] #use del so it stays out of the function\n self.max_heapify(A, 0)\n return maxi", "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! 
The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval", "def pop(self):\n if self.n == 0:\n raise ValueError(\"Heap is empty\")\n value = self.ar[0]\n self.n -= 1\n self.ar[0] = self.ar[self.n]\n self.heapify(0)\n return value", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def pop(self):\n if self.size is 0:\n print \"Heap is empty\"\n return\n # Swap the top most element with the last one\n self._swap(0, self.size - 1)\n poppedKey = self.queue[self.size - 1]\n # Reduce the size of the queue\n self.size -= 1\n # Rebalance\n self._heapify(0)\n return poppedKey", "def pop(self):\n try:\n result = self._heap_list.pop(0)\n except IndexError:\n raise IndexError(\"Cannot pop from an empty heap.\")\n self._build_heap()\n return result", "def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min", "def pop(self):\n temp = self.elements.pop(0)\n self._heapify()\n return temp", "def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top", "def pop(self):\n heap = self.heap\n if len(heap) < 1:\n return None\n\n ret_val = self.front()\n self.__delete(0)\n return ret_val", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def pop(self):\n return heapq.heappop(self.heap)", "def pop(self):\n # O(1)\n # Your code here\n item = self.stack.pop() # O(1)\n # check if we're removing the max\n #if item == max: #O(1)\n # if so, we need to update self. 
max\n #new_max = self.find_max() # O(n) # Don't need find anymore\n # self.max = new_max #O(1)\n # self.max = item\n #return self.stack.pop()\n self.max_stack.pop()\n return item", "def pop(self) -> Optional[T]:\n try:\n elem = heapq.heappop(self._heap).val\n self._unique_values.remove(elem)\n except IndexError:\n return None\n return elem", "def pop(self):\n priority, value = heappop(self._heap)\n return (-1 * priority, value)", "def pop(self):\n _, _, obj = heapq.heappop(self._heap)\n return obj", "def remove(self): \n \n popped = self.Heap[self.FRONT] \n self.Heap[self.FRONT] = self.Heap[self.size] \n self.size-= 1\n self.min_heapify(self.FRONT) \n return popped", "def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # move the last leaf\n self.rank[x] = 1 # to the root\n self.down(1) # maintain heap order\n return root", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # put last leaf to root\n self.rank[x] = 1\n self.down(1) # maintain heap order\n return root", "def pop(self):\n item = self.stack.pop()\n\n if item == self.max[-1]: # pop if the same element\n self.max.pop()\n\n return item", "def del_max(self):\r\n maxVal = self.find_max()\r\n if maxVal is not None:\r\n self.items[1] = self.items[self.size]\r\n self.items[self.size] = None\r\n self.size -= 1\r\n self.perc_down(1)", "def remove_min(self):\n if self._size == 1: # Only root node in heap\n return self._delete_node(self.root())\n min_node = self._array[0] # Root node has min value\n last = self._array[self._size-1] # Bottom-right-most node\n self._swap(min_node, last) # Move last node to root\n element = self._delete_node(min_node) # Delete root\n self._downheap(last) # Down-heap bubble last node\n if self._size == self._N//4 and self._N > BinaryTree.DEFAULT_CAPACITY:\n self._resize_array(self._N // 2) # Halve size of array\n return element", "def pop(self):\n if not self.empty():\n self.size -= 1\n return heapq.heappop(self.queue)\n else:\n return None", "def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def pop(self):\n\n while self.heap:\n# #logger_cagada.debug(\"elem de heap %s\" % self.heap)\n priority, node = self.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n return priority, node\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n\n i = 1\n while True:\n left = i * 2\n if len(heap) <= 
left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val", "def pop(self):\n return heapq.heappop(self.array)", "def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def remove_min(self) -> object:\n if self.is_empty():\n raise MinHeapException\n return\n parent_index=0\n parent=self.get_min()\n #parent=5\n #print(parent)\n #print(self)\n self.heap.swap(parent_index,self.heap.length()-1)\n self.heap.pop()\n if self.is_empty():\n return parent\n min_child=self.find_min_child(1,2)\n while min_child!=None:\n if self.heap.get_at_index(min_child)>self.heap.get_at_index(parent_index):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n if parent_index==None:\n break\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n return parent", "def heap_pop(self, value):\n if value is None or self.get_size() == 0:\n return\n\n if self.find(value) is not None:\n # end of list\n position = self.find(value)\n last = self.get_size() - 1\n\n # pop element and percolate down\n self.swap(position, last)\n self.table.pop()\n self.percolate_down(position)\n return", "def remove_min(self) -> object:\n if self.is_empty() == True:\n raise MinHeapException\n\n # minimum value to be returned\n min_val = self.get_min()\n\n # get last index\n end = self.heap.length() - 1\n\n # root index\n root = 0\n\n # swap first and last nodes and remove last value\n self.heap.swap(root, end)\n self.heap.pop()\n\n # length\n length = self.heap.length()\n\n # left index and right index\n left_i = (2 * root) + 1\n right_i = (2 * root) + 2\n\n # if heap has only one value\n if left_i > length - 1:\n return min_val\n\n # if heap has only left child\n if right_i > length - 1:\n if self.heap.get_at_index(left_i) < self.heap.get_at_index(root):\n self.heap.swap(left_i, root)\n return min_val\n else:\n return min_val\n\n # percolate down heap\n while left_i < length and right_i < length:\n replace_val = self.heap.get_at_index(root)\n left_child = self.heap.get_at_index(left_i)\n right_child = self.heap.get_at_index(right_i)\n\n # find index to swap nodes and check that a node exists\n if self.find_replacement(left_i, right_i, left_child, right_child, replace_val):\n node = self.find_replacement(\n left_i, right_i, left_child, right_child, replace_val)\n\n # swap nodes, set new root and child indices\n self.heap.swap(root, node)\n root = node\n left_i = (node * 2) + 1\n right_i = (node * 2) + 2\n\n return min_val", "def 
heap_push_max(heap, item):\n heap.append(item)\n heapq._siftdown_max(heap, 0, len(heap)-1)", "def pop(self):\n self.data[0], self.data[-1] = self.data[-1], self.data[0]\n result = self.data.pop()\n self.heapify_down(0)\n return result", "def remove_min(self):\r\n try:\r\n if self.is_empty():\r\n raise \"List is Empty\"\r\n \r\n self.swap(0,len(self._data)-1) \r\n element = self._data.pop() # remove the value from list.\r\n self._heapify_after_remove(0) # heapify the list\r\n return element._key, element._value\r\n \r\n except Exception, e:\r\n print \"Error occurred in HeapDistance: remove_min\", e\r\n print traceback.print_exc(e)", "def remove(self):\n # non empty heap: get first element\n if len(self.heap) > 0:\n removed = self.heap[0]\n\n # empty heap: return None\n else:\n return None\n\n # heap with one element: remove it and return\n if len(self.heap) == 1:\n return self.heap.pop()\n\n # put last element on the begining of the heap\n self.heap[0] = self.heap.pop()\n\n # descend new root while needed\n index, leftChild, rightChild = self.getChilds(0)\n while (leftChild < self.size() and \\\n self.heap[index] < self.heap[leftChild]) or \\\n (rightChild < self.size() and \\\n self.heap[index] < self.heap[rightChild]):\n\n # swap smallest child with parent\n if rightChild == len(self.heap) or \\\n self.heap[leftChild] > self.heap[rightChild]:\n\n # swap with left child and set current node as left child\n self.swap(index, leftChild)\n index, leftChild, rightChild = self.getChilds(leftChild)\n\n else:\n # swap with right child and set current node as right child\n self.swap(index, rightChild)\n index, leftChild, rightChild = self.getChilds(rightChild)\n\n # return removed node\n return removed", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def pop(self):\r\n try:\r\n key = heapq.heappop(self.heap)\r\n return self.elements[key]\r\n except:\r\n raise StopIteration", "def pop(self):\n if self.stack == [] and self.maxx == []:\n return None\n \n if self.stack[-1] == self.maxx[-1]:\n self.maxx.pop(-1)\n return self.stack.pop(-1)", "def pop(self):\n return heappop(self.priority_queue)[1]", "def pop(self) -> Article:\n return heapq.heappop(self.heap)", "def remove(self):\n # non empty heap: get first element\n if len(self.heap) > 0:\n removed = self.heap[0]\n\n # empty heap: return None\n else:\n return None\n\n # heap with one element: remove it and return\n if len(self.heap) == 1:\n return self.heap.pop()\n\n # put last element on the begining of the heap\n self.heap[0] = self.heap.pop()\n\n # descend new root while needed\n index = 0\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n while (leftChild < len(self.heap) and \\\n self.heap[index][1] > self.heap[leftChild][1]) or \\\n (rightChild < len(self.heap) and \\\n self.heap[index][1] > self.heap[rightChild][1]):\n\n # swap smallest child with parent\n if rightChild == len(self.heap) or \\\n self.heap[leftChild][1] < self.heap[rightChild][1]:\n\n # swap with left child\n swap = self.heap[index]\n self.heap[index] = self.heap[leftChild]\n self.heap[leftChild] = swap\n\n # update indexes\n index = leftChild\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n\n else:\n\n # swap with right child\n swap = self.heap[index]\n self.heap[index] = self.heap[rightChild]\n self.heap[rightChild] = swap\n\n # update indexes\n index = rightChild\n leftChild = (2 * index) + 
1\n rightChild = (2 * index) + 2\n\n # return removed node\n return removed", "def pop(self):\n priority, key = self.__heap[0]\n self.__swap(0, len(self.__heap) - 1)\n del self.__position[key]\n del self.__heap[-1]\n\n if self:\n self.__bubble_down(0)\n\n return priority, key", "def pop(self) -> tuple:\n item = self.__heap.popleft()\n\n if len(self) > 1:\n self.__heap.appendleft(self.__heap.pop())\n self.__sift_down(0)\n\n return item", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def pop(self) -> int:\n if self.empty():\n raise RuntimeError(\"Queue is empty!\")\n result = self.data[self.head]\n self.data[self.head] = None\n self.head = (self.head + 1) % len(self.data)\n self.size -= 1\n if 0 < self.size < len(self.data) // 4 and len(self.data) > 10:\n self.resize(len(self.data) // 2)\n return result", "def pop(self):\r\n while self.pq:\r\n priority, count, task = heapq.heappop(self.pq)\r\n if task is not self.REMOVED:\r\n del self.entry_finder[task]\r\n return task\r\n raise KeyError('pop from an empty priority queue')", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty')\n self._swap(0, len(self) - 1)\n item = self._data.pop()\n self._down_heap(0)\n return (item._key, item._value)", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def max_heapify(heap, i):\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)", "def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")", "def heappop(heap):\n lastelt = heap.pop()\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt", "def pop(self) -> int:\n last = self.queue.popleft()\n while self.queue:\n self.aux_queue.append(last)\n last = self.queue.popleft()\n self.queue, self.aux_queue = self.aux_queue, self.queue\n return last", "def pop(self) -> T:\n while self.priority_queue:\n _, _, (item,) = heapq.heappop(self.priority_queue)\n if item is not None:\n del self.entry_finder[item] # type: ignore\n return cast(T, item)\n raise KeyError('pop from an empty priority queue')", "def pop(self) -> int:\n cur = None\n if(not 
self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur", "def pop(self):\n result = self.peek()\n self.item_count -= 1\n index = 1\n mem_size = len(self.items)\n while True:\n left = index * 2\n right = left + 1\n if self.is_invalid_index(left) and self.is_invalid_index(right):\n # Neither child exists, so delete this item.\n self.mark_invalid_index(index)\n return result\n elif self.is_invalid_index(right):\n # Right child does not exist, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n elif self.is_invalid_index(left):\n # Left child does not exist, so bubble up from right.\n self.items[index] = self.items[right]\n index = right\n elif self.is_heap_order(self.items[left], self.items[right]):\n # Left child should be on top, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n else:\n # Right child should be on top, so bubble up from right.\n self.items[index] = self.items[right]\n index = right", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def pop(self):\n assert self.__stack\n self.__stack.pop()\n self.__max_values.pop()", "def max_heapify_unrecursive(heap, i):\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest", "def remove_min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def deleteMin(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n # grab last node in PQ to root and sink it down appropriately\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key] # delete index from position dict\n return node.key, node.value", "def pop(self):\n # if not empty\n if not self.empty():\n # swap min element with last and pop from data\n popped = self._data[0]\n self.swap(0, len(self)-1) # swap elements\n self._data.pop()\n # move swapped node to correct place\n self.percolate_down(0)\n\n return popped.get_value()[1]\n # if empty\n return None", "def test_remove(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n h.remove(2)\n\n self.assertTrue(Heap.is_heap(data), 'should preserve heap property')\n self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')", "def dequeue(self):\n if self._size == 0:\n raise EmptyQueue('dequeue from empty queue')\n priority = self._heap[0].priority\n value = self._heap[0].value\n key = self._heap[0].key\n del self._index[key]\n item = self._heap.pop()\n self._size -= 1\n if self._size == 0:\n return priority, value, key\n self._heap[0] = item\n self._index[item.key] = 0\n self._sift_down(0)\n return priority, value, key", "def pop(self):\n\n self.__max_stack.pop()\n\n return self.__stack.pop()", "def peek(self):\n heaps = self.priorities\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n if len(heap_list) == 0:\n self._remove_key()\n keys = heaps.keys()\n key 
= min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n return heap_list[0]", "def pop(self):\n removed_node = self.top\n self.top = self.top._next\n self._size -= 1\n return removed_node.val", "def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i", "def remove(self):\n length = len(self.heap_list)\n if length == 0:\n return False\n elif length == 1:\n self.heap_list.pop()\n return True\n else:\n # exchange last data with top data and remove last data\n self.heap_list[0] = self.heap_list[length-1]\n self.heap_list.pop()\n # adjust max-heap from top to bottom\n self.sift_down(0, len(self.heap_list)-1)\n return True", "def __delete(self, index):\n heap = self.heap\n if index < len(heap) - 1:\n heap[index] = heap[len(heap) - 1]\n heap.pop()\n else:\n heap.pop()\n return\n\n value = heap[index][self.VALUE]\n while True:\n left_child_index = 2 * index + 1 # L child of k is 2k+1\n right_child_index = 2 * index + 2 # R child of k is 2k+2\n child_index = None\n if right_child_index < len(heap):\n right_child_value = heap[right_child_index][self.VALUE]\n left_child_value = heap[left_child_index][self.VALUE]\n child_index = right_child_index if (right_child_value < left_child_value) else left_child_index\n if child_index is None and left_child_index < len(heap):\n child_index = left_child_index\n\n if child_index is None or value <= heap[child_index][self.VALUE]:\n return\n else:\n self.__swap(child_index, index)\n index = child_index", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def pop(self) -> int:\n return self.q.popleft()", "def pop(self) -> int:\n return self.q.pop(0)", "def pop(self) -> int:\n return self.q.pop(0)", "def peek(self):\n if self.is_empty():\n raise ValueError(\"Heap is empty\")\n return self._heap[0]", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def pop(self) -> int:\n while len(self.push_queue) != 1:\n self.pop_queue.append(self.push_queue.pop(0))\n self.push_queue, self.pop_queue = self.pop_queue, self.push_queue\n return self.pop_queue.pop()", "def extractMin(self):\n if not self.heap:\n raise IndexError(\"there is no root\")\n elif len(self.heap) < 2:\n return self.heap.pop()\n else:\n self.heap[0], oldMin = self.heap.pop(), self.heap[0]\n self._shiftDown()\n return oldMin", "def delete_min(self):\n min_val = self.peek_min()\n self.remove(min_val)\n return min_val", "def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return self.queue.pop()", "def pop(self):\n temp = self.high_low.pop(0)\n self.compare_parent(self.high_low.index(self.high_low[-1]))", "def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! 
')", "def max_heapify(self, i):\n largest, left_index, right_index = i, 2*i+1, 2*i+2\n current_length = self.heap_size\n\n if (left_index < current_length) and (self.heap[left_index].priority_key > self.heap[largest].priority_key):\n largest = left_index\n\n if (right_index < current_length) and (self.heap[right_index].priority_key > self.heap[largest].priority_key):\n largest = right_index\n\n if largest != i:\n self.heap[largest], self.heap[i] = self.heap[i], self.heap[largest]\n self.max_heapify(largest)\n return self.heap", "def heap_sort_increase(alist):\r\n heap = MaxHeap()\r\n heap.build_heap(alist)\r\n originalSize = heap.size\r\n for i in range(heap.size):\r\n maxVal = heap.items[1]\r\n heap.del_max()\r\n heap.items[originalSize-i] = maxVal\r\n return heap.items[1:originalSize+1]" ]
[ "0.8782797", "0.8520369", "0.834615", "0.80668986", "0.79649794", "0.77679366", "0.77248996", "0.76835424", "0.7677773", "0.7659807", "0.76241404", "0.7556659", "0.75099057", "0.75092083", "0.74387366", "0.74314845", "0.73972696", "0.7393631", "0.7384405", "0.7384216", "0.73763555", "0.7325895", "0.73126805", "0.72823405", "0.72802526", "0.72776085", "0.72686815", "0.7259677", "0.72526115", "0.72477984", "0.724614", "0.72422063", "0.71998525", "0.7142924", "0.71160823", "0.7105997", "0.7104609", "0.7078952", "0.7064806", "0.703853", "0.7037419", "0.70001316", "0.69887304", "0.697507", "0.6968576", "0.6951099", "0.69495803", "0.694784", "0.6944883", "0.6904606", "0.6904388", "0.68854034", "0.6877468", "0.6846866", "0.67763245", "0.67617023", "0.67522526", "0.6745187", "0.67432535", "0.67232925", "0.6722743", "0.66704315", "0.6669114", "0.666412", "0.66481143", "0.662628", "0.66151065", "0.66116077", "0.6605612", "0.65801984", "0.6579938", "0.65687436", "0.65574545", "0.6544397", "0.65393716", "0.6528367", "0.652387", "0.6513865", "0.6512432", "0.64974284", "0.6489155", "0.6472493", "0.64644897", "0.6458237", "0.6453958", "0.6449806", "0.6448962", "0.6442281", "0.6436379", "0.6436379", "0.6423633", "0.64204973", "0.6407837", "0.64065254", "0.63916975", "0.63890624", "0.63649964", "0.63609225", "0.6355614", "0.6353705" ]
0.6685935
61
Transcodes a file src to a file dest.
def transcode(self, src: Path, dest: Path) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def case_convert_file_to_file(source_path: str, dest_path: str, style: CaseStyleEnum) -> None:\n with open(source_path, 'r') as f:\n contents = f.read()\n new_contents = case_convert_stream(contents, style)\n with open(dest_path, 'w') as f:\n f.write(new_contents)", "def _copy_file ( self, source, dest ):\n return", "def unify(src, dst):\n\n # NOTE: at this point it is assumed files are unifiable\n\n # get a temp file name\n dir = os.path.split(src)[0]\n tmp_handle, tmp_path = tempfile.mkstemp(dir=dir)\n os.close(tmp_handle)\n\n # rename the destination, in case we need to back out\n os.rename(dst, tmp_path)\n\n # link source to destination\n try:\n os.link(src, dst)\n except:\n # back out\n print 'Could not link %s -> %s, backing out' % (src, dst)\n try:\n if os.path.exists(dst):\n os.unlink(dst)\n os.rename(tmp_path, dst)\n except:\n print 'Could not back out!!! the destination file is still there as', tmp_file\n raise exceptions.OSError\n\n # done, remove the temp file\n os.unlink(tmp_path)", "def compress(self, sourcefile, destinationfile):\n with open(sourcefile, 'rb') as src_file, open(destinationfile,\n 'wb') as dest_file: # Öffne die Quell- und Zieldatei\n dest_file.write(bytes(\"rl3\", 'utf-8')) # Schreibe rl3 in die neue Datei zur Algorythmuserkennung\n extension_orig = bytes(os.path.splitext(sourcefile)[1][1:], 'utf-8') # Splitte die Dateiendung\n dest_file.write(len(extension_orig).to_bytes(1, 'big')) # Schreibe die Länge der Dateiendung\n dest_file.write(extension_orig) # Schreibe die Dateiendung\n counter = 1 # Setze den Wiederhohlungszähler auf 1\n last_byte = None # Erstelle die leere Variable mit dem letzten Byte\n chunk = src_file.read(self.chunk_size) # Liest Bytes aus\n while chunk: # Solange Bytes existieren\n for byte in chunk: # Für jedes Bytes\n if last_byte is not None and last_byte == byte and counter < self.MAXBYTES: # Wenn das letzte Byte gleich dem neuen Byts ist und die Anzahl nicht überschritten worden ist\n counter += 1 # Erhöhe den Zähler\n else: # Sonst\n if last_byte is not None: # Wenn das letzte Byte existiert\n if counter > (self.MAXBYTES - 255): # Wenn es sich lohnt zu komprimieren\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe das Markierungszeichen\n dest_file.write((counter - (self.MAXBYTES - 255)).to_bytes(1,\n 'big')) # Schreibe die Anzahl der Wiederhohlungen des Zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n else: # Sonst\n for i in range(counter): # Für die Anzahl der zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n if last_byte == ord(\n self.MARKER): # Wenn das Zeichen gleich dem Markierungzeichen ist\n dest_file.write(b'\\x00') # Schreibe 0 dahinter\n counter = 1 # Setze den Zähler auf 1 zurück\n last_byte = byte # Merke das aktuelle Byte für den Vergleich\n chunk = src_file.read(self.chunk_size) # Lese die neuen Bytes aus\n if counter > (self.MAXBYTES - 255): # Wenn es sich lohnt zu komprimieren\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe das Markierungszeichen\n dest_file.write((counter - (self.MAXBYTES - 255)).to_bytes(1,\n 'big')) # Schreibe die 
Anzahl der Wiederhohlungen des Zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n else: # Sonst\n for i in range(counter): # Für die Anzahl der zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n if last_byte == ord(self.MARKER): # Wenn das Zeichen gleich dem Markierungzeichen ist\n dest_file.write(b'\\x00') # Schreibe 0 dahinter", "def decompressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.decompress(data)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)", "def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())", "def convert(src, dst):\n with open(dst, 'w', encoding = 'utf-8') as myFile:\n records = read(src)\n for tag in sorted(records.keys()):\n myFile.write('%s %s\\n' %(tag, records[tag]))", "def handle_file(self, source_path, dest_path):\n raise NotImplemented", "def compress(src,dstfile):\n\tafile = zipfile.ZipFile(dstfile,\"w\",zipfile.ZIP_DEFLATED)\n\tfor root,dirs,files in os.walk(src):\n\t\tfor filename in files:\n\t\t\tabspath = osp.join(root,filename)\n\t\t\trelpath = osp.relpath(abspath,src)\n\t\t\tafile.write(abspath, relpath)\n\tafile.close();", "def _copy_file(src, dest):\n\n if src is None or dest is None:\n raise ValueError(\"src and dest must not be None\", src, dest)\n\n if not os.path.isfile(src):\n raise ValueError(\"src file does not appear to exist\", src)\n\n # if error on copy, subprocess will raise CalledProcessError\n try:\n subprocess.run(\n [\"/usr/bin/ditto\", src, dest], check=True, stderr=subprocess.PIPE\n )\n except subprocess.CalledProcessError as e:\n logging.critical(\n f\"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}\"\n )\n raise e", "def copy_file(fromf,tof, fromapp, toapp):\n f2w=open(tof,\"w\")\n with open(fromf) as f:\n for line in f:\n newline=line.replace(fromapp,toapp)\n f2w.write(newline.replace(fromapp.upper(),toapp.upper()))\n f2w.close()", "def copystat(src, dest):\n import shutil\n\n shutil.copystat(str(src), str(dest))", "def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)", "def process_file(src_file, dest_file):\n # read data\n with open(src_file) as fil:\n new_data = fil.read()\n # generate a chain of templates\n parent_template = None\n current_template = dest_file\n cursor = 1\n if EXTEND_FLAG in new_data:\n new_data = new_data.replace(EXTEND_FLAG, \"\")\n while exists(current_template):\n parent_template = current_template\n current_template = \"%s%s%d\" % (dest_file, CHILD_TPL_FLAG, cursor)\n cursor += 1\n # write data\n with open(current_template, \"w\") as fil:\n if parent_template:\n # in the chain of templates each has to extend one another\n new_data = \"\\n\".join([\n \"{%% extends \\\"%s\\\" %%}\" % parent_template,\n new_data\n ])\n fil.write(new_data)", "def copyFile(srcPath, destPath):\n shutil.copy(srcPath, destPath)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", 
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def process_file(cmap, source, destination):\n line = source.readline()\n while line:\n destination.write(process_line(cmap, line))\n line = source.readline()\n\n source.close()\n destination.close()", "def copy_file(source_file_name, dest_file_name):\n print(\"Copying \" + source_file_name + \" to \" + dest_file_name)\n shutil.copy2(source_file_name, dest_file_name)\n print(\"Copying done.\")", "def compress_stream(src, dst):\n with gzip.GzipFile(fileobj=dst, mode='wb') as gz:\n for block in iterfile(src):\n gz.write(block)", "def decompressFile(infile, outfile):\n decoder = Decoder(infile)\n for data in decoder.bytes():\n outfile.write(data)", "def copyFile(src_dir, dst_dir, f_name):\n\n try:\n src_file = open(osp.join(src_dir, f_name),\"rb\")\n dst_file = open(osp.join(dst_dir, f_name),\"wb\")\n dst_file.write(src_file.read())\n dst_file.close()\n src_file.close()\n except Exception, e:\n msg = \"!!! In copying files from < %s > dir to < %s > dir exception occur. Details: %s.\" % (src_dir,dst_dir, str(e))\n print >> import_out, msg\n LOG('performImportToPortal',INFO,'copyFile', msg)", "def convert_for_submission(source_dir, target_dir):\r\n files = subfiles(source_dir, suffix=\".nii.gz\", join=False)\r\n maybe_mkdir_p(target_dir)\r\n for f in files:\r\n img = sitk.ReadImage(join(source_dir, f))\r\n out_file = join(target_dir, f[:-7] + \".nii\")\r\n sitk.WriteImage(img, out_file)", "def pythonify(file_name, src_dir, dst_dir):\n src_dir = src_dir + [file_name]\n dst_dir = dst_dir + [file_name + '.py']\n src = os.path.join(template_path, *src_dir)\n dst = os.path.join(template_path, *dst_dir)\n shutil.move(src, dst)", "def copyFile(src, dest):\n try:\n shutil.copy(src,dest)\n except shutil.Error as e:\n print(\"Error: \" + str(e))\n except IOError as e:\n print(\"Error: \" + e.strerror)", "def copy_file(fs, inpath, outpath):\n fs.copy(inpath, outpath)", "def put_file(self, src_fname, dst_fname):\n dst_fname = os.path.normpath(dst_fname)\n self.mkdirs(os.path.dirname(dst_fname))\n self._put(src_fname, dst_fname)", "def copy_model_file(src_dir, dst_dir, file_name, file_ext=None):\n file_ext = 'osm' if file_ext is None else file_ext\n\n src_file = src_dir.joinpath('in.{}'.format(file_ext))\n dst_file = dst_dir.joinpath('{}.{}'.format(file_name, file_ext))\n try:\n shutil.copyfile(src_file, dst_file)\n except FileNotFoundError:\n print('''in.{} doesn't exist.'''.format(file_ext))", "def dst_to_src(self,dst_file):\n rel_path=os.path.relpath(dst_file,start=self.dst_root)\n if (rel_path == '.'):\n rel_path=''\n else:\n rel_path= '/'+rel_path\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace(os.sep,'/')\n return(self.src_root+rel_path)", "def transcode(filename, enc_data):\n base = os.path.splitext(filename)[0]\n exe = g.muxapp if g.transcoder_path == \"auto\" else g.transcoder_path\n\n # ensure valid executable\n if not exe or not os.path.exists(exe) or not os.access(exe, os.X_OK):\n xprint(\"Encoding failed. 
Couldn't find a valid encoder :(\\n\")\n time.sleep(2)\n return filename\n\n command = shlex.split(enc_data['command'])\n newcom, outfn = command[::], \"\"\n\n for n, d in enumerate(command):\n\n if d == \"ENCODER_PATH\":\n newcom[n] = exe\n\n elif d == \"IN\":\n newcom[n] = filename\n\n elif d == \"OUT\":\n newcom[n] = outfn = base\n\n elif d == \"OUT.EXT\":\n newcom[n] = outfn = base + \".\" + enc_data['ext']\n\n returncode = subprocess.call(newcom)\n\n if returncode == 0 and g.delete_orig:\n os.unlink(filename)\n\n return outfn", "def copy_file(file: str, dest: str) -> None:\n\tuux.show_debug(\"Copying \" + str(file) + \" => \" + str(dest))\n\tshutil.copy2(file, dest)", "def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file", "def copy_to_local(src_file):\r\n if not_exists(src_file, \"Source File\"):\r\n return 1, 0\r\n _local_file = os.path.basename(src_file)\r\n if wrap_cp_file(src_file, _local_file):\r\n return 1, 0\r\n return 0, _local_file", "def copy(self, src_path: str, tgt_path: str) -> None:", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def _extract_gz(src, dst):\n assert src.endswith(\".gz\"), \"{} is not a valid .gz file.\".format(src)\n assert os.path.exists(src), \"{} does not exist.\".format(src)\n\n with gzip.open(src, \"rb\") as f_src:\n # xxx.postfix.gz --> xxx.postfix\n file_name = os.path.basename(src)[:-3]\n with open(os.path.join(dst, file_name), \"wb\") as f_dst:\n shutil.copyfileobj(f_src, f_dst)", "def minify(src, dst, exclude_files=None, file_processors=None):\n\n LOGGER.info(\"copying files in <%s> to <%s>\", src, dst)\n\n file_processors = DEFAULT_PROCESSORS if file_processors is None else file_processors\n prefix = os.path.join(src, \"\")\n\n for src_path in _walk_files(src, exclude_files):\n assert src_path.startswith(prefix)\n\n dst_path = os.path.join(dst, src_path[len(prefix) :])\n dst_dir, dst_file = os.path.split(dst_path)\n os.makedirs(dst_dir, exist_ok=True)\n\n _, ext = os.path.splitext(dst_file)\n ext = ext[1:].lower() if ext else None\n processor = file_processors.get(ext, copyfileobj)\n\n LOGGER.debug(\n \"copying file <%s> to <%s> using processor %r\",\n src_path,\n dst_path,\n processor,\n )\n\n with open(src_path, \"rb\") as fsrc, open(dst_path, \"wb\") as fdst:\n processor(fsrc, fdst)", "def copy_file(src_file,dst_folder):\n from shutil import copyfile\n from os.path import split\n copyfile(src_file, 
dst_folder+split(src_file)[1])\n return", "def write_c_source(self, dst):\n wfd = open(dst, \"wt\")\n wfd.write(self.generate_c_source())\n wfd.close()", "def WriteArchiveSourceMappingFile(compiled_archive_file_path,\n output_archive_source_mapping_file,\n translated_source_files,\n objc_file_path,\n file_open=open):\n with file_open(output_archive_source_mapping_file, 'w') as f:\n for translated_source_file in translated_source_files.split(','):\n file_path = os.path.relpath(translated_source_file, objc_file_path)\n f.write(compiled_archive_file_path + ':' + file_path + '\\n')", "def cp(src, dest):\n _shutil.copy2(native(src), native(dest))", "def moveFile(src, dest, bak=\"bak\"):\n\t\n\tmessage = \"processing: {0} -> {1}\".format(src, dest)\n\tlogger.info(message)\n\n\t#compare the source and destination, if the files are the same do nothing\n\tif os.path.exists(src) and os.path.exists(dest): \n\t\tmessage = \"file {0} found, comparing to {1}\".format(src, dest)\n\t\tlogger.info(message)\n\t\t(fileCheck, fileSig) = verifyFile(src, dest)\n\t\tif fileCheck:\n\t\t\tmessage = \"source file {0} matches destination file {1}\".format(src, dest)\n\t\t\tlogger.info(message)\n\t\t\treturn True\n\t\t\n\t#checks to see if the destination file exists, then creates a backup of it\n\tif os.path.exists(dest):\n\t\tbackupFileName = \"{0}.{1}\".format(dest, bak)\n\t\tmessage = \"file {0} exists, creating backup: {1}\".format(dest, backupFileName)\n\t\tlogger.info(message)\n\t\ttry:\n\t\t\tshutil.move(dest, backupFileName)\n\t\texcept IOError as errorMessage:\n\t\t\tlogger.error(errorMessage)\n\t\t\treturn False\n\t\t\n\t#attempts to copy the source file to the destination, \n\tif os.path.exists(src):\n\t\tmessage = \"copying {0} to {1})\".format(src, dest)\n\t\ttry:\n\t\t\tshutil.copy(src, dest)\n\t\texcept IOError as errorMessage:\n\t\t\tlogger.error(errorMessage)\n\t\t\tshutil.move(backupFilenName, dest)\n\t\t\treturn False\n\t\t\n\t#verify that files are the same\n\t(fileCheck, fileSig) = verifyFile(src, dest)\n\tif fileCheck:\n\t\tmessage = \"File transfer verified {0} -> {1}\".format(src, dest)\n\t\tlogger.info(message)\n\t\tmessage = \"File Signature for {0}: {1}\".format(src, fileSig)\n\t\tlogger.info(message)\n\t\treturn True\n\telse:\n\t\tmessage = \"file signatures do not match, rolling back {0} -> {1}\".format(backupFileName, dest)\n\t\tlogger.error(message)\n\t\n\t#roll back file\n\ttry:\n\t\tshutil.move(backupFileName, dest)\n\texcept IOError as errorMessage:\n\t\tlogger.error(errorMessage)\n\t\treturn False\n\t\n\treturn True", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def copy(self, fname):\n _, ext = osp.splitext(fname)\n spath = osp.join(self.src, fname)\n oname = fname\n path = osp.join(self.dst, oname)\n os.makedirs(osp.dirname(path), exist_ok=True)\n if ext in [\".css\"]:\n content = self.include(fname)\n with open(path, \"wt\") as fp:\n fp.write(content)\n else:\n shutil.copyfile(spath, path)\n return osp.relpath(oname, self.root)", "def convert(fname_src, verbose=False):\n if not os.path.isfile(fname_src):\n raise IOError('File not found: %s' % fname_src)\n\n # File names.\n b, e = os.path.splitext(fname_src)\n fname_dst = b + '.m4a'\n\n # Build command.\n cmd = 'ffmpeg -y -i \"%s\" \"%s\"' % (fname_src, fname_dst)\n\n t0 = 
time.time()\n std_out, std_err = run_cmd(cmd)\n dt = time.time() - t0\n\n if dt < 0.01:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n if std_out.lower().find('error') >= 0:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n # Done.\n return fname_dst", "def __copyfile2(source, destination):\n logger.info(\"copyfile2: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy2(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile2: %s -> %s failed! Error: %s\", source, destination, e\n )\n return False", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def convert_translations(self, dest_dir):\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n total_translation_rows = 0\n with open(os.path.join(dest_dir, 'translations.txt'),\n 'w+b') as out_file:\n writer = csv.DictWriter(\n out_file, fieldnames=NEW_TRANSLATIONS_FIELDS)\n writer.writeheader()\n for filename in sorted(os.listdir(self.src_dir)):\n if not (filename.endswith('.txt') and\n os.path.isfile(os.path.join(self.src_dir, filename))):\n print('Skipping %s' % filename)\n continue\n table_name = filename[:-len('.txt')]\n if table_name == 'translations':\n continue\n total_translation_rows += self._translate_table(\n dest_dir, table_name, writer)\n print('Total translation rows: %s' % total_translation_rows)", "def transfer_files(src: str, dst: str, move_src_data: bool = False):\n if move_src_data:\n logger.info('Move {0} to {1}'.format(src, dst))\n shutil.move(src, dst)\n else:\n logger.info('Copy {0} to {1}'.format(src, dst))\n copy_tree(src, dst)", "def copyfile(source, dest, buffer_size=1024*1024):\n if not hasattr(source, 'read'):\n source = open(source, 'rb')\n if not hasattr(dest, 'write'):\n dest = open(dest, 'wb')\n while 1:\n copy_buffer = source.read(buffer_size)\n if copy_buffer:\n dest.write(copy_buffer)\n else:\n break\n source.close()\n dest.close()\n return True", "def replaceFileAtomic(source_path, dest_path):\n\n if python_version >= 0x300:\n os.replace(source_path, dest_path)\n else:\n importFromInlineCopy(\"atomicwrites\", must_exist=True).replace_atomic(\n source_path, dest_path\n )", "def decompress_stream(src, dst):\n with gzip.GzipFile(fileobj=src, mode='rb') as gz:\n for block in iterfile(gz):\n dst.write(block)", "def putFile(self, _src, _dst, delExisting = True):\n\n #-------------------- \n # Delete existing _dst from XNAT host.\n #-------------------- \n if delExisting:\n r = self.__httpsRequest('DELETE', _dst)\n #print(\"%s Uploading\\nsrc: '%s'\\n_dst: '%s'\"%(_src, _dst))\n\n\n\n #-------------------- \n # Clean '_dst' string and endcode\n #-------------------- \n _dst = Xnat.path.makeXnatUrl(self.host, _dst)\n _dst = str(_dst).encode('ascii', 'ignore')\n\n\n\n #-------------------- \n # Put the file in XNAT using the internal '__httpsRequest'\n # method.\n #-------------------- \n with open(_src, 'rb') as f:\n response = self.__httpsRequest('PUT', _dst, files={'file': f}, \n headers={'Content-Type': 'application/octet-stream'}, stream=True)\n\n 
return response", "def _encrypt(self, src_filepath, dest_filepath):\r\n self.log.info(\"Encrypting file {0} to {1}.\".format(src_filepath, dest_filepath))\r\n\r\n gpg = gnupg.GPG(options=self.gpg_options)\r\n key_data = open(self.key_file, mode='rb').read()\r\n import_result = gpg.import_keys(key_data)\r\n self.log.info(\"Key import results: {0}\".format(import_result.results))\r\n\r\n with open(src_filepath, 'rb') as f:\r\n status = gpg.encrypt_file(f,\r\n passphrase=self._passphrase,\r\n output=dest_filepath,\r\n recipients=self.recipients)\r\n self.log.info(\"ok: {0}, status:{1}, stderr: {2}\".format(status.ok, status.status, status.stderr))\r\n\r\n if status.ok and self.remove_unencrypted:\r\n os.remove(src_filepath)\r\n\r\n if not status.ok:\r\n raise AirflowException(\"Failed to encrypt file {0}: {1}\"\r\n .format(src_filepath, status.stderr))\r\n\r\n self.log.info(\"Completed file encryption.\")", "def compile(path_to_src, path_to_dest, connections, tunables, file_type=None):\n\n # if not provided a file type, infer from file extension\n if file_type == None:\n file_type = path_to_src.split(\".\")[-1]\n\n assert file_type in tokenizers\n tokenizer = tokenizers[file_type]\n\n graph = build_graph(connections)\n\n with open(path_to_src, 'r') as file:\n src = file.read()\n\n tokens = tokenizer.tokenize(src)\n\n tokens = first_pass(tokens, graph)\n\n #tokens = second_pass(tokens, gates)\n\n #tokens = third_pass(tokens, gates)\n\n compiled = tokenizer.detokenize(tokens)\n\n with open(path_to_dest, 'w') as file:\n file.write(compiled)", "def _archiveData(self, src, dest):\n \troot = os.getcwd()\n \tsrcPath = join(root,src)\n \tdestPath = join(root,dest)\n \tf = [] #Array with list of files to copy\n \ts = [] #Array with list of files successfully copied\n \tfor (dirpath, dirnames, filenames) in walk(srcPath):\n \t\tf.extend(filenames)\n \t\tif len(f) > 0:\n \t\t\tfor i in f:\n \t\t\t\tif str(i) != 'archiving_log.txt' and str(i) != 'archiving_log.txt~' and str(i) != 'archivingScript.py':\n \t\t\t\t\ttry:\n \t\t\t\t\t\tbuffer_size = int(20000)\n \t\t\t\t\t\tfileSrcPath = join(dirpath, i)\n \t\t\t\t\t\tfileDestPath = join(destPath, i)\n \t\t\t\t\t\twith open(fileSrcPath, 'rb') as fsrc:\n \t\t\t\t\t\t\twith open(fileDestPath, 'wb') as fdest:\n \t\t\t\t\t\t\t\tcopy = shutil.copyfileobj(fsrc,fdest,buffer_size)\n \t\t\t\t\t\t\t\tcopy\n \t\t\t\t\t\t\t\tself._backupLog('Copy Operation File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\t'+ 'Path: '+ str(srcPath)+'\\n')\n \t\t\t\t\t\t\t\ts.append(i)\n \t\t\t\t\texcept shutil.Error as e:\n \t\t\t\t\t\tself._backupLog('Error: %s' % e + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n \t\t\t\t\texcept IOError as e:\n \t\t\t\t\t\tself._backupLog('Error: %s' % e.strerror + '\\t' + 'File: '+str(i)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n \tif len(s) >0:\n for (dirpath,dirnames,filenames) in walk(srcPath):\n for cfile in f:\n for sfile in s:\n if cfile == sfile:\n try:\n filetoDelete = join(srcPath, cfile)\n os.remove(filetoDelete)\n self._backupLog('Delete Operation File: '+str(cfile)+ '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')\n except OSError, e:\n self._backupLog('Error deleting file: %s - %s.' 
% (e.filename, e.strerror) + '\\t' + 'Time: '+ str(time.strftime(\"%H:%M:%S\")) + '\\n')", "def _uncompress(fname, outdir, msg=msg):\n import os\n assert os.access(fname, os.R_OK), \"could not access [%s]\" % fname\n fname = os.path.abspath(os.path.realpath(fname))\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n orig_dir = os.getcwd()\n try:\n os.chdir(outdir)\n ext = os.path.splitext(fname)[1][1:] # drop the dot\n if ext in ('gz', 'bz2'):\n import tarfile\n f = tarfile.open(fname, 'r:%s'%ext)\n f.extractall()\n else:\n err = 'extension [%s] not handled (yet?)' % ext\n msg.error(err)\n raise ValueError(err)\n finally:\n os.chdir(orig_dir)", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def copy_files(orig_dir, dest_dir): \n files = [file for file in os.listdir(orig_dir)]\n \n for file in files:\n origin = os.path.join(orig_dir,file)\n destination = os.path.join(dest_dir,file)\n shutil.copyfile(origin,destination)", "def translate_files(input_file, output_file, translate_dict, delete_symbols):\n\n for line in input_file:\n result = translate(line, translate_dict, delete_symbols)\n output_file.write(result)", "def cp_to_file(fn0, fn):\n\n # keep rewriting attributes\n shutil.copyfile(fn0, fn)", "def process_files(inpath=os.path.join(os.curdir, \"data/raw\"), outpath=os.path.join(os.curdir, \"data/processed\")):\n filenames = [f for f in os.listdir(inpath) if fnmatch.fnmatch(f, '*.txt')]\n print \"fixing ascii encoding...\"\n for f in filenames:\n print f\n infile = os.path.join(inpath, f)\n outname = os.path.join(outpath, f)\n with open(outname, 'w') as outfile:\n text = open(infile).read()\n text = fix_ascii(text)\n outfile.write(text)", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. 
%s\" %(e))", "def compress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = build_frequency_dict(text)\n tree = build_huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (tree.num_nodes_to_bytes() + tree_to_bytes(tree) +\n int32_to_bytes(len(text)))\n result += compress_bytes(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def convert_file_to_mp4(from_: Path,\n to_: Path = None) -> None:\n if from_.suffix == '.mp4':\n os.rename(from_, to_)\n logger.info(f\"{from_} is even a mp4 file, move it to destination\")\n return\n\n if to_ is not None and to_.suffix != '.mp4':\n logger.error(f\"Destination file must have .mp4 extension, \"\n f\"but '{to_.suffix}' found in '{to_}'\")\n return\n\n to_ = to_ or change_suffix_to_mp4(from_)\n try:\n convert(from_, to_)\n except Exception:\n pass\n else:\n # move processed video\n os.rename(from_, CONVERTED_VIDEOS_FOLDER / from_)\n logger.debug(\n f\"Converted successfully, source file {short_filename(from_, 8)} \"\n f\"moved to {CONVERTED_VIDEOS_FOLDER}\"\n )", "def _copy_file(source_path, destination_path, compress_imagery=True, hard_link=False):\n\n source_file = str(source_path)\n destination_file = str(destination_path)\n\n # Copy to destination path.\n original_suffix = source_path.suffix.lower()\n suffix = destination_path.suffix.lower()\n\n output_paths = [destination_path]\n\n if destination_path.exists():\n _LOG.info('Destination exists: %r', destination_file)\n elif (original_suffix == suffix) and hard_link:\n _LOG.info('Hard linking %r -> %r', source_file, destination_file)\n os.link(source_file, destination_file)\n # If a tif image, compress it losslessly.\n elif suffix == '.tif' and compress_imagery:\n _LOG.info('Copying compressed %r -> %r', source_file, destination_file)\n check_call(\n [\n 'gdal_translate',\n '--config', 'GDAL_CACHEMAX', '512',\n '--config', 'TILED', 'YES',\n '-co', 'COMPRESS=lzw',\n '-co', 'predictor=2',\n source_file, destination_file\n ]\n )\n # If gdal output an IMD file, include it in the outputs.\n imd_file = destination_path.parent.joinpath('{}.IMD'.format(destination_path.stem))\n if imd_file.exists():\n output_paths.append(imd_file)\n else:\n _LOG.info('Copying %r -> %r', source_file, destination_file)\n shutil.copyfile(source_file, destination_file)\n\n return output_paths", "def link_source_into_tmp(infile, tmpdir, frag_base,\n suffix='.in'):\n logging.debug(\"Linking input input: %r\" % ({'infile':infile,\n 'tmpDir':tmpdir,\n 'base':frag_base}))\n tmp_file_name=getFragmentPath(tmpdir, frag_base, 1, suffix)\n os.link(infile, tmp_file_name)\n return 1", "def expand(self, sourcefile):\n with open(sourcefile, 'rb') as src_file: # Öffne die zu expandierende Datei\n if src_file.read(3) == b'rl3': # Wenn sie eine RL3 Datei ist\n extension_counter = src_file.read(1) # Lese die Anzahl der Bytes der Endung aus\n extension_orig = src_file.read(\n int.from_bytes(extension_counter, 'big')) # Lese die Endung auf Basis der Anzahl aus\n outputfile = os.path.splitext(sourcefile)[0] # Splitte den Dateinamen vom Pfad\n if os.path.isfile(\n outputfile + \".\" + extension_orig.decode(\"utf-8\")): # Überprüfe ob die Datei existiert\n number = 1 # Setz Dateinummer auf eins\n while os.path.isfile(outputfile + str(number) + \".\" + extension_orig.decode(\n \"utf-8\")): # Wiederhohle solange bis die Datei nicht existiert\n number += 1 # Erhöhe die Dateinummer\n outputfile += 
str(number) # Füge dem Dateiname die Nummer hinzu\n outputfile += \".\" + extension_orig.decode(\"utf-8\") # Füge dem Dateinamen die Endung hinzu\n with open(outputfile, 'wb') as dest_file: # Öffne die Zieldatei\n chunk = src_file.read(self.chunk_size) # Lese die Bytes aus\n counter = False # Aktuelles Byte ist keine Zähler\n value = False # Aktuelles Byte ist nicht der Wert\n count = 0 # Null Wiederhohlungen vom Wert\n while chunk: # Solange Bytes da sind\n for byte in chunk: # Gehe durch jedes Byte\n if byte == ord(\n self.MARKER) and not counter and not value: # Wenn das Byte ein Markierungszeichen ist und Zähler und Wert nicht aktiv sind\n counter = True # Aktiviere den Zähler\n elif counter: # Wenn der Zähler aktiv ist\n if byte == 0: # Wenn das aktuelle Byte null ist\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe den Marker\n counter = False # Desktiviere den Zähler\n else: # Sonst\n count = byte # Setze die Anzahl auf den Wert des Bytes\n counter = False # Deaktiviere den Zähler\n value = True # Aktiviere den Wert\n elif value: # Wenn der Wert aktiv ist\n for i in range(count + (self.MAXBYTES - 255)): # Für die Aazahl im Zähler\n dest_file.write(byte.to_bytes(1, 'big')) # Schreibe die Bytes\n value = False # Deaktiviere den Wert\n else: # Sonst\n dest_file.write(byte.to_bytes(1, 'big')) # Schreibe das Byte\n chunk = src_file.read(self.chunk_size) # Lese neue Bytes ein\n if counter: # Wenn der Zähler aktiv ist\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe den Marker\n else: # Sonst\n raise RLedError # Werfe den RLedError", "def update_file(dst, src, language, mutator):\n\n # if the source and destination are the same, we're updating in place\n inplace = dst == src\n\n if isinstance(src, str):\n # if a filename was provided, open the file\n if inplace:\n mode = \"r+\"\n else:\n mode = \"r\"\n src = open(src, mode)\n\n orig_lines = []\n\n # grab all of the lines of the file and strip them of their line ending\n old_lines = list(line.rstrip(\"\\r\\n\") for line in src)\n new_lines = list(mutator(old_lines, src.name, language))\n\n for line in src:\n line = line\n\n if inplace:\n # if we're updating in place and the file hasn't changed, do nothing\n if old_lines == new_lines:\n return\n\n # otherwise, truncate the file and seek to the beginning.\n dst = src\n dst.truncate(0)\n dst.seek(0)\n elif isinstance(dst, str):\n # if we're not updating in place and a destination file name\n # was provided, create a file object\n dst = open(dst, \"w\")\n\n for line in new_lines:\n dst.write(line)\n dst.write(\"\\n\")", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def upload_file(self, source, dest):\n print(f\"Uploading {source} to {dest}\")\n with open(source, \"rb\") as data:\n self.client.upload_blob(name=dest, data=data)", "def move_file(src, dst):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Moving file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Moving file failed. 
Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Moving file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Moving file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if len(files) != 0:\n debug.log(\"Moving File(s)...\", \"Move from %s\"%src, \"to %s\"%dst)\n for file_ in files:\n # Check if file contains invalid symbols:\n invalid_chars = re.findall('[^\\w/\\-\\.\\*]', os.path.basename(file_))\n if invalid_chars:\n debug.graceful_exit((\"Error: File %s contains invalid \"\n \"characters %s!\"\n )%(os.path.basename(file_), invalid_chars))\n continue\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Moving file: %s\"%file_)\n shutil.move(file_, dst)\n else:\n debug.log(\"Error: Moving file failed. %s is not a regular file!\"%file_)\n else: debug.log(\"Error: Moving file failed. No files were found! (%s)\"%src)", "def write_to_file(original_path, new_path):\n print(f\"[INFO]: Transform data from binary to text file {new_path}\")\n with open(new_path, mode='wt', encoding='utf-8') as new_file:\n with open(original_path, mode='rb') as original_file:\n for line in original_file:\n new_file.write(line.decode())", "def change_encoding_folder(dest):\n target_encoding = 'utf-8'\n file_type = '.txt'\n\n for root, dirs, files in os.walk(dest):\n for OneFileName in files:\n if OneFileName.find(file_type) == -1:\n continue\n OneFullFileName = join(root, OneFileName)\n fr = open(OneFullFileName, 'r')\n l = fr.read()\n sorce_encoding = chardet.detect(l)['encoding'] # 检测编码\n if sorce_encoding == target_encoding:\n continue\n l = l.decode(sorce_encoding, 'ignore').encode(target_encoding)\n\n fr.close()\n fw = open(OneFullFileName, 'w')\n fw.write(l)\n fw.close()", "def convert_to_jpg_then_compress(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name).replace('.png', '.jpg')\n\n\t\timage = Image.open(self.full_path)\n\t\timage.save(self._compressed_save_path)\n\n\t\timage = Image.open(self._compressed_save_path)\n\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def convert_and_move_file (filename, origpath, wavpath, mp4path, mono):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav (filename, name, origpath, wavpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n oldlocation = path.join(origpath, filename)\n newlocation = path.join(mp4path, filename)\n shutil.move(oldlocation, newlocation)", "def main(source_dir, dest_dir):\n\n paths = []\n for root, _, files 
in os.walk(source_dir):\n paths.extend([os.path.join(root, f) for f in files])\n\n def copy(source_path, skip_existing=True):\n \"\"\"Copies a file from source_path to source_path with\n source_dir replaced by dest_dir.\n\n Arguments:\n source_path(str): Path to a file to be copied.\n skip_existing(bool): True to skip copying files\n when the destination file already exists.\n \"\"\"\n\n dest_path = source_path.replace(source_dir.strip('/'), dest_dir.strip('/'))\n\n # Skip if dest file already exists\n if skip_existing and os.path.exists(dest_path):\n return\n\n # Create directory if necessary\n os.makedirs(os.path.dirname(dest_path), exist_ok=True)\n\n copyfile(source_path, dest_path)\n\n p_umap(copy, paths)", "def _decrypt(self, src_filepath, dest_filepath):\r\n self.log.info(\"Decrypting file {0} to {1}.\".format(src_filepath, dest_filepath))\r\n\r\n gpg = gnupg.GPG(options=self.gpg_options)\r\n key_data = open(self.key_file, mode='rb').read()\r\n import_result = gpg.import_keys(key_data)\r\n self.log.info(\"Key import results: {0}\".format(import_result.results))\r\n\r\n with open(src_filepath, 'rb') as f:\r\n status = gpg.decrypt_file(f,\r\n passphrase=self._passphrase,\r\n output=dest_filepath)\r\n self.log.info(\"ok: {0}, status:{1}, stderr: {2}\".format(status.ok, status.status, status.stderr))\r\n\r\n if status.ok and self.remove_encrypted:\r\n os.remove(src_filepath)\r\n\r\n if not status.ok:\r\n raise AirflowException(\"Failed to decrypt file {0}: {1}\"\r\n .format(src_filepath, status.stderr))\r\n\r\n self.log.info(\"Completed file decryption.\")", "def copy_file(self, dst, tmpdir=None):\n if tmpdir is None:\n tmpfn = sameDir\n else:\n tmpfn = lambda _: tmpdir._path\n assert isinstance(dst, Path)\n with open(self._path, 'rb') as src_fd:\n with safeopen(dst._path, 'wb', useDir=tmpfn) as dst_fd:\n copyfileobj(src_fd, dst_fd)", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type # print now only for degug\n\n for i in self.ignore:\n if i in event.src_path or os.path.isdir(event.src_path):\n print \"Ignoring...\"\n return\n\n mod_file = event.src_path.split(self.source)[1]\n for r in self.rules:\n mod_file = mod_file.replace(r[0], r[1])\n\n print \"Writing:\", (self.destination + mod_file)\n \n input_file = utils.readFile(event.src_path)\n\n file_type = mod_file.split(\".\")[-1]\n reverted = utils.revert( input_file, \"(*\", \"*)\" ) if file_type == \"thy\" else utils.revert( input_file, \"/*\", \"*/\" )\n \n if len( reverted ) == 0 and len( input_file ) != 0:\n print \"Something might be wrong??\"\n else: utils.writeFile( self.destination + mod_file, reverted )", "def act_copy_file(self, file_source, file_target):\n try:\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy2(file_source, file_target)\n self.logger.debug('%s: Action: <copy> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file copy: %s -> %s', file_source, file_target)", "def copy_file(src, dst, ignore=None):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Copying file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Copying file failed. 
Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Copying file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Copying file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if ignore is not None: files = [fil for fil in files if not ignore in fil]\n if len(files) != 0:\n debug.log(\"Copying File(s)...\", \"Copy from %s\"%src, \"to %s\"%dst) #DEBUG\n for file_ in files:\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Copying file: %s\"%file_) #DEBUG\n shutil.copy(file_, dst)\n else:\n debug.log(\"Error: Copying file failed. %s is not a regular file!\"%file_) #DEBUG\n else: debug.log(\"Error: Copying file failed. No files were found! (%s)\"%src) #DEBUG", "def convertFile(file_name):\n for format in source_formats:\n try:\n with codecs.open(file_name, 'rU', format) as source_file:\n write_conversion(source_file)\n return\n except UnicodeDecodeError:\n pass\n\n print(\"Error: failed to convert '\" + file_name + \"'.\")", "def copy_file(filename, dst):\n # Create dir if needed\n dir_path = os.path.dirname(os.path.expanduser(dst))\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n src = os.path.join(get_data(''), filename)\n dst = os.path.expanduser(dir_path)\n shutil.copy2(src, dst)", "def _put(self, src_fname, dst_fname):\n logging.info('Transferring file %s to %s', src_fname, self._ip_addr)\n sftp_cli = self._get_sftp_client()\n if sftp_cli is None:\n raise Exception('Not supported without ssh.')\n return sftp_cli.put(src_fname, dst_fname)", "def copy_ext(src, dst, include=None, exclude=None):\n # Find files from the specified extensions.\n files = find_files(src, include, exclude)\n # Transform all file paths in relative.\n rel = [os.path.relpath(file, src) for file in files]\n # Concatenate the relative path to the destination folder.\n dst = [f'{dst}\\\\{rel}' for rel in rel]\n # Run in a thread pool.\n parallel.run(copy, list(zip(files, dst)), thread=True)", "def _gzip_file(filename):\n gzip_filename = filename + '.gz'\n with open(filename, 'rb') as f_in, gzip.open(gzip_filename, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)", "def RestoreCase(dirc, dest):\n subprocess.call(['cp', '-r', dirc, dest])", "def file_copy_from_local(self, path, dest):\n if not j.sal.fs.exists(path):\n raise j.exceptions.Base(\"{} doesn't exist on local file system\".format(path))\n\n with open(path, \"rb\") as f:\n self.file_write(dest, f, append=False, create=True)\n return", "def translator(filename: str, outfile):\r\n progname = filename[:-3]\r\n vm_code = parser(filename)\r\n for line in vm_code:\r\n out_line = trans_line(line, progname)\r\n outfile.write(out_line) # write out_line to file\r", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def 
separate(in_file, orig_dir, dest_dir):\n files = set()\n with open(in_file, encoding=\"utf8\") as f:\n for l in f:\n files.add(l.split()[0])\n \n dest = pathlib.Path(dest_dir)\n if not dest.exists():\n dest.mkdir()\n \n for p in pathlib.Path(orig_dir).iterdir():\n if p.stem in files:\n print(\"Moviendo\", p.name)\n p.rename(dest / p.name)", "def download_file(src, dst):\n subprocess.check_output(cmd_preamble + [\"cp\", f\"jot://{src}\", dst])" ]
[ "0.6751052", "0.6534881", "0.61683327", "0.6163265", "0.61372477", "0.6119148", "0.60524434", "0.602882", "0.598594", "0.5982905", "0.5955934", "0.59134305", "0.5866599", "0.585062", "0.58318645", "0.5749547", "0.5694051", "0.56439036", "0.5634352", "0.5634352", "0.5634352", "0.5614407", "0.55886227", "0.5582447", "0.5545823", "0.55201465", "0.55132014", "0.5504407", "0.5501212", "0.54894537", "0.5460742", "0.5458791", "0.5441091", "0.5436041", "0.54335356", "0.54120255", "0.54120255", "0.54048645", "0.53939706", "0.5369373", "0.53613764", "0.5358013", "0.53464305", "0.5331949", "0.53212905", "0.5308028", "0.5303354", "0.5282347", "0.52762336", "0.52439755", "0.5243079", "0.5241368", "0.5236652", "0.5228909", "0.5221632", "0.5216366", "0.5215599", "0.5210689", "0.5207035", "0.5196966", "0.5192665", "0.51811934", "0.5177924", "0.5176973", "0.51714057", "0.516565", "0.51653266", "0.51589954", "0.51526093", "0.5148315", "0.5143621", "0.51432353", "0.5124609", "0.5119239", "0.5109242", "0.50914955", "0.5086761", "0.506904", "0.5052107", "0.5051276", "0.5033304", "0.50228703", "0.50101924", "0.50092816", "0.5003446", "0.49886382", "0.49795747", "0.4968211", "0.4963219", "0.49611717", "0.49559015", "0.49540344", "0.49496597", "0.49484774", "0.49396816", "0.49382344", "0.49335584", "0.49332368", "0.49321693", "0.4929064" ]
0.7879794
0
Takes an integer below 1001 and converts it into English text. Spaces and hyphens are ignored, as the instructions require.
def int2text(integer): # Numbers 1-99 are handled by simply looking up words in the special_case # dictionary. if integer < 100: return digit2text(integer) elif integer < 1000: # If exactly some hundred, then just return the word for the hundred's # place and the word 'hundred' if integer%100 == 0: return digit2text(integer/100)+'hundred' # Otherwise return the word for the hundred's place, the word # 'hundredand' and do some composition to make the rest of the words. else: return digit2text(integer/100)+'hundredand'+\ digit2text(integer%100) # Special case for 1000. elif integer == 1000: return "onethousand"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def number2text(integer):\n\n numbers_1_20_char = [\"one\", \"two\", \"three\", \"four\", \"five\",\n \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\n \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\", \"twenty\"]\n\n numbers_21_99_int = list(range(20, 100, 10))\n numbers_21_99_char = [\"twenty\", \"thirty\", \"forty\", \"fifty\",\n \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n\n numbers_100_999_int = list(range(100,1000,100))\n numbers_100_999_char = [\"one hundred\", \"two hundred\", \"three hundred\", \"four hundred\", \"five hundred\",\n \"six hundred\", \"seven hundred\", \"eight hundred\", \"nine hundred\"]\n\n number_1000_int = 1000\n number_1000_char = \"one thousand\"\n\n if integer <= 0:\n raise ValueError(\"The number must be higher than 0, and smaller than 1001\")\n elif 1 <= integer <= 19:\n word = numbers_1_20_char[integer - 1]\n elif 20 <= integer <= 99:\n if integer in numbers_21_99_int:\n word = numbers_21_99_char[int(integer/10) - 2]\n else:\n inBetween = list(str(integer))\n lastword = numbers_1_20_char[int(inBetween[1]) - 1]\n firstword = numbers_21_99_char[int(int(inBetween[0])) - 2]\n word = \"\".join([firstword, lastword])\n elif 100 <= integer <= 999:\n if integer in numbers_100_999_int:\n word = numbers_100_999_char[int(integer/100) - 1]\n else:\n inBetween = list(str(integer))\n firstword = numbers_100_999_char[int(integer / 100) - 1]\n if int(inBetween[2]) == 0:\n if int(inBetween[1]) == 1:\n word = \"\".join([firstword, \"and\", \"ten\"])\n else:\n secondword = numbers_21_99_char[int(int(inBetween[1])) - 2]\n word = \"\".join([firstword, \"and\", secondword])\n else:\n number = (int(inBetween[1])*10) + int(inBetween[2])\n if 1 <= number <= 20:\n secondword = numbers_1_20_char[number - 1]\n word = \"\".join([firstword, \"and\", secondword])\n else:\n secondword = numbers_21_99_char[int(int(inBetween[1])) - 2]\n thirdword = numbers_1_20_char[int(int(inBetween[2])) - 1]\n word = \"\".join([firstword, \"and\", secondword, thirdword])\n elif integer == number_1000_int:\n word = number_1000_char\n\n return word", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def digit2text(integer):\n # If the integer is in the special cases dictionary, then look up the word,\n # return it, and we're done.\n if integer in special_case_dict.keys():\n return special_case_dict[integer]\n # Otherwise compose the word, by taking the number in the ten's place and\n # multiplying by 10 (i.e. 
integer/10*10 evaluates to a number in the set\n # {10, 20, 30, 40, 50, 60, 70, 80, 90} for any input integer between 10-99.\n # Then add word for the number in the one's place\n else:\n return special_case_dict[integer/10*10]+special_case_dict[integer%10]", "def translateNumber(n):\r\n if type(n) != str:\r\n return None\r\n else:\r\n translation = \"\"\r\n word = \"\"\r\n for c in n:\r\n if c != ' ':\r\n word += c\r\n elif word in Numbers:\r\n translation += Numbers[word] + \" \"\r\n else:\r\n translation += word + \" \"\r\n return translation", "def num2words(num):\n # Create a dictionary of all unique numbers from 1 to 1,000\n num2words = {0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven',\\\n 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen',\\\n 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty',\\\n 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty',\\\n 90:'ninety', 1000:'onethousand'}\n result = ''\n while True:\n try:\n result += num2words[num]\n return result\n except:\n pass\n try:\n result += num2words[num-num%10] + num2words[num%10]\n return result\n except:\n result += num2words[(num - num%100)//100] + 'hundred'\n num = num%100\n if num == 0:\n return result\n else:\n result += 'and'", "def integer_to_english_numeral(n, activate_tts=False):\n if activate_tts is None:\n activate_tts = False\n elif not isinstance(activate_tts, bool):\n raise TypeError('Argument \"activate_tts\" is not a boolean')\n if not isinstance(n, int):\n raise TypeError('Not an integer')\n if n < 0:\n raise ValueError('Not a positive integer')\n if n > 999999999999:\n raise OverflowError('Integer greater than 999,999,999,999')\n return cardinal_numerals_eng.integer_to_english(n, activate_tts)", "def large_int_word(x):\n\n digits = [int(i) for i in str(x)]\n units = tens = hundreds = thousands = ''\n\n if len(digits) == 1:\n units = UNITS[digits[-1]]\n else:\n units = UNIT_PREFIXES[digits[-1]]\n tens = TENS[digits[-2]]\n if len(digits) >= 3:\n hundreds = HUNDREDS[digits[-3]]\n if len(digits) >= 4:\n thousands = UNITS[digits[-4]] + 'illin'\n if len(digits) >= 5:\n raise\n\n return units + tens + hundreds + thousands + 'illion'", "def hundreds_text(num):\n hundreds_digit = num // 100\n tens_digit = num % 100\n hundreds_text = singles[hundreds_digit] + ' ' + \"Hundred\"\n return hundreds_text + ' ' + tens_text(tens_digit)", "def _cardinal2word(strNumber):\n return Number.convertNumberIntoLetters(strNumber)", "def num_to_words(amount):\n digits = {\n 0: 'нуль', 1: 'одна',\n 2: 'дві', 3: 'три',\n 4: 'чотири', 5: 'п\\'ять',\n 6: 'шість', 7: 'сім',\n 8: 'вісім', 9: 'дев\\'ять',\n 10: 'десять', 11: 'одинадцять',\n 12: 'дванадцять', 13: 'тринадцять',\n 14: 'чотирнадцять', 15: 'п\\'ятнадцять',\n 16: 'шістнадцять', 17: 'сімнадцять',\n 18: 'вісімнадцять', 19: 'дев\\'ятнадцять'\n }\n\n dozens = {\n 2: 'двадцять', 3: 'тридцять',\n 4: 'сорок', 5: 'п\\'ятдесят',\n 6: 'шістдесят', 7: 'сімдесят',\n 8: 'вісімдесят', 9: 'дев\\'яносто'\n }\n\n hundreds = {\n 1: 'сто', 2: 'двісті',\n 3: 'триста', 4: 'чотириста',\n 5: 'п\\'ятсот', 6: 'шістсот',\n 7: 'сімсот', 8: 'вісімсот',\n 9: 'дев\\'ятсот'\n }\n\n strnumber = str(amount)\n if amount < 20:\n return digits[amount]\n elif amount < 100:\n if strnumber[-1] == '0':\n return dozens[int(strnumber[0])]\n else:\n return dozens[int(strnumber[0])] + \" \" + num_to_words(int(strnumber[1]))\n else:\n if strnumber[1:3] == '00':\n return hundreds[int(strnumber[0])]\n else:\n 
return hundreds[int(strnumber[0])] + \" \" + num_to_words(int(strnumber[1:3]))", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def int2word(n):\n # break the number into groups of 3 digits using slicing\n # each group representing hundred, thousand, million, billion, ...\n n3 = []\n r1 = \"\"\n # create numeric string\n ns = str(n)\n for k in range(3, 33, 3):\n r = ns[-k:]\n q = len(ns) - k\n # break if end of ns has been reached\n if q < -2:\n break\n else:\n if q >= 0:\n n3.append(int(r[:3]))\n elif q >= -1:\n n3.append(int(r[:2]))\n elif q >= -2:\n n3.append(int(r[:1]))\n r1 = r\n\n # print n3 # test\n\n # break each group of 3 digits into\n # ones, tens/twenties, hundreds\n # and form a string\n nw = \"\"\n for i, x in enumerate(n3):\n b1 = x % 10\n b2 = (x % 100) // 10\n b3 = (x % 1000) // 100\n # print b1, b2, b3 # test\n if x == 0:\n continue # skip\n else:\n t = thousands[i]\n if b2 == 0:\n nw = ones[b1] + t + nw\n elif b2 == 1:\n nw = tens[b1] + t + nw\n elif b2 > 1:\n nw = twenties[b2] + ones[b1] + t + nw\n if b3 > 0:\n nw = ones[b3] + \"hundred \" + nw\n return nw", "def convert_number(number):\n return ' ' + ' '.join(list(int_to_roman(number))) + ' '", "def int_to_str(number):\n rb = RuleBasedNumberFormat(URBNFRuleSetTag.SPELLOUT, Locale('pl_PL'))\n verbalized = rb.format(int(number))\n return verbalized", "def hundreds_conversion(positive_int):\n positive_int = str(positive_int)\n if int(positive_int[-3]) < 4:\n return 'C' * int(positive_int[-3])\n if int(positive_int[-3]) == 4:\n return 'CD'\n if int(positive_int[-3]) == 5:\n return 'D'\n if int(positive_int[-3]) == 6:\n return 'DC'\n if int(positive_int[-3]) == 7:\n return 'DCC'\n if int(positive_int[-3]) == 8:\n return 'DCCC'\n if int(positive_int[-3]) == 9:\n return 'CM'", "def _to_cn(number):\n\n chinese_numeral_dict = {\n '0': '零',\n '1': '一',\n '2': '二',\n '3': '三',\n '4': '四',\n '5': '五',\n '6': '六',\n '7': '七',\n '8': '八',\n '9': '九'\n }\n chinese_unit_map = [('', '十', '百', '千'),\n ('万', '十万', '百万', '千万'),\n ('亿', '十亿', '百亿', '千亿'),\n ('兆', '十兆', '百兆', '千兆'),\n ('吉', '十吉', '百吉', '千吉')]\n chinese_unit_sep = ['万', '亿', '兆', '吉']\n\n reversed_n_string = reversed(str(number))\n\n result_lst = []\n unit = 0\n\n for integer in reversed_n_string:\n if integer is not '0':\n result_lst.append(chinese_unit_map[unit // 4][unit % 4])\n result_lst.append(chinese_numeral_dict[integer])\n unit += 1\n else:\n if result_lst and result_lst[-1] != '零':\n result_lst.append('零')\n unit += 1\n\n result_lst.reverse()\n\n # clean convert result, make it more natural\n if result_lst[-1] is '零':\n result_lst.pop()\n\n result_lst = list(''.join(result_lst))\n\n for unit_sep in chinese_unit_sep:\n flag = result_lst.count(unit_sep)\n while flag > 1:\n result_lst.pop(result_lst.index(unit_sep))\n flag -= 1\n\n '''\n length = len(str(number))\n if 4 < length <= 8:\n flag = result_lst.count('万')\n while flag > 1:\n result_lst.pop(result_lst.index('万'))\n flag -= 1\n elif 8 < length <= 12:\n flag = result_lst.count('亿')\n while flag > 1:\n result_lst.pop(result_lst.index('亿'))\n flag -= 1\n elif 12 < length <= 16:\n flag = result_lst.count('兆')\n while flag > 1:\n result_lst.pop(result_lst.index('兆'))\n flag -= 1\n elif 16 < length <= 20:\n flag = result_lst.count('吉')\n while flag > 1:\n result_lst.pop(result_lst.index('吉'))\n flag -= 1\n '''\n\n return ''.join(result_lst)", "def convert(number):\n out = \"\"\n if number 
% 3 == 0:\n out = \"Pling\"\n if number % 5 == 0:\n out = out + \"Plang\"\n if number % 7 == 0:\n out = out + \"Plong\"\n if out == \"\":\n out = str(number)\n return out", "def numbers2words():\n\tmy_num = None\n\twhile my_num != \"0\":\n\t\tmy_num = input(\"Please enter a number greater than 0 and less than 1 trillion: \")\n\t\tprint(name_num(int(my_num.replace(\",\",\"\"))))", "def textualize(num):\n if isinstance(num, float):\n num = int(num)\n # special case\n if num == 0:\n return 'zero'\n\n # if the number is negative, we put the word\n # 'negative' in front of it.\n is_negative = False\n if num < 0:\n is_negative = True\n num = -1 * num\n\n num = str(num)\n # pad with zeroes\n while len(num) % 3 != 0:\n num = ''.join([ '0', num ])\n\n # as groups are textualized, their strings will be\n # appended to this list\n num_string = []\n group_counter = 0\n while len(num) > 0:\n group = num[-3:]\n num = num[:-3]\n text = _textualize_group(group)\n\n # thousand, million, etc.\n if group_counter > 0 and text:\n group_name = group_names[group_counter]\n text = ' '.join([ text, group_name ])\n\n if text:\n num_string.insert(0, text)\n\n group_counter += 1\n\n if is_negative:\n num_string.insert(0, 'negative')\n\n return ' '.join(num_string)", "def convert(n):\n if n in numbersDict:\n return len(numbersDict[n]), numbersDict[n]\n # else, n is greater than 20\n\n # reverse so that n[0] is the ones place an so on\n n = list(map(int, reversed(str(n))))\n\n word = []\n\n wordHundred = \"hundred\"\n wordAnd = \"and\"\n wordThousand = \"thousand\"\n\n if (n[1]*10 + n[0]) in numbersDict:\n word.append(numbersDict[(n[1]*10 + n[0])])\n else:\n word.append(numbersDict.get(n[0], \"\"))\n word.append(numbersDict.get(n[1] * 10, \"\"))\n\n if len(n) > 2:\n if n[1] or n[0]: word.append(wordAnd)\n hundreds = numbersDict.get(n[2], \"\")\n needHundred = wordHundred if hundreds else \"\"\n word.append(needHundred)\n word.append(hundreds)\n\n if len(n) > 3:\n thousands = numbersDict.get(n[3], \"\")\n needThousand = wordThousand if thousands else \"\"\n word.append(needThousand)\n word.append(thousands)\n\n return len(\"\".join(word)), \" \".join(reversed(word))", "def indian_word_currency(value):\n if isinstance(value, int) and value < 100:\n return str(value)\n if isinstance(value, float) and value < 99:\n return str(value)\n\n try:\n if isinstance(value, str):\n if '.' 
not in value and int(value) < 99:\n return value\n if float(value) < 99:\n return value\n except (ValueError, TypeError):\n return value\n\n value_integer = str(value).split('.')[0]\n value_len = len(value_integer)\n if value_len > 7:\n crores = value_integer[:-7]\n lakhs = value_integer[-7:-5]\n if crores == '1' and lakhs == '00':\n return '1 Crore'\n if lakhs == '00':\n return '%s Crores' % crores\n return '%s.%s Crores' % (crores, lakhs)\n elif value_len > 5:\n lakhs = value_integer[:-5]\n thousands = value_integer[-5:-3]\n if lakhs == '1' and thousands == '00':\n return '1 Lakh'\n if thousands == '00':\n return '%s Lakhs' % lakhs\n return '%s.%s Lakhs' % (lakhs, thousands)\n elif value_len > 3:\n thousands = value_integer[:-3]\n hundreds = value_integer[-3:-1]\n if thousands == '1' and hundreds == '00':\n return '1 Thousand'\n if hundreds == '00':\n return '%s Thousands' % thousands\n return '%s.%s Thousands' % (thousands, hundreds)\n else:\n hundreds = value_integer[:-2]\n tens_ones = value_integer[-2:]\n if hundreds == '1' and tens_ones == '00':\n return '1 Hundred'\n if tens_ones == '00':\n return '%s Hundreds' % hundreds\n return '%s.%s Hundreds' % (hundreds, tens_ones)", "def cardinal(n, friendly=True):\n if friendly:\n n_abs = abs(n)\n\n if n_abs < 20:\n return cardinal(n, friendly=False)\n\n if n_abs < 100 and n_abs % 10 == 0:\n return cardinal(n, friendly=False)\n\n if n_abs < 1000 and n_abs % 100 == 0:\n return cardinal(n, friendly=False)\n\n if n_abs < 12000 and n_abs % 1000 == 0:\n return cardinal(n, friendly=False)\n\n prefix = \"min \" if n < 0 else \"\"\n\n if n_abs < MILLION:\n q, r = divmod(n_abs, 1000)\n if r == 0:\n return prefix + \"%d duizend\" % q\n\n if n_abs < BILLION:\n q, r = divmod(n_abs, MILLION)\n if r == 0:\n return prefix + \"%d miljoen\" % q\n\n # No friendly variant, just return the numerical representation.\n return unicode(n)\n\n # Code below completely spells out each number.\n\n if n < 0:\n return \"min \" + cardinal(abs(n))\n\n if n < 20:\n return UNITS[n]\n\n if n < 100:\n q, r = divmod(n, 10)\n a = TENS[q]\n if r == 0:\n return a\n b = cardinal(r)\n joiner = \"en\" if not b.endswith(\"e\") else \"ën\"\n return b + joiner + a\n\n if n < 1000:\n q, r = divmod(n, 100)\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n b = cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \"honderd\" + b\n\n if 1000 < n < 10000 and n % 1000:\n # Special case for numbers that are exactly divisble by 100, but\n # not by 1000, e.g. 
\"tweeëntwintighonderd\"\n q, r = divmod(n, 100)\n if r == 0:\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n return a + \"honderd\"\n\n if n < MILLION:\n q, r = divmod(n, 1000)\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \"duizend\" + b\n\n if n < BILLION:\n q, r = divmod(n, MILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" miljoen\" + b\n\n if n < TRILLION:\n q, r = divmod(n, BILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" miljard\" + b\n\n if n < QUADRILLION:\n q, r = divmod(n, TRILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" biljoen\" + b\n\n # Fallback to numerical representation\n return unicode(n)", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def to_roman(n):\n if not isinstance(n, int):\n try:\n n = int(n)\n except ValueError:\n raise NotIntegerError(\"non-integers cannot be converted\")\n\n if not (0 < n < 4000):\n raise OutOfRangeError(\"number out of range (must be 1..3999)\")\n\n result = \"\"\n for numeral, integer in ROMAN_NUMBER_MAP:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def tens_text(num):\n if num < 10:\n return singles[num]\n elif num < 20:\n return teens[num]\n elif num < 100:\n tens_digit = num // 10\n singles_digit = num % 10\n if singles_digit == 0:\n return tens[tens_digit]\n else:\n return tens[tens_digit-2] + ' ' + singles[singles_digit]", "def tens_conversion(positive_int):\n # I use an index of [-2] to select the ten's place, and so forth until the thousands\n positive_int = str(positive_int)\n if int(positive_int[-2]) < 4:\n return 'X' * int(positive_int[-2])\n if int(positive_int[-2]) == 4:\n return 'XL'\n if int(positive_int[-2]) == 5:\n return 'L'\n if int(positive_int[-2]) == 6:\n return 'LX'\n if int(positive_int[-2]) == 7:\n return 'LXX'\n if int(positive_int[-2]) == 8:\n return 'LXXX'\n if int(positive_int[-2]) == 9:\n return 'XC'", "def int2roman(num):\n try:\n num_int = int(num)\n except ValueError:\n raise InputError(num, \"Input value must be in integer representation.\")\n except TypeError:\n raise InputError(num, \"Input must be a number, string, or a bytes-like object.\")\n if float(num) != float(num_int):\n raise InputError(num, \"Input cannot be a non-integer decimal value.\")\n else:\n num = int(num)\n if not 0 < num < 5000:\n raise InputError(num, \"Input must be an integer in [1,4999] range.\")\n\n res = \"\"\n for r, i in __extended_map:\n while num >= i:\n res += r\n num -= i\n return res", "def int2dec(n: int) -> str:", "def apnumber(value):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n if not 0 < value < 10:\r\n return str(value)\r\n return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),\r\n _('seven'), _('eight'), _('nine'))[value - 1]", "def _int2str(num):\n if num<10:\n return '00%s'%str(num)\n elif 10<=num<100:\n return '0%s'%str(num)\n else:\n return '%s'%str(num)", "def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)", "def make_unicode():\r\n for num in range(300, 320):\r\n yield unichr(num)", "def broken(inp):\n return inp.translate(str.maketrans(\"01\", 
\"10\"))", "def WWRomanNumber(number):\r\n\t\r\n\tif number>4999 or number<1:\r\n\t\traise OverflowError(\"The should be between 1 and 4999, here it is \"+str(number))\r\n\tnumerals = { 1 : \"I\", 4 : \"IV\", 5 : \"V\", 9 : \"IX\", 10 : \"X\", 40 : \"XL\",\r\n\t\t\t\t50 : \"L\", 90 : \"XC\", 100 : \"C\", 400 : \"CD\", 500 : \"D\", 900 : \"CM\", 1000 : \"M\" }\r\n\tresult = \"\"\r\n\tfor value, numeral in sorted(numerals.items(), reverse=True):\r\n\t\twhile number >= value:\r\n\t\t\tresult += numeral\r\n\t\t\tnumber -= value\r\n\treturn result", "def internal_id_to_display_id(i_id: int) -> str:\n i_id = str(i_id).zfill(9)\n return ''.join(i_id[x - 1] for x in [1, 5, 9, 6, 3, 8, 2, 4, 7])", "def unicode_number(x):\n\n output = u\"%g\" % x\n\n if output[0] == u\"-\":\n output = u\"\\u2012\" + output[1:]\n\n index = output.find(u\"e\")\n if index != -1:\n uniout = unicode(output[:index]) + u\"\\u00d710\"\n saw_nonzero = False\n for n in output[index+1:]:\n if n == u\"+\": pass # uniout += u\"\\u207a\"\n elif n == u\"-\": uniout += u\"\\u207b\"\n elif n == u\"0\":\n if saw_nonzero: uniout += u\"\\u2070\"\n elif n == u\"1\":\n saw_nonzero = True\n uniout += u\"\\u00b9\"\n elif n == u\"2\":\n saw_nonzero = True\n uniout += u\"\\u00b2\"\n elif n == u\"3\":\n saw_nonzero = True\n uniout += u\"\\u00b3\"\n elif u\"4\" <= n <= u\"9\":\n saw_nonzero = True\n if saw_nonzero: uniout += eval(\"u\\\"\\\\u%x\\\"\" % (0x2070 + ord(n) - ord(u\"0\")))\n else: uniout += n\n\n if uniout[:2] == u\"1\\u00d7\": uniout = uniout[2:]\n return uniout\n\n return output", "def convert(num):\r\n if len(str(num))==1:\r\n return \"000%i\"%num\r\n elif len(str(num)) == 2:\r\n return \"00%i\"%num\r\n elif len(str(num)) == 3:\r\n return \"0%i\"%num\r\n elif len(str(num)) == 4:\r\n return \"%i\"%num", "def numbersToLetters(number):\n arr = [number[i:i+2] for i in range(0, len(number), 2)]\n result = ''\n for i in arr:\n i = int(i)\n if(i<=48):\n i = i + 48\n result += chr(i)\n return result", "def number_phrase(number):\n thousands, hundreds, tens, ones = [int(d) for d in list('%04d' % number)]\n phrase_parts = []\n\n if thousands:\n phrase_parts.append('%s thousand' % WORDS['digits'][thousands])\n if hundreds:\n phrase_parts.append('%s hundred' % WORDS['digits'][hundreds])\n if (thousands or hundreds) and (tens or ones):\n phrase_parts.append('and')\n if tens:\n if tens == 1:\n phrase_parts.append(WORDS['teens'][10 + ones])\n else:\n phrase_parts.append(WORDS['tens'][tens])\n if ones and tens != 1:\n phrase_parts.append(WORDS['digits'][ones])\n\n return ' '.join(phrase_parts)", "def intword(value, format='%.1f'):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n\r\n if value < powers[0]:\r\n return str(value)\r\n for ordinal, power in enumerate(powers[1:], 1):\r\n if value < power:\r\n chopped = value / float(powers[ordinal - 1])\r\n return (' '.join([format, _(human_powers[ordinal - 1])])) % chopped\r\n return str(value)", "def _convert_words_to_numbers_nl(text, short_scale=True, ordinals=False):\n text = text.lower()\n tokens = tokenize(text)\n numbers_to_replace = \\\n _extract_numbers_with_text_nl(tokens, short_scale, ordinals)\n numbers_to_replace.sort(key=lambda number: number.start_index)\n\n results = []\n for token in tokens:\n if not numbers_to_replace or \\\n token.index < numbers_to_replace[0].start_index:\n results.append(token.word)\n else:\n if numbers_to_replace and \\\n token.index == numbers_to_replace[0].start_index:\n results.append(str(numbers_to_replace[0].value))\n 
if numbers_to_replace and \\\n token.index == numbers_to_replace[0].end_index:\n numbers_to_replace.pop(0)\n\n return ' '.join(results)", "def int_to_alpha(num):\n remainder = num\n text = []\n if num >= 26:\n major = remainder // 26\n text.append(ascii_lowercase[remainder // 26 - 1])\n remainder -= major * 26\n text.append(ascii_lowercase[remainder])\n return \"\".join(text)", "def test_anglicize100to999():\n print('Testing anglicize100to999')\n\n result = funcs.anglicize100to999(100)\n introcs.assert_equals(\"one hundred\", result)\n\n result = funcs.anglicize100to999(301)\n introcs.assert_equals(\"three hundred one\", result)\n\n result = funcs.anglicize100to999(999)\n introcs.assert_equals(\"nine hundred ninety nine\", result)", "def cond_int2str(cond_int=0):\n try:\n return {\n 0: '晴',\n 1: '多云',\n 2: '阴',\n 3: '阵雨',\n 4: '雷阵雨',\n 5: '雷阵雨伴有冰雹',\n 6: '雨夹雪',\n 7: '小雨',\n 8: '中雨',\n 9: '大雨',\n 10: '暴雨',\n 11: '大暴雨',\n 12: '特大暴雨',\n 13: '阵雪',\n 14: '小雪',\n 15: '中雪',\n 16: '大雪',\n 17: '暴雪',\n 18: '雾',\n 19: '冻雨',\n 20: '沙尘暴',\n 21: '小到中雨',\n 22: '中到大雨',\n 23: '大到暴雨',\n 24: '暴雨到大暴雨',\n 25: '大暴雨到特大暴雨25',\n 26: '小到中雪',\n 27: '中到大雪',\n 28: '大到暴雪',\n 29: '浮尘',\n 30: '扬沙',\n 31: '强沙尘暴',\n 53: '霾',\n 99: '无'\n }[cond_int]\n except KeyError as e:\n logging.warning(e)\n return \"-\"", "def transform(s):\r\n return 'digit ' + str(s)", "def toRoman(n):\n pass", "def _naturalize_numbers(self, string):\n\n def naturalize_int_match(match):\n return \"%08d\" % (int(match.group(0)),)\n\n string = re.sub(r\"\\d+\", naturalize_int_match, string)\n\n return string", "def _roman2word(strNumber):\n strNumber = strNumber\n cardinalNumber = fromRoman(strNumber)\n return NumberFormula._cardinal2word(cardinalNumber)", "def spell_number(num):\n tens, units = num / 10, num % 10\n tens_str = NUMBERS_10[tens]\n units_str = NUMBERS_1[units]\n if tens == 1:\n return NUMBERS_TEEN[units]\n elif tens:\n if units:\n return \"{t} {u}\".format(t=tens_str, u=units_str)\n return \"{t}\".format(t=tens_str)\n else:\n return units_str", "def translate(nuc):\n\tfrom Bio import Seq\n\ttry:\n\t\ttmp_aa = Seq.translate(nuc.replace('-','N')) #returns string when argument is a string, Bio.Seq otherwise\n\texcept:\n\t\tprint(\"translation failed\",nuc)\n\t\ttmp_aa = 'X'*len(nuc)//3\n\taa_seq = \"\"\n\tfor i,aa in enumerate(tmp_aa):\n\t\tif nuc[i*3:(i+1)*3]=='---':\n\t\t\taa_seq+='-'\n\t\telse:\n\t\t\taa_seq+=aa\n\treturn aa_seq", "def change(st):\n return ''.join('1' if a in st.lower() else '0' for a in map(chr, range(97, 123)))", "def shorten_number(self, number):\n if number < 1000:\n return number\n elif number >= 1000 and number < 1000000:\n num = self.rounded_number(number, 1000)\n val = \"1M\" if num == \"1000\" else num + \"K\"\n return val\n elif number >= 1000000 and number < 1000000000:\n num = self.rounded_number(number, 1000000)\n val = \"1B\" if num==\"1000\" else num + \"M\"\n return val\n elif number >= 1000000000 and number < 1000000000000:\n num = self.rounded_number(number, 1000000000)\n val = \"1T\" if num==\"1000\" else num + \"B\"\n return val\n else:\n num = self.rounded_number(number, 1000000000000)\n return num + \"T\"", "def ta2en(text):\n return IITB_translator(\"ta\", \"en\", text)", "def numerify_iso_label(lab):\n from sage.databases.cremona import class_to_int\n if 'CM' in lab:\n return -1 - class_to_int(lab[2:])\n else:\n return class_to_int(lab.lower())", "def romanify(num):\n result = \"\"\n onesDict = {1:\"I\", 2: \"II\", 3: \"III\", 4: \"IV\", 5: \"V\", 6: \"VI\", 7: \"VII\", 8: \"VIII\", 9: \"IX\", 
0:\"\"}\n ones = num%10\n num-=num%10\n result = onesDict[ones] + result\n tensDict = {10:\"X\", 20: \"XX\", 30: \"XXX\", 40:\"XL\", 50:\"L\", 60:\"LX\", 70: \"LXX\", 80: \"LXXX\", 90: \"XC\", 0:\"\"}\n tens = num%100\n num-=num%100\n result = tensDict[tens] + result\n hunsDict = {100:\"C\", 200: \"CC\", 300: \"CCC\", 400:\"CD\", 500:\"D\", 600:\"DC\", 700: \"DCC\", 800: \"DCCC\", 900: \"CM\", 0:\"\"}\n huns = num%1000\n num-=num%1000\n result = hunsDict[huns] + result\n thous = num/1000\n result = \"M\"*thous + result\n \n return result", "def int2roman(i, lower=False):\n warn('The function int2roman is deprecated from JAMS. Use module pyjams.',\n category=DeprecationWarning)\n assert i >= 1, 'integer must be > 0.'\n result = []\n for integer, numeral in numeral_map:\n count = int(i // integer)\n result.append(numeral * count)\n i -= integer * count\n if lower: result = [ i.lower() for i in result ]\n return ''.join(result)", "def wind_speed_int2str(wind_speed_int):\n try:\n return {\n '微风': 0,\n '3-4级': 1,\n '4-5级': 2,\n '5-6级': 3,\n '6-7级': 4,\n '7-8级': 5,\n '8-9级': 6,\n '9-10级': 7,\n '10-11级': 8,\n '11-12级': 9}[wind_speed_int]\n except KeyError as e:\n logging.warning(e)\n return -1", "def thousands_conversion(positive_int):\n positive_int = str(positive_int)\n if int(positive_int[-4]) > 0: # If the thousands place has anything greater than 0, return M multiplied by value\n return 'M' * int(positive_int[-4])\n elif int(positive_int) == 10000: # If the input is 10,000 then it will return 'M' * 10, the maximum value\n return 'M' * 10", "def roman2int(n):\n warn('The function roman2int is deprecated from JAMS. Use module pyjams.',\n category=DeprecationWarning)\n n = str(n).upper()\n i = result = 0\n for integer, numeral in numeral_map:\n while n[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n return result", "def convert_to_roman_numeral(positive_int):\n # Have to use if and elif statements to avoid index out of range error\n if len(str(positive_int)) == 5:\n return thousands_conversion(positive_int)\n elif len(str(positive_int)) == 4:\n return thousands_conversion(positive_int) + hundreds_conversion(positive_int) \\\n + tens_conversion(positive_int) + ones_conversion(positive_int)\n elif len(str(positive_int)) == 3:\n return hundreds_conversion(positive_int) + tens_conversion(positive_int) + ones_conversion(positive_int)\n elif len(str(positive_int)) == 2:\n return tens_conversion(positive_int) + ones_conversion(positive_int)\n elif len(str(positive_int)) == 1:\n return ones_conversion(positive_int)\n elif positive_int == 0:\n return '0'", "def spell(number):\n if number > 1000:\n raise # This doesn't handle numbers greater than 1000\n\n if number == 1000:\n return ['one', 'thousand']\n\n if number >= 100:\n if number % 100 == 0:\n return spell(number // 100) + ['hundred']\n else:\n return spell(number // 100 * 100) + ['and'] + spell(number % 100)\n\n if number >= 20:\n names = {\n 20: 'twenty',\n 30: 'thirty',\n 40: 'forty',\n 50: 'fifty',\n 60: 'sixty',\n 70: 'seventy',\n 80: 'eighty',\n 90: 'ninety',\n }\n if number % 10 == 0:\n return [names[number]]\n else:\n return spell(number // 10 * 10) + spell(number % 10)\n\n names = [\n 'zero', 'one', 'two', 'three', 'four',\n 'five', 'six', 'seven', 'eight', 'nine',\n 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',\n 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen',\n ]\n return [names[number]]", "def romanify(num):\n result = \"\"\n return result", "def _num2str(self, num):\n q, mod = divmod(num, 10)\n 
suffix = \"th\" if q == 1 else self.SUFFIX_DICT[mod]\n return f\"{num}{suffix}\"", "def MakeHumanReadable(num):\n i = 0\n while i+1 < len(EXP_STRINGS) and num >= (2 ** EXP_STRINGS[i+1][0]):\n i += 1\n rounded_val = round(float(num) / 2 ** EXP_STRINGS[i][0], 2)\n return '%s %s' % (rounded_val, EXP_STRINGS[i][1])", "def transform(s):\n return 'digit ' + str(s)", "def int_with_commas(number):\n try:\n number = int(number)\n if number < 0:\n return '-' + int_with_commas(-number)\n result = ''\n while number >= 1000:\n number, number2 = divmod(number, 1000)\n result = \",%03d%s\" % (number2, result)\n return \"%d%s\" % (number, result)\n except Exception:\n return \"\"", "def _int_to_roman(self, i):\n numeral_map = zip((1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),\n ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I'))\n result = []\n for integer, numeral in numeral_map:\n count = int(i / integer)\n result.append(numeral * count)\n i -= integer * count\n return ''.join(result)", "def get_number_in_portuguese(self, number, result=\"\"):\n number_as_str = str(number)\n\n # Check if the first char is a \"-\" sign\n first_position = number_as_str[0]\n if \"-\" == first_position:\n result = \"menos\"\n # Removes the negative sign from number\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n number_len = len(number_as_str)\n\n if number_len > 1 and self._is_zero_sequence(number_as_str):\n # the rest of the number ends in a zero sequence\n return result.strip()\n\n if first_position == '0':\n if number_len > 1:\n # Cut off the leading zero\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n if not result or result == '-':\n # The number is zero\n return self.ZERO\n\n if number_len > 5:\n # Out of range\n raise NotAcceptable(detail=self.MAX_LIMIT_ERROR)\n\n if number_len == 5:\n # Extract the dozen-thounsands\n first_two_positions = number_as_str[0] + number_as_str[1]\n result = ' '.join([result, self._get_two_digits_number_in_extension(first_two_positions), 'mil'])\n\n if self._is_zero_sequence(number_as_str[2::]):\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n\n return self.get_number_in_portuguese(number=number_as_str[2::], result=result)\n\n if number_len == 4:\n result = ' '.join([result, self.THOUSANDS[first_position]])\n\n if self._is_zero_sequence(number_as_str[1::]):\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n if number_len == 3:\n is_following_zeros = self._is_zero_sequence(number_as_str[1::])\n\n if first_position == '1':\n # Number ends in 1xx\n if is_following_zeros:\n # Number is 100\n result = ' '.join([result, self.CEM])\n return result.strip()\n result = ' '.join([result, 'cento e'])\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n result = ' '.join([result, self.HUNDREDS[first_position]])\n if is_following_zeros:\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n if number_len == 2:\n result = ' '.join([result, self._get_two_digits_number_in_extension(number_as_str)])\n return result.strip()\n\n if number_len == 1:\n result = ' '.join([result, self.UNITS[number_as_str]])\n\n return result.strip()", "def convert_ascii_character(x: str):\n return 
ord(x) * 10 if ord(x) < LIMIT else 0", "def nintl(self):", "def mpc2internal(self,Code):\n if (Code.isdigit()):\n internal_code=int(Code)\n else:\n internal_code=(ord(Code[0])-55)*100+int(Code[1:])\n internal_code = -internal_code\n return (internal_code)", "def num_to_words(data):\n tokens = word_tokenize(str(data))\n new = \"\"\n for word in tokens:\n try:\n word = num2words(int(w))\n except:\n a = 0\n new = new + \" \" + word\n new = np.char.replace(new, \"-\", \" \")\n return new", "def convertbase(number, base=10):\n\n integer = number\n if not integer:\n return '0'\n sign = 1 if integer > 0 else -1\n alphanum = string.digits + string.ascii_lowercase\n nums = alphanum[:base]\n res = ''\n integer *= sign\n while integer:\n integer, mod = divmod(integer, base)\n res += nums[mod]\n return ('' if sign == 1 else '-') + res[::-1]", "def roman_converter(number):\n num_dict = {'I': '1', 'II': '2', 'III': '3', 'IIII': '4', 'V': '5', 'VI': '6', 'VII': '7', 'VIII': '8', 'VIIII': '9', 'X': '10'}\n if number in num_dict:\n return num_dict[number]\n return number", "def translate_leet(phrase):", "def say_chunk(num):\n output_string_list = []\n num_string = str(num)\n\n units = ['zero', 'one', 'two', 'three', 'four', 'five',\n 'six', 'seven', 'eight', 'nine']\n teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\n\n # singles\n if num < 10:\n output_string_list.append(units[num])\n\n # teens\n elif 10 <= num <= 19:\n output_string_list.append(teens[int(num) % 10])\n\n # tens\n elif 20 <= num <= 99:\n num_str = str(num)\n modifier = int(num_str[0])\n if int(num_str[1]):\n output_string_list.append(\"{}-{}\".format(tens[modifier - 2], units[int(num) % 10]))\n else:\n output_string_list.append(tens[modifier - 2])\n\n # hundreds\n elif 100 <= num <= 999:\n output_string_list.append(units[int(num_string[0])])\n output_string_list.append('hundred')\n\n num = int(num_string[1:])\n if num:\n output_string_list.append('and')\n num_string = str(num)\n modifier = int(num_string[0])\n\n if int(num_string[1]):\n output_string_list.append(\"{}-{}\".format(tens[modifier - 2], units[int(num_string[1:]) % 10]))\n else:\n output_string_list.append(tens[modifier - 2])\n\n return ' '.join(output_string_list)", "def fo_shizzle_my_nizzle(n): \n if n < 0:\n n = \"fo\"\n elif n >= 1 and n < 50: \n n = \"shizzle\"\n elif n >= 50 and n <= 100:\n n = \"my\"\n elif n % 2 == 0 and n % 3 == 0 and n > 100:\n n = \"nizzle\"\n else:\n n = \"\"\n return n", "def cleanInteger(number):\n \n number = str(number).replace(' ', '')\n \n test = number\n for i in range(10):\n test = test.replace(str(i), '')\n \n if test:\n return None\n \n return number", "def int_to_roman(input_rom): # Konverter v rimske številke. 
Nisem avtor te funkcije.\n ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)\n nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')\n result = []\n for i in range(len(ints)):\n count = int(input_rom / ints[i])\n result.append(nums[i] * count)\n input_rom -= ints[i] * count\n return ''.join(result)", "def _decimal2word(strNumber):\n strNumber = \" komma \".join(re.split(\"[,]\", strNumber))\n strNumber = \" punkt \".join(re.split(\"[.]\", strNumber))\n\n tokenList = []\n for w in re.split(SPACEPATTERN, strNumber):\n w = w.strip()\n if NumberFormula._isCardinalNumber(w):\n w = NumberFormula._cardinal2word(w)\n tokenList.append(w)\n\n return \" \".join(tokenList)", "def zh_num2digit(string):\n for match in zh_nums_iter(string):\n num_str = match.group(0)\n digit_num = parse_zh_num(num_str)\n if digit_num is None:\n continue\n string = string.replace(num_str, str(digit_num), 1)\n return string", "def ones_conversion(positive_int):\n # I use an index of [-1] to select the one's place as it's on the rightmost place numerically\n positive_int = str(positive_int)\n if int(positive_int[-1]) < 4:\n return 'I' * int(positive_int[-1]) # Can multiply I by the corresponding value as long as it's under 4\n if int(positive_int[-1]) == 4:\n return 'IV'\n if int(positive_int[-1]) == 5:\n return 'V'\n if int(positive_int[-1]) == 6:\n return 'VI'\n if int(positive_int[-1]) == 7:\n return 'VII'\n if int(positive_int[-1]) == 8:\n return 'VIII'\n if int(positive_int[-1]) == 9:\n return 'IX'", "def number_as_string(x):\n \n numnames = {1 : \"one\", 2 : \"two\", 3 : \"three\", 4 : \"four\", 5 : \"five\", 6 : \"six\", 7 : \"seven\", 8 : \"eight\", 9 : \"nine\",\n 10 : \"ten\", 11 : \"eleven\", 12 : \"twelve\", 13 : \"thirteen\", 14 : \"fourteen\", 15 : \"fifteen\", 16 : \"sixteen\",\n 17 : \"seventeen\", 18 : \"eighteen\", 19 : \"nineteen\", 20 : \"twenty\", 30 : \"thirty\", 40 : \"forty\", 50 : \"fifty\", \n 60 : \"sixty\", 70 : \"seventy\", 80 : \"eighty\", 90 : \"ninety\"}\n \n numparts = []\n needAnd = (x > 100) and (x % 100)\n if x >= 1000:\n numparts.append(numnames[x/1000])\n numparts.append(\"thousand\")\n x %= 1000\n \n if x >= 100:\n numparts.append(numnames[x/100])\n numparts.append(\"hundred\")\n x %= 100\n \n if needAnd:\n numparts.append(\"and\")\n \n if 11 <= x <= 19:\n numparts.append(numnames[x])\n else:\n if x >= 10:\n numparts.append(numnames[(x/10)*10])\n x %= 10\n\n if x > 0:\n numparts.append(numnames[x])\n \n return \" \".join(numparts)", "def _convert_to_text(self):\n if type(self.data) is not list:\n return -1\n out = str()\n for element in self.data:\n out += chr(int(element))\n return (out)", "def mpc2internal(self,Code):\n \n if (Code.isdigit()):\n internal_code=int(Code)\n\n else:\n internal_code=(ord(Code[0])-55)*100+int(Code[1:])\n\n internal_code = -internal_code\n return (internal_code)", "def convert(number: int) -> str:\n\n sounds = [(3, \"Pling\"), (5, \"Plang\"), (7, \"Plong\")]\n result = [sound for divisor, sound in sounds if number % divisor == 0]\n\n return \"\".join(result) or f\"{number}\"", "def process_int(integer: int) -> str:\n\n return str(integer) if integer else Presenter.DEFAULT", "def encode_high(self, text):\n return ord(text)", "def baseconvert(number,fromdigits,todigits):\r\n base_length = len(todigits)\r\n\r\n if str(number)[0]=='-':\r\n number = str(number)[1:]\r\n neg=1\r\n else:\r\n neg=0\r\n\r\n # make an integer out of the number\r\n x=0\r\n for digit in str(number):\r\n x = x*len(fromdigits) + fromdigits.index(digit)\r\n\r\n 
# create the result in base 'len(todigits)'\r\n if x is 0:\r\n res = todigits[0]\r\n else:\r\n res=\"\"\r\n while x>0:\r\n digit = x % base_length\r\n res = todigits[digit] + res\r\n x = int(x / base_length)\r\n if neg:\r\n res = \"-\"+res\r\n\r\n return res", "def cvt_to_readable(num):\n\n # Find the degree of the number like if it is in thousands or millions, etc.\n index = int(math.log(num) / math.log(1000))\n\n # Converts the number to the human readable format and returns it.\n newNum = round(num / (1000 ** index), 3)\n degree = UNITS[index]\n\n return (newNum, degree)", "def friendly_number(num):\n # Convert to a (shorter) string for human consumption\n string = \"\"\n # The length of the string can be determined by STRING_LENGTH or by how many\n # characters are necessary to present a base 30 representation of SIZE.\n while STRING_LENGTH and len(string) <= STRING_LENGTH \\\n or len(VALID_CHARS)**len(string) <= SIZE:\n # PREpend string (to remove all obvious signs of order)\n string = VALID_CHARS[num%len(VALID_CHARS)] + string\n num = num/len(VALID_CHARS)\n return string", "def test_anglicize1000():\n print('Testing anglicize1000')\n\n result = funcs.anglicize1000(1)\n introcs.assert_equals(\"one\", result)\n\n result = funcs.anglicize1000(19)\n introcs.assert_equals(\"nineteen\", result)\n\n result = funcs.anglicize1000(20)\n introcs.assert_equals(\"twenty\", result)\n\n result = funcs.anglicize1000(35)\n introcs.assert_equals(\"thirty five\", result)\n\n result = funcs.anglicize1000(50)\n introcs.assert_equals(\"fifty\", result)\n\n result = funcs.anglicize1000(99)\n introcs.assert_equals(\"ninety nine\", result)\n\n result = funcs.anglicize1000(100)\n introcs.assert_equals(\"one hundred\", result)\n\n result = funcs.anglicize1000(301)\n introcs.assert_equals(\"three hundred one\", result)\n\n result = funcs.anglicize1000(999)\n introcs.assert_equals(\"nine hundred ninety nine\", result)", "def enlabel(mi_, ma_):\n\treturn \"Unicode characters from {} to {} codepoints\".format(mi_, ma_)", "def int_to_base(num, base):\n if base<=0: return '0' \n digits = []\n if (num <0):\n \tcur= -num\n else: cur = num\n while(cur>0):\n\t\tdigits.append(str(cur%base))\n\t\tcur/=base\n if (num <0): digits.append('-')\n digits.reverse()\n\n \n \n return ''.join(digits)", "def base2str(self, int_number):\r\n return self.format_base % (float(int_number) / self.mult_base)", "def base_number(number, count, dict_cardinal_num):\n special_numeral = [\"trăm\", \"mười\", \"mươi\", \"linh\", \"lăm\", \"mốt\"]\n list_cardinal_numeral = []\n # Divide number (abc) and follow place's number\n a = number // 100 # hundreds\n b = (number % 100) // 10 # Tens\n c = number % 10 # Ones\n # check a\n if a > 0:\n list_cardinal_numeral.append(dict_cardinal_num[a])\n list_cardinal_numeral.append(special_numeral[0])\n elif a == 0:\n if count > 1 and (b > 0 or c > 0):\n list_cardinal_numeral.append(dict_cardinal_num[a])\n list_cardinal_numeral.append(special_numeral[0])\n # check b\n if b == 0:\n if c > 0:\n if a > 0 or count > 1:\n list_cardinal_numeral.append(special_numeral[3])\n elif b > 0:\n if b == 1:\n list_cardinal_numeral.append(special_numeral[1])\n elif b > 1:\n list_cardinal_numeral.append(dict_cardinal_num[b])\n list_cardinal_numeral.append(special_numeral[2])\n # check c\n if c == 0:\n if count == 1 and a == 0 and b == 0:\n list_cardinal_numeral.append(dict_cardinal_num[c])\n elif c > 0:\n if b >= 1 and c == 5:\n list_cardinal_numeral.append(special_numeral[4])\n elif b >= 2 and c == 1:\n 
list_cardinal_numeral.append(special_numeral[5])\n else:\n list_cardinal_numeral.append(dict_cardinal_num[c])\n\n return \" \".join(list_cardinal_numeral)", "def ten(number: int) -> str:\n\n string_form = str(number)\n return string_form if number >= 0 else \"0\" + string_form", "def PigToEnglish(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")" ]
[ "0.7711457", "0.736089", "0.6570956", "0.65348077", "0.6504594", "0.6472066", "0.6465786", "0.6318002", "0.63022107", "0.6287715", "0.6235496", "0.6192539", "0.61917514", "0.6169387", "0.6149354", "0.6142758", "0.61054987", "0.60986495", "0.60660845", "0.60206014", "0.5872346", "0.5815992", "0.576974", "0.57589585", "0.5754434", "0.575249", "0.57281697", "0.5724484", "0.572187", "0.56664413", "0.56397104", "0.56335497", "0.5563793", "0.5541719", "0.5513101", "0.5509501", "0.5509232", "0.5487418", "0.54808646", "0.5473527", "0.5468297", "0.5466727", "0.54654706", "0.5457051", "0.5445314", "0.5444032", "0.5442528", "0.543856", "0.5430187", "0.5426794", "0.54250884", "0.542064", "0.5410987", "0.5409979", "0.53975654", "0.5397547", "0.5392183", "0.53727126", "0.53723663", "0.53623325", "0.5362063", "0.53554994", "0.5355272", "0.5354714", "0.535066", "0.53431076", "0.5338693", "0.5335968", "0.5335369", "0.5330306", "0.5321312", "0.53109825", "0.5304194", "0.5297337", "0.5297023", "0.5293175", "0.52897394", "0.52889794", "0.5273305", "0.5272518", "0.52597624", "0.525227", "0.5241652", "0.52328545", "0.52312946", "0.5225964", "0.5225922", "0.5221465", "0.52181447", "0.52176726", "0.5215128", "0.5213101", "0.5207093", "0.52059865", "0.52053124", "0.51937985", "0.51916444", "0.519096", "0.51819503", "0.5179478" ]
0.7366785
1
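For illustration only (not part of the dataset): the record above pairs a number-to-words docstring with its implementation. A minimal standalone sketch of the same spelling convention for 1 to 1000 — words concatenated with no spaces or hyphens, and "and" inserted after the hundreds — might look like the following. The names ONES, TENS and spell are hypothetical and are not taken from the record.

ONES = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine",
        "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen",
        "seventeen", "eighteen", "nineteen"]
TENS = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]

def spell(n: int) -> str:
    # Spell out 1..1000 as concatenated lowercase words,
    # e.g. spell(342) == "threehundredandfortytwo".
    if not 1 <= n <= 1000:
        raise ValueError("expected an integer in 1..1000")
    if n == 1000:
        return "onethousand"
    hundreds, rest = divmod(n, 100)
    words = ""
    if hundreds:
        words += ONES[hundreds] + "hundred"
        if rest:
            words += "and"
    if rest < 20:
        words += ONES[rest]
    else:
        tens, ones = divmod(rest, 10)
        words += TENS[tens] + ONES[ones]
    return words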
Takes a single- or double-digit integer and returns the English text for that number.
def digit2text(integer): # If the integer is in the special cases dictionary, then look up the word, # return it, and we're done. if integer in special_case_dict.keys(): return special_case_dict[integer] # Otherwise compose the word, by taking the number in the ten's place and # multiplying by 10 (i.e. integer/10*10 evaluates to a number in the set # {10, 20, 30, 40, 50, 60, 70, 80, 90} for any input integer between 10-99. # Then add word for the number in the one's place else: return special_case_dict[integer/10*10]+special_case_dict[integer%10]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def int2text(integer):\n # Numbers 1-99 are handled by simply looking up words in the special_case\n # dictionary.\n if integer < 100:\n return digit2text(integer)\n\n elif integer < 1000:\n # If exactly some hundred, then just return the word for the hundred's\n # place and the word 'hundred'\n if integer%100 == 0:\n return digit2text(integer/100)+'hundred'\n # Otherwise return the word for the hundred's place, the word\n # 'hundredand' and do some composition to make the rest of the words.\n else:\n return digit2text(integer/100)+'hundredand'+\\\n digit2text(integer%100)\n # Special case for 1000.\n elif integer == 1000:\n return \"onethousand\"", "def number2text(integer):\n\n numbers_1_20_char = [\"one\", \"two\", \"three\", \"four\", \"five\",\n \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\n \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\", \"twenty\"]\n\n numbers_21_99_int = list(range(20, 100, 10))\n numbers_21_99_char = [\"twenty\", \"thirty\", \"forty\", \"fifty\",\n \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n\n numbers_100_999_int = list(range(100,1000,100))\n numbers_100_999_char = [\"one hundred\", \"two hundred\", \"three hundred\", \"four hundred\", \"five hundred\",\n \"six hundred\", \"seven hundred\", \"eight hundred\", \"nine hundred\"]\n\n number_1000_int = 1000\n number_1000_char = \"one thousand\"\n\n if integer <= 0:\n raise ValueError(\"The number must be higher than 0, and smaller than 1001\")\n elif 1 <= integer <= 19:\n word = numbers_1_20_char[integer - 1]\n elif 20 <= integer <= 99:\n if integer in numbers_21_99_int:\n word = numbers_21_99_char[int(integer/10) - 2]\n else:\n inBetween = list(str(integer))\n lastword = numbers_1_20_char[int(inBetween[1]) - 1]\n firstword = numbers_21_99_char[int(int(inBetween[0])) - 2]\n word = \"\".join([firstword, lastword])\n elif 100 <= integer <= 999:\n if integer in numbers_100_999_int:\n word = numbers_100_999_char[int(integer/100) - 1]\n else:\n inBetween = list(str(integer))\n firstword = numbers_100_999_char[int(integer / 100) - 1]\n if int(inBetween[2]) == 0:\n if int(inBetween[1]) == 1:\n word = \"\".join([firstword, \"and\", \"ten\"])\n else:\n secondword = numbers_21_99_char[int(int(inBetween[1])) - 2]\n word = \"\".join([firstword, \"and\", secondword])\n else:\n number = (int(inBetween[1])*10) + int(inBetween[2])\n if 1 <= number <= 20:\n secondword = numbers_1_20_char[number - 1]\n word = \"\".join([firstword, \"and\", secondword])\n else:\n secondword = numbers_21_99_char[int(int(inBetween[1])) - 2]\n thirdword = numbers_1_20_char[int(int(inBetween[2])) - 1]\n word = \"\".join([firstword, \"and\", secondword, thirdword])\n elif integer == number_1000_int:\n word = number_1000_char\n\n return word", "def textualize(num):\n if isinstance(num, float):\n num = int(num)\n # special case\n if num == 0:\n return 'zero'\n\n # if the number is negative, we put the word\n # 'negative' in front of it.\n is_negative = False\n if num < 0:\n is_negative = True\n num = -1 * num\n\n num = str(num)\n # pad with zeroes\n while len(num) % 3 != 0:\n num = ''.join([ '0', num ])\n\n # as groups are textualized, their strings will be\n # appended to this list\n num_string = []\n group_counter = 0\n while len(num) 
> 0:\n group = num[-3:]\n num = num[:-3]\n text = _textualize_group(group)\n\n # thousand, million, etc.\n if group_counter > 0 and text:\n group_name = group_names[group_counter]\n text = ' '.join([ text, group_name ])\n\n if text:\n num_string.insert(0, text)\n\n group_counter += 1\n\n if is_negative:\n num_string.insert(0, 'negative')\n\n return ' '.join(num_string)", "def tens_text(num):\n if num < 10:\n return singles[num]\n elif num < 20:\n return teens[num]\n elif num < 100:\n tens_digit = num // 10\n singles_digit = num % 10\n if singles_digit == 0:\n return tens[tens_digit]\n else:\n return tens[tens_digit-2] + ' ' + singles[singles_digit]", "def translateNumber(n):\r\n if type(n) != str:\r\n return None\r\n else:\r\n translation = \"\"\r\n word = \"\"\r\n for c in n:\r\n if c != ' ':\r\n word += c\r\n elif word in Numbers:\r\n translation += Numbers[word] + \" \"\r\n else:\r\n translation += word + \" \"\r\n return translation", "def spell_number(num):\n tens, units = num / 10, num % 10\n tens_str = NUMBERS_10[tens]\n units_str = NUMBERS_1[units]\n if tens == 1:\n return NUMBERS_TEEN[units]\n elif tens:\n if units:\n return \"{t} {u}\".format(t=tens_str, u=units_str)\n return \"{t}\".format(t=tens_str)\n else:\n return units_str", "def hundreds_text(num):\n hundreds_digit = num // 100\n tens_digit = num % 100\n hundreds_text = singles[hundreds_digit] + ' ' + \"Hundred\"\n return hundreds_text + ' ' + tens_text(tens_digit)", "def num_to_words(amount):\n digits = {\n 0: 'нуль', 1: 'одна',\n 2: 'дві', 3: 'три',\n 4: 'чотири', 5: 'п\\'ять',\n 6: 'шість', 7: 'сім',\n 8: 'вісім', 9: 'дев\\'ять',\n 10: 'десять', 11: 'одинадцять',\n 12: 'дванадцять', 13: 'тринадцять',\n 14: 'чотирнадцять', 15: 'п\\'ятнадцять',\n 16: 'шістнадцять', 17: 'сімнадцять',\n 18: 'вісімнадцять', 19: 'дев\\'ятнадцять'\n }\n\n dozens = {\n 2: 'двадцять', 3: 'тридцять',\n 4: 'сорок', 5: 'п\\'ятдесят',\n 6: 'шістдесят', 7: 'сімдесят',\n 8: 'вісімдесят', 9: 'дев\\'яносто'\n }\n\n hundreds = {\n 1: 'сто', 2: 'двісті',\n 3: 'триста', 4: 'чотириста',\n 5: 'п\\'ятсот', 6: 'шістсот',\n 7: 'сімсот', 8: 'вісімсот',\n 9: 'дев\\'ятсот'\n }\n\n strnumber = str(amount)\n if amount < 20:\n return digits[amount]\n elif amount < 100:\n if strnumber[-1] == '0':\n return dozens[int(strnumber[0])]\n else:\n return dozens[int(strnumber[0])] + \" \" + num_to_words(int(strnumber[1]))\n else:\n if strnumber[1:3] == '00':\n return hundreds[int(strnumber[0])]\n else:\n return hundreds[int(strnumber[0])] + \" \" + num_to_words(int(strnumber[1:3]))", "def int2word(n):\n # break the number into groups of 3 digits using slicing\n # each group representing hundred, thousand, million, billion, ...\n n3 = []\n r1 = \"\"\n # create numeric string\n ns = str(n)\n for k in range(3, 33, 3):\n r = ns[-k:]\n q = len(ns) - k\n # break if end of ns has been reached\n if q < -2:\n break\n else:\n if q >= 0:\n n3.append(int(r[:3]))\n elif q >= -1:\n n3.append(int(r[:2]))\n elif q >= -2:\n n3.append(int(r[:1]))\n r1 = r\n\n # print n3 # test\n\n # break each group of 3 digits into\n # ones, tens/twenties, hundreds\n # and form a string\n nw = \"\"\n for i, x in enumerate(n3):\n b1 = x % 10\n b2 = (x % 100) // 10\n b3 = (x % 1000) // 100\n # print b1, b2, b3 # test\n if x == 0:\n continue # skip\n else:\n t = thousands[i]\n if b2 == 0:\n nw = ones[b1] + t + nw\n elif b2 == 1:\n nw = tens[b1] + t + nw\n elif b2 > 1:\n nw = twenties[b2] + ones[b1] + t + nw\n if b3 > 0:\n nw = ones[b3] + \"hundred \" + nw\n return nw", "def num2words(num):\n # Create a 
dictionary of all unique numbers from 1 to 1,000\n num2words = {0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven',\\\n 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen',\\\n 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty',\\\n 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty',\\\n 90:'ninety', 1000:'onethousand'}\n result = ''\n while True:\n try:\n result += num2words[num]\n return result\n except:\n pass\n try:\n result += num2words[num-num%10] + num2words[num%10]\n return result\n except:\n result += num2words[(num - num%100)//100] + 'hundred'\n num = num%100\n if num == 0:\n return result\n else:\n result += 'and'", "def indian_word_currency(value):\n if isinstance(value, int) and value < 100:\n return str(value)\n if isinstance(value, float) and value < 99:\n return str(value)\n\n try:\n if isinstance(value, str):\n if '.' not in value and int(value) < 99:\n return value\n if float(value) < 99:\n return value\n except (ValueError, TypeError):\n return value\n\n value_integer = str(value).split('.')[0]\n value_len = len(value_integer)\n if value_len > 7:\n crores = value_integer[:-7]\n lakhs = value_integer[-7:-5]\n if crores == '1' and lakhs == '00':\n return '1 Crore'\n if lakhs == '00':\n return '%s Crores' % crores\n return '%s.%s Crores' % (crores, lakhs)\n elif value_len > 5:\n lakhs = value_integer[:-5]\n thousands = value_integer[-5:-3]\n if lakhs == '1' and thousands == '00':\n return '1 Lakh'\n if thousands == '00':\n return '%s Lakhs' % lakhs\n return '%s.%s Lakhs' % (lakhs, thousands)\n elif value_len > 3:\n thousands = value_integer[:-3]\n hundreds = value_integer[-3:-1]\n if thousands == '1' and hundreds == '00':\n return '1 Thousand'\n if hundreds == '00':\n return '%s Thousands' % thousands\n return '%s.%s Thousands' % (thousands, hundreds)\n else:\n hundreds = value_integer[:-2]\n tens_ones = value_integer[-2:]\n if hundreds == '1' and tens_ones == '00':\n return '1 Hundred'\n if tens_ones == '00':\n return '%s Hundreds' % hundreds\n return '%s.%s Hundreds' % (hundreds, tens_ones)", "def number_phrase(number):\n thousands, hundreds, tens, ones = [int(d) for d in list('%04d' % number)]\n phrase_parts = []\n\n if thousands:\n phrase_parts.append('%s thousand' % WORDS['digits'][thousands])\n if hundreds:\n phrase_parts.append('%s hundred' % WORDS['digits'][hundreds])\n if (thousands or hundreds) and (tens or ones):\n phrase_parts.append('and')\n if tens:\n if tens == 1:\n phrase_parts.append(WORDS['teens'][10 + ones])\n else:\n phrase_parts.append(WORDS['tens'][tens])\n if ones and tens != 1:\n phrase_parts.append(WORDS['digits'][ones])\n\n return ' '.join(phrase_parts)", "def _convert_words_to_numbers_nl(text, short_scale=True, ordinals=False):\n text = text.lower()\n tokens = tokenize(text)\n numbers_to_replace = \\\n _extract_numbers_with_text_nl(tokens, short_scale, ordinals)\n numbers_to_replace.sort(key=lambda number: number.start_index)\n\n results = []\n for token in tokens:\n if not numbers_to_replace or \\\n token.index < numbers_to_replace[0].start_index:\n results.append(token.word)\n else:\n if numbers_to_replace and \\\n token.index == numbers_to_replace[0].start_index:\n results.append(str(numbers_to_replace[0].value))\n if numbers_to_replace and \\\n token.index == numbers_to_replace[0].end_index:\n numbers_to_replace.pop(0)\n\n return ' '.join(results)", "def large_int_word(x):\n\n digits = [int(i) for i in str(x)]\n units = tens = 
hundreds = thousands = ''\n\n if len(digits) == 1:\n units = UNITS[digits[-1]]\n else:\n units = UNIT_PREFIXES[digits[-1]]\n tens = TENS[digits[-2]]\n if len(digits) >= 3:\n hundreds = HUNDREDS[digits[-3]]\n if len(digits) >= 4:\n thousands = UNITS[digits[-4]] + 'illin'\n if len(digits) >= 5:\n raise\n\n return units + tens + hundreds + thousands + 'illion'", "def int_to_str(number):\n rb = RuleBasedNumberFormat(URBNFRuleSetTag.SPELLOUT, Locale('pl_PL'))\n verbalized = rb.format(int(number))\n return verbalized", "def get_nummeric_only(text):\n\n nummeric_string =\"\"\n \n for character in text:\n if character.isnumeric():\n \n nummeric_string+=character\n \n return nummeric_string", "def numbers2words():\n\tmy_num = None\n\twhile my_num != \"0\":\n\t\tmy_num = input(\"Please enter a number greater than 0 and less than 1 trillion: \")\n\t\tprint(name_num(int(my_num.replace(\",\",\"\"))))", "def TEXT(number, format_type):\n raise NotImplementedError()", "def findEnglish(data: str) -> str:\n # remove all string leading up to the word Translate\n data = data[data.find('Translate'):len(data)]\n # initalize list\n english_list = []\n\n # find all english in the string\n number_list = [int(num) for num in data if num.isnumeric()]\n\n # remove 4\n number_list.remove(4)\n\n # find smallest and largest numbers\n small = min(number_list)\n large = max(number_list)\n\n # first find the string with number\n for i in range(small,large+1):\n # find the line after i\n sym = f\"{i}\"\n symbol_lines_index = symbol_line_location(data, sym, move=0, addLast=False)\n\n # find index for that specific number\n eng = find(data, f\"{i}\")\n\n # for each location, determine if the 2 higher index is an alphabet or not\n for j in range(len(eng)):\n # if it is, then take that line\n if data[eng[j]+3].isalpha():\n indStart = eng[j]+3\n indEnd = symbol_lines_index[j][1]\n\n english = data[indStart:indEnd+1]\n english_list.append(english)\n\n # lastly combine the words, separating each translation with /\n english = \" / \".join(english_list)\n\n return english", "def integer_to_english_numeral(n, activate_tts=False):\n if activate_tts is None:\n activate_tts = False\n elif not isinstance(activate_tts, bool):\n raise TypeError('Argument \"activate_tts\" is not a boolean')\n if not isinstance(n, int):\n raise TypeError('Not an integer')\n if n < 0:\n raise ValueError('Not a positive integer')\n if n > 999999999999:\n raise OverflowError('Integer greater than 999,999,999,999')\n return cardinal_numerals_eng.integer_to_english(n, activate_tts)", "def stats_text(test):\n\n stats_text_en(test) \n \n stats_text_cn(test)", "def format_engineering( number, unit = \"\" ):\n if math.isnan(number):\n return \"nan\"\n if number == 0.0:\n return 0\n\n convert_table = {-18:'a', -15:'f', -12:'p', -9:'n', -6:'u',\n -3:'m', -2:'c', -1:'d', 0:'', 3:'k',\n 6:'M', 9:'G', 12:'T', 15:'P', 18:'E'}\n l10 = math.log10(abs(number))\n ten_exp = int(l10)\n\n sci_places = int(ten_exp / 3) * 3\n sci_signific = (ten_exp % 3)\n\n expo_char = convert_table[sci_places]\n trailing = number / 10.0 ** sci_places\n\n # print z, ten_exp, sci_places, sci_signific\n if trailing >= 10:\n lead = \"{:d}\".format(int(round(trailing)))\n elif trailing >= 1:\n lead = \"{:.1f}\".format(trailing)\n else:\n lead = \"{:.2f}\".format(trailing)\n return lead + \" \" + expo_char + unit", "def text_transform(val):\n if CURRENCY == \"USD\":\n return \"$%d\" % val\n if CURRENCY == \"EUR\":\n return \"‎€%d\" % val\n if CURRENCY == \"GBP\":\n return \"£%d\" % val\n return \"%d\" 
% val", "def _cardinal2word(strNumber):\n return Number.convertNumberIntoLetters(strNumber)", "def extract_number_nl(text, short_scale=True, ordinals=False):\n return _extract_number_with_text_nl(tokenize(text.lower()),\n short_scale, ordinals).value", "def MakeHumanReadable(num):\n i = 0\n while i+1 < len(EXP_STRINGS) and num >= (2 ** EXP_STRINGS[i+1][0]):\n i += 1\n rounded_val = round(float(num) / 2 ** EXP_STRINGS[i][0], 2)\n return '%s %s' % (rounded_val, EXP_STRINGS[i][1])", "def nintl(self):", "def main(num1, num2, text):\n return print(\"%30i\"%num1), print(\"%030i\"%num1), print(\"%.2f\"%num2), print(\"%.12f\"%num2), \\\n print(\"%40s\"%text)", "def _extract_number_with_text_nl_helper(tokens,\n short_scale=True, ordinals=False,\n fractional_numbers=True):\n if fractional_numbers:\n fraction, fraction_text = \\\n _extract_fraction_with_text_nl(tokens, short_scale, ordinals)\n if fraction:\n return fraction, fraction_text\n\n decimal, decimal_text = \\\n _extract_decimal_with_text_nl(tokens, short_scale, ordinals)\n if decimal:\n return decimal, decimal_text\n\n return _extract_whole_number_with_text_nl(tokens, short_scale, ordinals)", "def convert(number):\n out = \"\"\n if number % 3 == 0:\n out = \"Pling\"\n if number % 5 == 0:\n out = out + \"Plang\"\n if number % 7 == 0:\n out = out + \"Plong\"\n if out == \"\":\n out = str(number)\n return out", "def _extract_number_with_text_nl(tokens, short_scale=True,\n ordinals=False, fractional_numbers=True):\n number, tokens = \\\n _extract_number_with_text_nl_helper(tokens, short_scale,\n ordinals, fractional_numbers)\n while tokens and tokens[0].word in _ARTICLES_NL:\n tokens.pop(0)\n return ReplaceableNumber(number, tokens)", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def med_in_english(word):\r\n\treturn int(med(TextBlob(word).correct(), word))", "def numeral(number):\n return ROMAN_NUMERALS[number]", "def _textualize_group(group):\n # The final string. 
A list is used for performance.\n ret_str = []\n\n ones = int(group[2])\n tens = int(group[1])\n hundreds = int(group[0])\n is_teen = False\n ones_str = ''\n tens_str = ''\n hundreds_str = ''\n\n if hundreds > 0:\n hundreds_str = '{} hundred'.format(ones_place[hundreds])\n\n if tens > 0:\n if tens == 1:\n is_teen = True\n tens_str = teens[ones]\n else:\n tens_str = tens_place[tens]\n if ones > 0 and not is_teen:\n ones_str = ones_place[ones]\n\n # Create the final string\n\n if hundreds_str:\n ret_str.append(hundreds_str)\n # Add a space if there is a tens\n # or ones place digit.\n if tens_str or ones_str:\n ret_str.append(' ')\n\n if tens_str:\n ret_str.append(tens_str)\n # Add a space or hyphen depending\n # on the ones place digit.\n if ones_str:\n if tens > 1:\n ret_str.append('-')\n else:\n ret_str.append(' ')\n\n if ones_str:\n ret_str.append(ones_str)\n return ''.join(ret_str)", "def process_for_latex(string):\n int_to_str = {0: \"zero\", 1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\",\n 5: \"five\", 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\"}\n latex_string = string.replace(\" \", \"\")\n lst = re.findall('\\d', latex_string)\n for int_str in lst:\n latex_string = re.sub(int_str, int_to_str[int(int_str)], latex_string)\n return latex_string", "def _num2str(self, num):\n q, mod = divmod(num, 10)\n suffix = \"th\" if q == 1 else self.SUFFIX_DICT[mod]\n return f\"{num}{suffix}\"", "def transform(s):\r\n return 'digit ' + str(s)", "def l10n_mx_edi_amount_to_text(self, amount_total):\n self.ensure_one()\n currency = self.currency_id.name.upper()\n # M.N. = Moneda Nacional (National Currency)\n # M.E. = Moneda Extranjera (Foreign Currency)\n currency_type = 'M.N' if currency == 'MXN' else 'M.E.'\n # Split integer and decimal part\n amount_i, amount_d = divmod(amount_total, 1)\n amount_d = round(amount_d, 2)\n amount_d = int(round(amount_d * 100, 2))\n words = self.currency_id.with_context(lang='es_ES').amount_to_text(amount_i).upper()\n invoice_words = '%(words)s %(amount_d)02d/100 %(curr_t)s' % dict(\n words=words, amount_d=amount_d, curr_t=currency_type)\n return invoice_words", "def get_number_in_portuguese(self, number, result=\"\"):\n number_as_str = str(number)\n\n # Check if the first char is a \"-\" sign\n first_position = number_as_str[0]\n if \"-\" == first_position:\n result = \"menos\"\n # Removes the negative sign from number\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n number_len = len(number_as_str)\n\n if number_len > 1 and self._is_zero_sequence(number_as_str):\n # the rest of the number ends in a zero sequence\n return result.strip()\n\n if first_position == '0':\n if number_len > 1:\n # Cut off the leading zero\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n if not result or result == '-':\n # The number is zero\n return self.ZERO\n\n if number_len > 5:\n # Out of range\n raise NotAcceptable(detail=self.MAX_LIMIT_ERROR)\n\n if number_len == 5:\n # Extract the dozen-thounsands\n first_two_positions = number_as_str[0] + number_as_str[1]\n result = ' '.join([result, self._get_two_digits_number_in_extension(first_two_positions), 'mil'])\n\n if self._is_zero_sequence(number_as_str[2::]):\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n\n return self.get_number_in_portuguese(number=number_as_str[2::], result=result)\n\n if number_len == 4:\n result = ' '.join([result, self.THOUSANDS[first_position]])\n\n if 
self._is_zero_sequence(number_as_str[1::]):\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n if number_len == 3:\n is_following_zeros = self._is_zero_sequence(number_as_str[1::])\n\n if first_position == '1':\n # Number ends in 1xx\n if is_following_zeros:\n # Number is 100\n result = ' '.join([result, self.CEM])\n return result.strip()\n result = ' '.join([result, 'cento e'])\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n result = ' '.join([result, self.HUNDREDS[first_position]])\n if is_following_zeros:\n # Number ends in a zero sequence\n return result.strip()\n result = ' '.join([result, 'e'])\n return self.get_number_in_portuguese(number=number_as_str[1::], result=result)\n\n if number_len == 2:\n result = ' '.join([result, self._get_two_digits_number_in_extension(number_as_str)])\n return result.strip()\n\n if number_len == 1:\n result = ' '.join([result, self.UNITS[number_as_str]])\n\n return result.strip()", "def _decimal2word(strNumber):\n strNumber = \" komma \".join(re.split(\"[,]\", strNumber))\n strNumber = \" punkt \".join(re.split(\"[.]\", strNumber))\n\n tokenList = []\n for w in re.split(SPACEPATTERN, strNumber):\n w = w.strip()\n if NumberFormula._isCardinalNumber(w):\n w = NumberFormula._cardinal2word(w)\n tokenList.append(w)\n\n return \" \".join(tokenList)", "def apnumber(value):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n if not 0 < value < 10:\r\n return str(value)\r\n return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),\r\n _('seven'), _('eight'), _('nine'))[value - 1]", "def spelled_num_to_digits(spelled_num):\n words = re.split(r\",?\\s+|-\", spelled_num.lower())\n major = 0\n units = 0\n for w in words:\n x = SMALL.get(w, None)\n if x is not None:\n units += x\n elif w == \"hundred\":\n units *= 100\n elif w == \"and\":\n continue\n else:\n x = MAGNITUDE.get(w, None)\n if x is not None:\n major += units * x\n units = 0\n else:\n raise NumberException(\"Unknown number: %s\" % w)\n return major + units", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def _extract_whole_number_with_text_nl(tokens, short_scale, ordinals):\n multiplies, string_num_ordinal, string_num_scale = \\\n _initialize_number_data_nl(short_scale)\n\n number_words = [] # type: [Token]\n val = False\n prev_val = None\n next_val = None\n to_sum = []\n for idx, token in enumerate(tokens):\n current_val = None\n if next_val:\n next_val = None\n continue\n\n word = token.word\n if word in _ARTICLES_NL or word in _NEGATIVES_NL:\n number_words.append(token)\n continue\n\n prev_word = tokens[idx - 1].word if idx > 0 else \"\"\n next_word = tokens[idx + 1].word if idx + 1 < len(tokens) else \"\"\n\n if word not in string_num_scale and \\\n word not in _STRING_NUM_NL and \\\n word not in _SUMS_NL and \\\n word not in multiplies and \\\n not (ordinals and word in string_num_ordinal) and \\\n not is_numeric(word) and \\\n not is_fractional_nl(word, short_scale=short_scale) and \\\n not look_for_fractions(word.split('/')):\n words_only = [token.word for token in number_words]\n if number_words and not all([w in _ARTICLES_NL |\n _NEGATIVES_NL for w in words_only]):\n break\n else:\n number_words = []\n continue\n elif word not in multiplies \\\n and prev_word not in multiplies \\\n and 
prev_word not in _SUMS_NL \\\n and not (ordinals and prev_word in string_num_ordinal) \\\n and prev_word not in _NEGATIVES_NL \\\n and prev_word not in _ARTICLES_NL:\n number_words = [token]\n elif prev_word in _SUMS_NL and word in _SUMS_NL:\n number_words = [token]\n else:\n number_words.append(token)\n\n # is this word already a number ?\n if is_numeric(word):\n if word.isdigit(): # doesn't work with decimals\n val = int(word)\n else:\n val = float(word)\n current_val = val\n\n # is this word the name of a number ?\n if word in _STRING_NUM_NL:\n val = _STRING_NUM_NL.get(word)\n current_val = val\n elif word in string_num_scale:\n val = string_num_scale.get(word)\n current_val = val\n elif ordinals and word in string_num_ordinal:\n val = string_num_ordinal[word]\n current_val = val\n\n # is the prev word an ordinal number and current word is one?\n # second one, third one\n if ordinals and prev_word in string_num_ordinal and val == 1:\n val = prev_val\n\n # is the prev word a number and should we sum it?\n # twenty two, fifty six\n if prev_word in _SUMS_NL and val and val < 10:\n val = prev_val + val\n\n # is the prev word a number and should we multiply it?\n # twenty hundred, six hundred\n if word in multiplies:\n if not prev_val:\n prev_val = 1\n val = prev_val * val\n\n # is this a spoken fraction?\n # half cup\n if val is False:\n val = is_fractional_nl(word, short_scale=short_scale)\n current_val = val\n\n # 2 fifths\n if not ordinals:\n next_val = is_fractional_nl(next_word, short_scale=short_scale)\n if next_val:\n if not val:\n val = 1\n val = val * next_val\n number_words.append(tokens[idx + 1])\n\n # is this a negative number?\n if val and prev_word and prev_word in _NEGATIVES_NL:\n val = 0 - val\n\n # let's make sure it isn't a fraction\n if not val:\n # look for fractions like \"2/3\"\n aPieces = word.split('/')\n if look_for_fractions(aPieces):\n val = float(aPieces[0]) / float(aPieces[1])\n current_val = val\n\n else:\n if prev_word in _SUMS_NL and word not in _SUMS_NL and current_val >= 10:\n # Backtrack - we've got numbers we can't sum.\n number_words.pop()\n val = prev_val\n break\n prev_val = val\n\n # handle long numbers\n # six hundred sixty six\n # two million five hundred thousand\n if word in multiplies and next_word not in multiplies:\n to_sum.append(val)\n val = 0\n prev_val = 0\n\n if val is not None and to_sum:\n val += sum(to_sum)\n\n return val, number_words", "def numbers(n):\n if n == 0:\n return 'zero'\n elif n == 1:\n return 'one'\n elif n == 2:\n return 'two'\n else:\n return 'unknown number'", "def _to_cn(number):\n\n chinese_numeral_dict = {\n '0': '零',\n '1': '一',\n '2': '二',\n '3': '三',\n '4': '四',\n '5': '五',\n '6': '六',\n '7': '七',\n '8': '八',\n '9': '九'\n }\n chinese_unit_map = [('', '十', '百', '千'),\n ('万', '十万', '百万', '千万'),\n ('亿', '十亿', '百亿', '千亿'),\n ('兆', '十兆', '百兆', '千兆'),\n ('吉', '十吉', '百吉', '千吉')]\n chinese_unit_sep = ['万', '亿', '兆', '吉']\n\n reversed_n_string = reversed(str(number))\n\n result_lst = []\n unit = 0\n\n for integer in reversed_n_string:\n if integer is not '0':\n result_lst.append(chinese_unit_map[unit // 4][unit % 4])\n result_lst.append(chinese_numeral_dict[integer])\n unit += 1\n else:\n if result_lst and result_lst[-1] != '零':\n result_lst.append('零')\n unit += 1\n\n result_lst.reverse()\n\n # clean convert result, make it more natural\n if result_lst[-1] is '零':\n result_lst.pop()\n\n result_lst = list(''.join(result_lst))\n\n for unit_sep in chinese_unit_sep:\n flag = result_lst.count(unit_sep)\n while flag > 1:\n 
result_lst.pop(result_lst.index(unit_sep))\n flag -= 1\n\n '''\n length = len(str(number))\n if 4 < length <= 8:\n flag = result_lst.count('万')\n while flag > 1:\n result_lst.pop(result_lst.index('万'))\n flag -= 1\n elif 8 < length <= 12:\n flag = result_lst.count('亿')\n while flag > 1:\n result_lst.pop(result_lst.index('亿'))\n flag -= 1\n elif 12 < length <= 16:\n flag = result_lst.count('兆')\n while flag > 1:\n result_lst.pop(result_lst.index('兆'))\n flag -= 1\n elif 16 < length <= 20:\n flag = result_lst.count('吉')\n while flag > 1:\n result_lst.pop(result_lst.index('吉'))\n flag -= 1\n '''\n\n return ''.join(result_lst)", "def lire():\r\n price = give_price_websites_1(\"https://www.tgju.org/profile/price_try\")\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"lire : \" + format(price / 10000, '.2f') + '0' + ' kTomans'\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" هزارتومان\" + format(price/10000000, '.3f') + '0' + \"لیر : \"", "def tens_conversion(positive_int):\n # I use an index of [-2] to select the ten's place, and so forth until the thousands\n positive_int = str(positive_int)\n if int(positive_int[-2]) < 4:\n return 'X' * int(positive_int[-2])\n if int(positive_int[-2]) == 4:\n return 'XL'\n if int(positive_int[-2]) == 5:\n return 'L'\n if int(positive_int[-2]) == 6:\n return 'LX'\n if int(positive_int[-2]) == 7:\n return 'LXX'\n if int(positive_int[-2]) == 8:\n return 'LXXX'\n if int(positive_int[-2]) == 9:\n return 'XC'", "def numbers_to_words(data: pd.Series) -> pd.Series:\n engine = inflect.engine()\n return data.apply(lambda row: re.sub(\n r'(?<!\\S)\\d+(?!\\S)', lambda x: engine.number_to_words(x.group()), row))", "def num(number: int) -> str:\n numbers = {1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"ten\"}\n if number in numbers:\n return numbers[number]\n else:\n return f\"{number:,}\"", "def price_text(price):\n if price == 0:\n return \"Gratis\"\n\n return price", "def intword(value, format='%.1f'):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n\r\n if value < powers[0]:\r\n return str(value)\r\n for ordinal, power in enumerate(powers[1:], 1):\r\n if value < power:\r\n chopped = value / float(powers[ordinal - 1])\r\n return (' '.join([format, _(human_powers[ordinal - 1])])) % chopped\r\n return str(value)", "def format_large_numbers(text):\n\n text = re.sub(r\"(?<!\\d)\\$?\\d{1,3}(?=(,\\d{3}|\\s))\", r\" \\g<0> \", text) # pad commas in large numerical values\n return re.sub(r\"(\\d+)?,(\\d+)\", r\"\\1\\2\", text) # remove commas from large numerical values", "def transform(s):\n return 'digit ' + str(s)", "def number_as_string(x):\n \n numnames = {1 : \"one\", 2 : \"two\", 3 : \"three\", 4 : \"four\", 5 : \"five\", 6 : \"six\", 7 : \"seven\", 8 : \"eight\", 9 : \"nine\",\n 10 : \"ten\", 11 : \"eleven\", 12 : \"twelve\", 13 : \"thirteen\", 14 : \"fourteen\", 15 : \"fifteen\", 16 : \"sixteen\",\n 17 : \"seventeen\", 18 : \"eighteen\", 19 : \"nineteen\", 20 : \"twenty\", 30 : \"thirty\", 40 : \"forty\", 50 : \"fifty\", \n 60 : \"sixty\", 70 : \"seventy\", 80 : \"eighty\", 90 : \"ninety\"}\n \n numparts = []\n needAnd = (x > 100) and (x % 100)\n if x >= 1000:\n numparts.append(numnames[x/1000])\n numparts.append(\"thousand\")\n x %= 1000\n \n if x >= 100:\n numparts.append(numnames[x/100])\n numparts.append(\"hundred\")\n x %= 100\n \n if needAnd:\n numparts.append(\"and\")\n \n if 11 <= x 
<= 19:\n numparts.append(numnames[x])\n else:\n if x >= 10:\n numparts.append(numnames[(x/10)*10])\n x %= 10\n\n if x > 0:\n numparts.append(numnames[x])\n \n return \" \".join(numparts)", "def get_text(downgrade_titles=False):", "def convert_number(number):\n return ' ' + ' '.join(list(int_to_roman(number))) + ' '", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def plurals(num):\r\n if num != 1:\r\n return ('s')\r\n return ('')", "def GetPluralString(*args, **kwargs):\n return _gdi_.PyLocale_GetPluralString(*args, **kwargs)", "def convert(n):\n if n in numbersDict:\n return len(numbersDict[n]), numbersDict[n]\n # else, n is greater than 20\n\n # reverse so that n[0] is the ones place an so on\n n = list(map(int, reversed(str(n))))\n\n word = []\n\n wordHundred = \"hundred\"\n wordAnd = \"and\"\n wordThousand = \"thousand\"\n\n if (n[1]*10 + n[0]) in numbersDict:\n word.append(numbersDict[(n[1]*10 + n[0])])\n else:\n word.append(numbersDict.get(n[0], \"\"))\n word.append(numbersDict.get(n[1] * 10, \"\"))\n\n if len(n) > 2:\n if n[1] or n[0]: word.append(wordAnd)\n hundreds = numbersDict.get(n[2], \"\")\n needHundred = wordHundred if hundreds else \"\"\n word.append(needHundred)\n word.append(hundreds)\n\n if len(n) > 3:\n thousands = numbersDict.get(n[3], \"\")\n needThousand = wordThousand if thousands else \"\"\n word.append(needThousand)\n word.append(thousands)\n\n return len(\"\".join(word)), \" \".join(reversed(word))", "def float_to_letters(flo):\n\n\ttry:\n\t\texpo_int = int(floor(log10(abs(flo))))\t\t\t# integer exponent\n\t\texpo_let = list(_exponents.keys())[list(_exponents.values()).index(\n\t\t\t'%i' % expo_int\t\t\t\t\t\t\t\t# corresponding letter exponent\n\t\t\t)]\n\texcept TypeError: return ''\t\t\t\t\t\t\t# return empty string if input type is not recognisable\n\texcept (OverflowError, KeyError): return 'l0000'\t# return 0 if exponent not attainable or zero\n\tif flo < 0: expo_let = expo_let.upper()\t\t\t\t# make upper case if float is negative\n\n\tdigi = round(abs(flo) * (10**(significant_figures - expo_int - 1)))\t# digits in litteral expression\n\n\treturn '%s%i' % (expo_let, digi)", "def spell(number):\n if number > 1000:\n raise # This doesn't handle numbers greater than 1000\n\n if number == 1000:\n return ['one', 'thousand']\n\n if number >= 100:\n if number % 100 == 0:\n return spell(number // 100) + ['hundred']\n else:\n return spell(number // 100 * 100) + ['and'] + spell(number % 100)\n\n if number >= 20:\n names = {\n 20: 'twenty',\n 30: 'thirty',\n 40: 'forty',\n 50: 'fifty',\n 60: 'sixty',\n 70: 'seventy',\n 80: 'eighty',\n 90: 'ninety',\n }\n if number % 10 == 0:\n return [names[number]]\n else:\n return spell(number // 10 * 10) + spell(number % 10)\n\n names = [\n 'zero', 'one', 'two', 'three', 'four',\n 'five', 'six', 'seven', 'eight', 'nine',\n 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',\n 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen',\n ]\n return [names[number]]", "def w2n(word):\n word = re.sub('[^A-Z0-9]', '', word)\n return ''.join([letter_to_number_mapping[x] for x in word])", "def zh_num2digit(string):\n for match in zh_nums_iter(string):\n num_str = match.group(0)\n digit_num = parse_zh_num(num_str)\n if digit_num is None:\n continue\n string = string.replace(num_str, str(digit_num), 1)\n return string", "def base_number(number, count, dict_cardinal_num):\n special_numeral = [\"trăm\", \"mười\", \"mươi\", \"linh\", \"lăm\", \"mốt\"]\n list_cardinal_numeral = []\n # Divide number (abc) and follow place's 
number\n a = number // 100 # hundreds\n b = (number % 100) // 10 # Tens\n c = number % 10 # Ones\n # check a\n if a > 0:\n list_cardinal_numeral.append(dict_cardinal_num[a])\n list_cardinal_numeral.append(special_numeral[0])\n elif a == 0:\n if count > 1 and (b > 0 or c > 0):\n list_cardinal_numeral.append(dict_cardinal_num[a])\n list_cardinal_numeral.append(special_numeral[0])\n # check b\n if b == 0:\n if c > 0:\n if a > 0 or count > 1:\n list_cardinal_numeral.append(special_numeral[3])\n elif b > 0:\n if b == 1:\n list_cardinal_numeral.append(special_numeral[1])\n elif b > 1:\n list_cardinal_numeral.append(dict_cardinal_num[b])\n list_cardinal_numeral.append(special_numeral[2])\n # check c\n if c == 0:\n if count == 1 and a == 0 and b == 0:\n list_cardinal_numeral.append(dict_cardinal_num[c])\n elif c > 0:\n if b >= 1 and c == 5:\n list_cardinal_numeral.append(special_numeral[4])\n elif b >= 2 and c == 1:\n list_cardinal_numeral.append(special_numeral[5])\n else:\n list_cardinal_numeral.append(dict_cardinal_num[c])\n\n return \" \".join(list_cardinal_numeral)", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def hundreds_conversion(positive_int):\n positive_int = str(positive_int)\n if int(positive_int[-3]) < 4:\n return 'C' * int(positive_int[-3])\n if int(positive_int[-3]) == 4:\n return 'CD'\n if int(positive_int[-3]) == 5:\n return 'D'\n if int(positive_int[-3]) == 6:\n return 'DC'\n if int(positive_int[-3]) == 7:\n return 'DCC'\n if int(positive_int[-3]) == 8:\n return 'DCCC'\n if int(positive_int[-3]) == 9:\n return 'CM'", "def stats_text_en(text):\n text1 = text.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','') # Remove the non-English characters in the text.\n text2 = text1.split() # Convert the string type to the list type.\n dict = {x: text2.count(x) for x in text2} # Count the times of each word in the list.\n dict1= sorted(dict.items(), key= lambda d:d[1], reverse = True) # Sort the words in the descending order according to the times of words.\n print(dict1) # Return the result.", "def romanify(num):\n result = \"\"\n return result", "def morph_number(word, number):\n\n if (not number) or (number == ''): \n return word\n elif not word:\n return word\n elif number not in ['first', 'second'] or number == 'plural':\n if word in lexicon.IRREG_PLURALS:\n return lexicon.IRREG_PLURALS[word]\n else:\n pluralize(word)\n else:\n return word", "def text_language(text):\n hebrew = 0\n english = 0\n for char in text:\n if char in \"אבגדהוזחטיכךלמםנסעפףצץקרשת\":\n hebrew += 1\n elif char.lower() in \"abcdefghijklmnopqrstuvwxyz\":\n english += 1\n return {True: \"hebrew\", False: \"english\"}[hebrew > english]", "def ta2en(text):\n return IITB_translator(\"ta\", \"en\", text)", "def num_to_words(data):\n tokens = word_tokenize(str(data))\n new = \"\"\n for word in tokens:\n try:\n word = num2words(int(w))\n except:\n a = 0\n new = new + \" \" + word\n new = np.char.replace(new, \"-\", \" \")\n return new", "def translate_leet(phrase):", "def numbersToLetters(number):\n arr = [number[i:i+2] for i in range(0, len(number), 2)]\n result = ''\n for i in arr:\n i = int(i)\n if(i<=48):\n i = i + 48\n result += chr(i)\n return result", "def pound():\r\n price = give_price_websites_1(\"https://www.tgju.org/profile/price_gbp\")\r\n\r\n if 
users_language[update.effective_chat.id] == \"english\":\r\n return \"pound : \" + format(price/10000, '.2f') + ' kTomans'\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" هزارتومان\" + format(price/10000000, '.3f') + \"پوند : \"", "def convert_to_text(nums):\n\n\ttext = []\n\tfor num in nums:\n\t\ttext.append(chr(num))\n\treturn ''.join(text)", "def romanify(num):\n result = \"\"\n onesDict = {1:\"I\", 2: \"II\", 3: \"III\", 4: \"IV\", 5: \"V\", 6: \"VI\", 7: \"VII\", 8: \"VIII\", 9: \"IX\", 0:\"\"}\n ones = num%10\n num-=num%10\n result = onesDict[ones] + result\n tensDict = {10:\"X\", 20: \"XX\", 30: \"XXX\", 40:\"XL\", 50:\"L\", 60:\"LX\", 70: \"LXX\", 80: \"LXXX\", 90: \"XC\", 0:\"\"}\n tens = num%100\n num-=num%100\n result = tensDict[tens] + result\n hunsDict = {100:\"C\", 200: \"CC\", 300: \"CCC\", 400:\"CD\", 500:\"D\", 600:\"DC\", 700: \"DCC\", 800: \"DCCC\", 900: \"CM\", 0:\"\"}\n huns = num%1000\n num-=num%1000\n result = hunsDict[huns] + result\n thous = num/1000\n result = \"M\"*thous + result\n \n return result", "def multiply(number, word):\n return int(number) * word", "def _roman2word(strNumber):\n strNumber = strNumber\n cardinalNumber = fromRoman(strNumber)\n return NumberFormula._cardinal2word(cardinalNumber)", "def integers_only(text) -> str:\n return ''.join(x for x in text if x.isdigit())", "def base2str(self, int_number):\r\n return self.format_base % (float(int_number) / self.mult_base)", "def replace_digits(text):\n text = re.sub(r\"\\d+\", \"number\", text)\n \n return text", "def html_integral_numbers(int_esc: int) -> str:\n return f\"{int_esc}(<i>{str(hex(int_esc))}</i>)\"", "def latex_number(value):\n\n if isinstance(value, str):\n return value\n vstring = '%.4g' % value\n if vstring.find('e+0') > -1:\n vstring = vstring.replace('e+0', times + '10^{') + '}'\n elif vstring.find('e-0') > -1:\n vstring = vstring.replace('e-0', times + '10^{-') + '}'\n elif 'e' in vstring:\n vstring = vstring.replace('e', times + '10^{') + '}'\n if '.' 
in vstring and decimal_point != '.':\n vstring = vstring.replace('.', decimal_point)\n\n latex = vstring.replace('+', '')\n if \"^\" in latex:\n latex = '$%s$' % latex\n return latex", "def _naturalize_numbers(self, string):\n\n def naturalize_int_match(match):\n return \"%08d\" % (int(match.group(0)),)\n\n string = re.sub(r\"\\d+\", naturalize_int_match, string)\n\n return string", "def numero_a_letras(n):\n especiales = {0: 'cero', 10: 'diez', 11: 'once', 12: 'doce', 13: 'trece', 14: 'catorce', 15: 'quince', 20: 'veinte', 100: 'cien', 1000: 'mil'}\n if n in especiales:\n return especiales[n]\n if n < 100:\n cifras = ['', 'una', 'dos', 'tres', 'cuatro', 'cinco', 'seis', 'siete', 'ocho', 'nueve']\n decenas = ['', 'dieci', 'veinti', 'treinta', 'cuarenta', 'cincuenta', 'sesenta', 'setenta', 'ochenta', 'noventa']\n if n % 10 == 0:\n return decenas[n // 10]\n if n < 30:\n return f\"{decenas[n // 10]}{cifras[n % 10]}\"\n return f\"{decenas[n // 10]} y {cifras[n % 10]}\"\n elif n < 1000:\n centenas = ['', 'ciento', 'doscientas', 'trescientas', 'cuatrocientas', 'quinientas', 'seiscientas', 'setecientas', 'ochocientas', 'novecientas']\n if n % 100 == 0:\n return centenas[n // 100]\n return f\"{centenas[n // 100]} {numero_a_letras(n % 100)}\"\n elif n < 10**6:\n if n < 2000:\n return f\"mil {numero_a_letras(n % 1000)}\"\n if n % 1000 == 0:\n return f\"{numero_a_letras(n // 1000)} mil\"\n return f\"{numero_a_letras(n // 1000)} mil {numero_a_letras(n % 1000)}\"\n else:\n raise ValueError(\"Numero demasiado grande\")", "def cond_int2str(cond_int=0):\n try:\n return {\n 0: '晴',\n 1: '多云',\n 2: '阴',\n 3: '阵雨',\n 4: '雷阵雨',\n 5: '雷阵雨伴有冰雹',\n 6: '雨夹雪',\n 7: '小雨',\n 8: '中雨',\n 9: '大雨',\n 10: '暴雨',\n 11: '大暴雨',\n 12: '特大暴雨',\n 13: '阵雪',\n 14: '小雪',\n 15: '中雪',\n 16: '大雪',\n 17: '暴雪',\n 18: '雾',\n 19: '冻雨',\n 20: '沙尘暴',\n 21: '小到中雨',\n 22: '中到大雨',\n 23: '大到暴雨',\n 24: '暴雨到大暴雨',\n 25: '大暴雨到特大暴雨25',\n 26: '小到中雪',\n 27: '中到大雪',\n 28: '大到暴雪',\n 29: '浮尘',\n 30: '扬沙',\n 31: '强沙尘暴',\n 53: '霾',\n 99: '无'\n }[cond_int]\n except KeyError as e:\n logging.warning(e)\n return \"-\"", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def WWRomanNumber(number):\r\n\t\r\n\tif number>4999 or number<1:\r\n\t\traise OverflowError(\"The should be between 1 and 4999, here it is \"+str(number))\r\n\tnumerals = { 1 : \"I\", 4 : \"IV\", 5 : \"V\", 9 : \"IX\", 10 : \"X\", 40 : \"XL\",\r\n\t\t\t\t50 : \"L\", 90 : \"XC\", 100 : \"C\", 400 : \"CD\", 500 : \"D\", 900 : \"CM\", 1000 : \"M\" }\r\n\tresult = \"\"\r\n\tfor value, numeral in sorted(numerals.items(), reverse=True):\r\n\t\twhile number >= value:\r\n\t\t\tresult += numeral\r\n\t\t\tnumber -= value\r\n\treturn result", "def suffix(d): \n return \"th\" if 11<=d<=13 else {1:\"st\",2:\"nd\",3:\"rd\"}.get(d%10, \"th\")", "def add_numbers():\n\n for fraction, fraction_spelling in [(0.25, 'quarter'), (0.5, 'half')]:\n add_db_number(fraction, fraction_spelling)\n\n for cardinal in xrange(60):\n add_db_number(cardinal, spell_number(cardinal))\n\n for single_digit in xrange(9):\n add_db_number(single_digit, \"zero \" + spell_number(single_digit))\n add_db_number(single_digit, \"o \" + spell_number(single_digit))", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n 
\"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def format_number(n):\n # locale.setlocale(locale.LC_ALL, 'en_US') # commented by me\n # return locale.format('%d', n, grouping=True)\n return n" ]
[ "0.7694077", "0.7104256", "0.7084235", "0.6613821", "0.64341813", "0.6333191", "0.6266428", "0.62469083", "0.6115907", "0.6083413", "0.6064189", "0.6041851", "0.5971319", "0.59671617", "0.59606844", "0.58907706", "0.5845223", "0.58414835", "0.5818904", "0.5810572", "0.58071923", "0.5769101", "0.57457346", "0.571984", "0.56917167", "0.56897444", "0.5685031", "0.5666315", "0.5664131", "0.5642236", "0.5640896", "0.559643", "0.5554741", "0.5535638", "0.5531605", "0.552721", "0.5511391", "0.5509511", "0.5495352", "0.5488441", "0.54732794", "0.5459285", "0.54541206", "0.54454845", "0.54443854", "0.54389805", "0.5436626", "0.54358673", "0.5434871", "0.5430474", "0.5413532", "0.54110664", "0.54045403", "0.5398106", "0.5395723", "0.5391496", "0.5390938", "0.5390032", "0.538963", "0.538815", "0.5386414", "0.53846884", "0.53791356", "0.53471315", "0.53114486", "0.5307191", "0.52961683", "0.5295727", "0.52936494", "0.5290728", "0.5276445", "0.52451897", "0.52301764", "0.5226641", "0.5220045", "0.52068", "0.5204747", "0.520427", "0.5200492", "0.51990914", "0.5189732", "0.51863515", "0.5182251", "0.51786244", "0.5175904", "0.51736504", "0.5168252", "0.516794", "0.5152334", "0.5146387", "0.5139865", "0.5131034", "0.51163024", "0.51163024", "0.51163024", "0.51147217", "0.51126873", "0.51092875", "0.5105652", "0.51044655" ]
0.7075966
3
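For reference, the snippets collected in the negatives above are all variants of one integer-to-words pattern: lookup tables for the ones/teens/tens names, plus division by 100 (and 1,000) with recursion for the larger places. Below is a minimal, self-contained sketch of that pattern for 0-999; it is an independent illustration, not one of the dataset entries, and the names ONES, TENS and to_english are hypothetical.

# Illustrative sketch only (hypothetical names), covering 0..999.
ONES = ["zero", "one", "two", "three", "four", "five", "six", "seven",
        "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
        "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]
TENS = ["", "", "twenty", "thirty", "forty", "fifty",
        "sixty", "seventy", "eighty", "ninety"]

def to_english(n: int) -> str:
    if not 0 <= n <= 999:
        raise ValueError("this sketch only handles 0..999")
    if n < 20:                                   # direct lookup, including teens
        return ONES[n]
    if n < 100:                                  # tens name plus hyphenated ones
        tens, ones = divmod(n, 10)
        return TENS[tens] + ("-" + ONES[ones] if ones else "")
    hundreds, rest = divmod(n, 100)              # recurse on the remainder
    return ONES[hundreds] + " hundred" + (" and " + to_english(rest) if rest else "")

if __name__ == "__main__":
    for n in (0, 7, 15, 42, 90, 300, 512, 999):
        print(n, "->", to_english(n))            # e.g. 512 -> five hundred and twelve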
Solves [a]{b} = {x} by Gauss elimination.
def gaussElimin(a,b): a=float64(a) b=float64(b) n=len(b) x=zeros((n,1),dtype=float) for k in range(n-1): for i in range(k+1,n): l=float(a[i][k])/a[k][k] a[i][k]=0 for j in range(k+1,n): a[i][j]=a[i][j]-l*a[k][j] b[i]=b[i]-l*b[k] x[n-1]=float(b[n-1])/a[n-1][n-1] for i in range(n-2,-1,-1): sum=b[i] for j in range(i+1,n): sum=sum-a[i][j]*x[j] x[i]=float(sum)/a[i][i] return x
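The document above is naive Gauss elimination (no pivoting) followed by back substitution: it assumes every pivot a[k][k] is nonzero and that NumPy's float64/zeros are in scope (e.g. via from numpy import *). Below is a minimal, self-contained sketch of the same idea, checked against numpy.linalg.solve; it is an independent illustration with hypothetical names, not the dataset's own code.

# Illustrative sketch only: Gauss elimination without pivoting, verified with NumPy.
import numpy as np

def gauss_elimin_demo(a, b):
    a = np.array(a, dtype=float)
    b = np.array(b, dtype=float)
    n = len(b)
    # Forward elimination: eliminate column k below the diagonal.
    for k in range(n - 1):
        for i in range(k + 1, n):
            lam = a[i, k] / a[k, k]          # assumes a[k, k] != 0 (no pivoting)
            a[i, k + 1:] -= lam * a[k, k + 1:]
            b[i] -= lam * b[k]
    # Back substitution on the resulting upper-triangular system.
    x = np.zeros(n)
    for i in range(n - 1, -1, -1):
        x[i] = (b[i] - a[i, i + 1:] @ x[i + 1:]) / a[i, i]
    return x

if __name__ == "__main__":
    A = [[4.0, -2.0, 1.0],
         [-2.0, 4.0, -2.0],
         [1.0, -2.0, 4.0]]
    b = [11.0, -16.0, 17.0]
    x = gauss_elimin_demo(A, b)
    print(x)                                       # ~ [ 1. -2.  3.]
    print(np.allclose(x, np.linalg.solve(A, b)))   # True

For zero or very small pivots this breaks down; partial pivoting (swapping in the row with the largest |a[i, k]|, as several of the negatives below do) is the usual fix.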
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_elimination(A, b):\n \n m, n = A.shape\n U = A.copy() \n b = b.copy()\n\n # forward sweep, reduce A to a upper triangular matrix\n for k in range(min(m, n)):\n swap = np.argmax(np.abs(U[k:, k])) + k\n if U[swap, k] == 0:\n raise ValueError('Singular matrix')\n U[[k, swap], :] = U[[swap, k], :]\n b[[k, swap]] = b[[swap, k]]\n \n for i in range(k + 1, m):\n factor = U[i, k] / U[k, k]\n b[i] = b[i] - factor*b[k]\n U[i, k+1:] = U[i, k+1:] - U[k, k+1:] * factor\n U[i, k] = 0\n \n # solve by back subistitution\n x = rbackwardsolve(U, b, m)\n\n return x", "def gaussian_elimination(A, b):\n n = len(b)\n # Join A and b\n ab = np.c_[A,b]\n # Gaussian Elimination\n for i in range(n-1):\n if ab[i,i] == 0:\n raise ZeroDivisionError('Zero value in matrix..')\n\n for j in range(i+1, n):\n ratio = ab[j,i] / ab[i,i]\n\n for k in range(i, n+1):\n ab[j,k] = ab[j,k] - ratio * ab[i,k]\n\n # Backward Substitution\n X = np.zeros((n,1))\n X[n-1,0] = ab[n-1,n] / ab[n-1,n-1]\n\n for i in range(n-2,-1,-1):\n knowns = ab[i, n]\n for j in range(i+1, n):\n knowns -= ab[i,j] * X[j,0]\n X[i,0] = knowns / ab[i,i]\n return X", "def gaussian_elimination_pivots(A, b):\n\n P, L, U = PLU(A)\n n,_ = A.shape\n y = rforwardsolve(L, (P.T).dot(b), n)\n x = rbackwardsolve(U, y, n)\n\n return x", "def Gauss_Seidel_Solve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n #only change from before is that I use x_new in the update\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def gaussian_solve(a, b):\n g = np.zeros((len(a), len(a[0]) + len(b[0])))\n for i in range(len(a)):\n for j in range(len(a[0])):\n g[i][j] = a[i][j]\n for i in range(len(b)):\n for j in range(len(b[0])):\n g[i][j + len(a[0])] = b[i][j]\n for i in range(len(a)):\n for j in range(i+1, len(a)):\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n for i in range(len(a)):\n i = len(a) - i - 1\n for j in range(i):\n j = i - j - 1\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n if g[i][i] != 0:\n g[i] /= g[i][i]\n else:\n return 'error: matrix is not linearly independent'\n out = np.zeros((len(b), len(b[0])))\n for i in range(len(b)):\n for j in range(len(b[0])):\n out[i][j] = g[i][j + len(a[0])]\n return out", "def gaussian_elimination_special_case(b):\n n = len(b)\n # init new (prime) arrays\n beta_prime = np.empty(n)\n beta_prime[0] = 2\n\n b_prime = np.empty(n)\n b_prime[0] = b[0]\n\n v = np.empty(n)\n i_array = np.arange(n)\n beta_prime = (i_array+2) / (i_array+1)\n\n for i in range(1,n):\n b_prime[i] = b[i] + (b_prime[i-1] / beta_prime[i-1])\n\n v[-1] = b_prime[-1] / beta_prime[-1]\n\n for i in range(n-2, -1, -1):\n v[i] = (b_prime[i] + v[i+1])/ beta_prime[i]\n\n return v", "def solve(self,b):\n nrows = self.nrows\n ncols = self.ncols\n newmatrix = Matrix(nrows,ncols+b.ncols) #Account for b not being just a column vector\n for i in range(nrows):\n for j in 
range(ncols):\n newmatrix[i,j]= self[i,j]\n for j in range(b.ncols):\n newmatrix[i,ncols+j] = b[i,j]\n newmatrix.gaussianelimination()\n x = Matrix(nrows,b.ncols)\n for i in range(x.nrows):\n for j in range(b.ncols):\n x[i,j] = newmatrix[i,j+ncols]\n return x", "def elimination(A, b):\n n = len(A)\n for j in range(n):\n if A[j][j] <= 0:\n raise ValueError('Matrix A is not positive definite.')\n A[j][j] = math.sqrt(A[j][j])\n b[j][0] = b[j][0] / A[j][j]\n for i in range(j + 1, n):\n A[i][j] = A[i][j] / A[j][j]\n b[i][0] = b[i][0] - A[i][j] * b[j][0]\n for k in range(j + 1, i + 1):\n A[i][k] = A[i][k] - A[i][j] * A[k][j]", "def polynomial(x, y):\n \n var = copy(x)\n known = copy(y)\n V = vandermonde_matrix(var)\n a = gauss_elimination(V, known)\n return a", "def gauss_seidel(a, b, n=None, x=None, delta=None, actual=np.array([]), max_iterations=default_max_iterations):\n # Make sure that both delta and actual are passed in\n if (delta and not actual.any()) or (actual.any() and not delta):\n raise SyntaxError(\"Must pass in both delta and actual if one is passed in\")\n # Make sure that only N or delta is passed in\n if delta and n:\n raise SyntaxError(\"Can only pass delta or N option\")\n\n # Create an initial guess if needed\n if x is None:\n x = np.zeros(len(a[0]))\n\n # Iterate for N times if N is passed in\n if n:\n L = np.tril(a)\n U = a - L\n for i in range(n):\n x = np.dot(np.linalg.inv(L), b - np.dot(U, x))\n\n # Iterate until error is found or max_iterations is exceeded if delta and actual are passed in\n elif delta and actual.any():\n n = 0\n actual_norm = np.linalg.norm(actual)\n L = np.tril(a)\n U = a - L\n\n while True:\n x = np.dot(np.linalg.inv(L), b - np.dot(U, x))\n x_norm = np.linalg.norm(x)\n n += 1\n # Compare norms of actual matrix with Jacobian-calculated matrix and if difference is within error, return\n # the number of iterations it took to get within the error\n if abs(Decimal(actual_norm) - Decimal(x_norm)) <= delta or n >= max_iterations:\n break\n # If neither N or delta was passed in\n else:\n raise SyntaxError(\"Must pass in either N or delta options to function\")\n\n # Return the result and the number of iterations taken to find it\n return [x, n]", "def solve_triangular(a, b, lower=False):\n # TODO maybe commit this to gvar.linalg\n # TODO can I raise a LinAlgError if a[i,i] is 0, and still return the\n # result and have it assigned to a variable using try...finally inside this\n # function?\n x = np.copy(b)\n a = a.reshape(a.shape + (1,) * len(x.shape[1:]))\n if lower:\n x[0] /= a[0, 0]\n for i in range(1, len(x)):\n x[i:] -= x[i - 1] * a[i:, i - 1]\n x[i] /= a[i, i]\n else:\n x[-1] /= a[-1, -1]\n for i in range(len(x) - 1, 0, -1):\n x[:i] -= x[i] * a[:i, i]\n x[i - 1] /= a[i - 1, i - 1]\n return x", "def sparse_gauss_seidel(A, b, tol=1e-8, maxiters=29):\n \n\n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m): \n rowstart = A.indptr[i]\n rowend = A.indptr[i+1]\n aii=A[i,i]\n xj[i]=(b[i]-(np.dot(A.data[rowstart:rowend], xi[A.indices[rowstart:rowend]])-aii*xi[i]))/(aii)\n xi[i]=xj[i]\n return xj\n \n #Aix = np.dot(A.data[rowstart:rowend], x[A.indices[rowstart:rowend]])\n\n m=len(b)\n xk=np.zeros((m,))\n for i in xrange(0,maxiters):\n xk=iter(xk)\n if (la.norm(A.dot(xk)-b,ord=np.inf)<tol) or (i==maxiters-1):\n return xk", "def legendreGauss (func, deg, a, b, ind, bsp, ind2=0):\n\n\tx, w = np.polynomial.legendre.leggauss(deg)\n\tt = 0.5*(x+1)*(b-a)+ a\n\t\n\tgauss = sum(w + func(t, bsp, ind, ind2))*( 0.5*(b-a))\n\n\treturn gauss", "def gem_solve(A, 
b):\r\n\tstart = time()\r\n\tn = len(A)\r\n\tU = [[0.0 for k in range(n)] for k in range(n)]\r\n\tfor k in range(n):\r\n\t\tfor i in range(k+1,n):\r\n\t\t\tA[i][k] = A[i][k]/A[k][k]\r\n\t\t\tb[i] = b[i] - A[i][k]*b[k]\r\n\t\tfor j in range(k+1,n):\r\n\t\t\tfor i in range(k+1, n):\r\n\t\t\t\tA[i][j] = A[i][j]-A[i][k]*A[k][j]\r\n\t\t\t\t\r\n\tfor i in range(n):\r\n\t\tfor j in range(n):\r\n\t\t\tif i>j:\r\n\t\t\t\tU[i][j] = 0\r\n\t\t\telse:\r\n\t\t\t\tU[i][j] = A[i][j]\r\n\t\r\n\tx, place = backward(U, b)\r\n\tend = time()\r\n\treturn x, (end-start)", "def gaussseidel_poissoneq(A, x0):\n return 1", "def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))", "def gauss(x, y, ax, ay, x0, y0, phase):\n g_x = ((ax / sqrt(pi)) ** 0.5\n * exp(-0.5 * ((x - x0) * ax) ** 2))\n g_y = ((ay / sqrt(pi)) ** 0.5\n * exp(-0.5 * ((y - y0) * ay) ** 2))\n\n gxy = np.zeros((len(x),len(y)), dtype=float)\n for i, _gx in enumerate(g_x):\n for j, _gy in enumerate(g_y):\n gxy[i,j] = _gx * _gy \n\n gxy2 = (1.0 / sqrt(1.0+abs(phase))) * np.array([gxy, phase*gxy], dtype=float) \n\n return gxy2", "def gauss_seidel(coeficientes, semilla, b, i, n):\n suma = 0\n for j in range(n):\n if j != i and coeficientes[j] != 0:\n suma += (coeficientes[j] * semilla[j]) / coeficientes[i]\n return (b / coeficientes[i]) - suma", "def gaussian_reduce(w, a, b):\n u = (0, 1)\n v = (1, 0)\n\n if dot(u, v, w, a, b) < 0:\n v = (-v[0], -v[1])\n\n if norm(u, w, a, b) < norm(v, w, a, b):\n u, v = v, u\n\n while norm(u, w, a, b) > norm(v, w, a, b):\n k = dot(u, v, w, a, b) // dot(v, v, w, a, b)\n u, v = v, (u[0]- k*v[0], u[1]- k*v[1])\n\n u, v = v, u\n\n if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):\n c = v\n else:\n c = (u[0] - v[0], u[1] - v[1])\n\n return c[0]*w + b*c[1], c[0]", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def gauss_naive (M, b) -> list:\n dim = len(b)\n\n #Itero sulle Incognite da Trovare\n for i in range(dim):\n\n #Itero sulle righe su cui devo cancellare un elemento\n for j in range(i+1,dim):\n m__j_i = M[j][i] / M[i][i]\n M[j][i] = 0.0\n\n for k in range (i+1,dim):\n M[j][k] = M[j][k] - m__j_i * M[i][k]\n \n b[j] = b[j] - m__j_i * b[i]\n\n return M,b", "def solve(a, b):\n #-> getrf + getrs\n a, _, _ = get_computation_matrix(a)\n b, cv2, isM2 = get_computation_matrix(b)\n if a.get_dtype() != b.get_dtype():\n raise TypeError(\"solve: dtype of a and b are not compatible!\")\n if a.numRows() != a.numCols():\n raise ValueError(\"solve: input a is not a square matrix!\")\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (_, _, x, _) = gesv(a, b, overwrite_a=1, overwrite_b=1, dtype=t_dtype)\n\n if cv2:\n if isM2:\n return x.to_numpy_matrix()\n else:\n return x.to_numpy_array()\n else:\n return x", "def LUsolve(a,b):\n b=float64(b)\n n=len(b)\n LU=LUdecomp(a)\n y=zeros((n,1))\n x=zeros((n,1))\n y[0]=b[0]\n for i in range(1,n):\n sum=b[i]\n for j in range(i):\n sum=sum-LU[i][j]*y[j]\n y[i]=sum\n x[n-1]=float(y[n-1])/LU[n-1][n-1]\n for i in range(n-2,-1,-1):\n sum=y[i]\n for j in range(i+1,n):\n sum=sum-LU[i][j]*x[j]\n x[i]=float(sum)/LU[i][i]\n return x", "def MatrixFreeCG(A, 
b, x, tol=1e-6, maxiter=5000, quiet=True):\n\n if b.dtype != x.dtype:\n raise TaichiTypeError(f\"Dtype mismatch b.dtype({b.dtype}) != x.dtype({x.dtype}).\")\n if str(b.dtype) == \"f32\":\n solver_dtype = ti.f32\n elif str(b.dtype) == \"f64\":\n solver_dtype = ti.f64\n else:\n raise TaichiTypeError(f\"Not supported dtype: {b.dtype}\")\n if b.shape != x.shape:\n raise TaichiRuntimeError(f\"Dimension mismatch b.shape{b.shape} != x.shape{x.shape}.\")\n\n size = b.shape\n vector_fields_builder = ti.FieldsBuilder()\n p = ti.field(dtype=solver_dtype)\n r = ti.field(dtype=solver_dtype)\n Ap = ti.field(dtype=solver_dtype)\n Ax = ti.field(dtype=solver_dtype)\n if len(size) == 1:\n axes = ti.i\n elif len(size) == 2:\n axes = ti.ij\n elif len(size) == 3:\n axes = ti.ijk\n else:\n raise TaichiRuntimeError(f\"MatrixFreeCG only support 1D, 2D, 3D inputs; your inputs is {len(size)}-D.\")\n vector_fields_builder.dense(axes, size).place(p, r, Ap, Ax)\n vector_fields_snode_tree = vector_fields_builder.finalize()\n\n scalar_builder = ti.FieldsBuilder()\n alpha = ti.field(dtype=solver_dtype)\n beta = ti.field(dtype=solver_dtype)\n scalar_builder.place(alpha, beta)\n scalar_snode_tree = scalar_builder.finalize()\n succeeded = True\n\n @ti.kernel\n def init():\n for I in ti.grouped(x):\n r[I] = b[I] - Ax[I]\n p[I] = 0.0\n Ap[I] = 0.0\n\n @ti.kernel\n def reduce(p: ti.template(), q: ti.template()) -> solver_dtype:\n result = solver_dtype(0.0)\n for I in ti.grouped(p):\n result += p[I] * q[I]\n return result\n\n @ti.kernel\n def update_x():\n for I in ti.grouped(x):\n x[I] += alpha[None] * p[I]\n\n @ti.kernel\n def update_r():\n for I in ti.grouped(r):\n r[I] -= alpha[None] * Ap[I]\n\n @ti.kernel\n def update_p():\n for I in ti.grouped(p):\n p[I] = r[I] + beta[None] * p[I]\n\n def solve():\n A._matvec(x, Ax)\n init()\n initial_rTr = reduce(r, r)\n if not quiet:\n print(f\">>> Initial residual = {initial_rTr:e}\")\n old_rTr = initial_rTr\n new_rTr = initial_rTr\n update_p()\n if sqrt(initial_rTr) >= tol: # Do nothing if the initial residual is small enough\n # -- Main loop --\n for i in range(maxiter):\n A._matvec(p, Ap) # compute Ap = A x p\n pAp = reduce(p, Ap)\n alpha[None] = old_rTr / pAp\n update_x()\n update_r()\n new_rTr = reduce(r, r)\n if sqrt(new_rTr) < tol:\n if not quiet:\n print(\">>> Conjugate Gradient method converged.\")\n print(f\">>> #iterations {i}\")\n break\n beta[None] = new_rTr / old_rTr\n update_p()\n old_rTr = new_rTr\n if not quiet:\n print(f\">>> Iter = {i+1:4}, Residual = {sqrt(new_rTr):e}\")\n if new_rTr >= tol:\n if not quiet:\n print(\n f\">>> Conjugate Gradient method failed to converge in {maxiter} iterations: Residual = {sqrt(new_rTr):e}\"\n )\n succeeded = False\n\n solve()\n vector_fields_snode_tree.destroy()\n scalar_snode_tree.destroy()\n return succeeded", "def gauss2(x,a1,c1,w1,a2,c2,w2):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)", "def gauss_seidel(self):\n for i in range(1,self.size[0]-1):\n for j in range(1,self.size[1]-1):\n for k in range(1,self.size[2]-1):\n self.A[(i,j,k)] = ((1/6)*(self.A[(i+1,j,k)] + self.A[(i-1,j,k)] + self.A[(i,j+1,k)] + self.A[(i,j-1,k)] + self.A[(i,j,k+1)] + self.A[(i,j,k-1)] + self.J[(i,j,k)]) - self.A[(i,j,k)])*self.omega + self.A_0[(i,j,k)]", "def gauss_vect_mult(v):\n Jv = T.Rop(output, params, v)\n HJv = T.Rop(T.grad(opt_cost,output), output, Jv)\n JHJv = T.Lop(output, params, HJv)\n if not isinstance(JHJv,list):\n JHJv = [JHJv]\n JHJv = [a+ridge*b for a,b in zip(JHJv,v)]\n return JHJv", "def cg(A, b, x=None):\n n = len(b)\n if not 
x:\n x = np.ones([n,1])\n r = np.dot(A, x) - b\n p = - r\n # r_k_norm = np.dot(r, r)\n r_k_norm = np.linalg.norm ( r )*np.linalg.norm ( r )\n for i in range(2*n):\n Ap = np.dot(A, p)\n alpha = r_k_norm / p.T@Ap\n try:\n x += alpha * p\n except:\n pass\n r += alpha * Ap\n r_kplus1_norm = np.linalg.norm ( r )*np.linalg.norm ( r )\n beta = r_kplus1_norm / r_k_norm\n r_k_norm = r_kplus1_norm\n if r_kplus1_norm < 1e-5:\n break\n p = beta * p - r\n return x", "def g_rosenbrock(x, a=1, b=100):\n\n g = np.array(\n [\n -2 * a - 4 * b * x[0] * (-x[0] ** 2 + x[1]) + 2 * x[0],\n b * (-2 * x[0] ** 2 + 2 * x[1]),\n ]\n )\n\n return g", "def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))", "def gauss_seidel(iterate, x, tol=1.0e-9, relaxation=True):\n omega = 1\n k = 10\n p = 1\n for i in range(1,501):\n xold = x.copy()\n x = iterate(x, omega)\n dx = sqrt(dot(x - xold, x - xold))\n if dx < tol:\n return x, i, omega\n if relaxation:\n # Compute of relaxation factor after k+p iterations\n if i == k:\n dx1 = dx\n if i == k + p:\n dx2 = dx\n omega = 2.0 / (1.0 + sqrt(1.0 - (dx2 / dx1)**(1.0 / p)))\n print 'Gauss-Seidel failed to converge'", "def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):\n n = b.size\n assert A.n == n\n assert A.m == n\n b_norm = np.linalg.norm(b)\n\n # Jacobi pre-conditioner\n kvec = A.diag\n # For diag elem < 1e-6 we keep 1e-6.\n kvec = np.where(kvec > 1.e-6, kvec, 1.e-6)\n\n # Initial guess\n if x0 is None:\n x = np.zeros(n)\n else:\n x = x0\n\n r = b - A.dot(x)\n w = r/kvec\n\n p = np.zeros(n)\n beta = 0.0\n rho = np.dot(r, w)\n k = 0\n\n # Following C. T. Kelley\n while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):\n p = w + beta*p\n z = A.dot(p)\n alpha = rho/np.dot(p, z)\n r = r - alpha*z\n w = r/kvec\n rhoold = rho\n rho = np.dot(r, w)\n x = x + alpha*p\n beta = rho/rhoold\n #err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used\n k += 1\n err = np.linalg.norm(A.dot(x) - b)\n return x, err", "def onedgauss(x,H,A,dx,w):\n #H,A,dx,w = params\n return H+A*np.exp(-(x-dx)**2/(2*w**2))", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def analyticSol (x):\n\treturn x*(1-x);", "def gauss_seidel(A, b, tol=1e-8, maxiters=100, plot=False):\n A=np.array(A)*1.0\n b=np.array(b)*1.0 \n m,n=A.shape\n e=[]\n xk=np.zeros((m,))\n \n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m):\n xj[i]=(b[i]-(np.dot(A[i],xi)-A[i,i]*xi[i]))/A[i,i]\n xi[i]=xj[i]\n return xj\n\n if plot==True: \n for i in xrange(1,maxiters+1):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n #print i-1,e[i-1],xk\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n break\n #How many iterations happened\n iters=len(e) #1..len(e)\n dom=np.arange(0,iters)\n \n plt.semilogy(dom,e,'b.-',basey=10,lw=2, ms=2)\n plt.xlabel(\"Iteration #\")\n plt.ylabel(\"Absolute Error of Approximation\")\n #plt.legend(loc=\"upper left\")\n plt.title(\"Convergence of Gauss-Seidel Method\", fontsize=18)\n plt.show()\n return xk\n \n else:\n for i in xrange(1,maxiters+1):\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n return xk", "def Gauss(self, x, height, centre, width, b=0):\n\n return height * np.exp(-(x - centre)**2 / (2 * width**2)) - b", "def lu_decom(A,b):\n # init\n n = len(b)\n L = np.eye(n)\n U = np.zeros((n,n))\n x = np.zeros(n)\n y = np.zeros(n)\n\n # decomposition A = LU\n\n U[0,:] = A[0,:]\n L[1:,0] = 
A[1:,0] / U[0,0]\n\n for i in range(1,n):\n for j in range(i,n):\n\n U[i,j] = A[i,j] - np.dot(L[i,:i],U[:i,j])\n\n if j != n-1:\n L[j+1,i] = (A[j+1,i] - np.dot(L[j+1,:i],U[:i,i])) / U[i,i]\n\n # solve Ly=b\n y[0] = b[0]\n\n for k in range(1,n):\n y[k] = b[k] - np.dot(L[k,:k],y[:k])\n\n # solve Ux=y\n x[-1] = y[-1] / U[-1,-1]\n\n for k in range(n-2,-1,-1):\n x[k] = (y[k] - np.dot(U[k,k+1:],x[k+1:])) / U[k,k]\n\n return x,L,U", "def solve_canonical_impl(basis, c, A, b):\n (m, n) = A.shape\n Q = np.row_stack(\n (\n np.hstack(([0], -c)),\n np.column_stack((b, A)),\n )\n )\n gauss_elimination(Q, basis)\n\n while True:\n # choose 's' and 'r' according to the Bland's rule\n ss = (j for j in range(1, n + 1) if Q[0][j] < 0)\n s = min(ss, default=None)\n if s is None:\n return basis, Q\n\n rs = [i for i in range(1, m + 1) if Q[i][s] > 0] # and Q[0][s] / Q[i][s] > 0\n r = min(rs, key=lambda i: (abs(Q[0][s] / Q[i][s]), basis[i - 1]), default=None)\n if r is None:\n raise UnboundFunction\n\n Q[r] /= Q[r][s]\n for i in range(m + 1):\n if i != r:\n Q[i] -= Q[r] * Q[i][s]\n\n basis[r - 1] = s", "def solveU(U, b):\n # validate input\n if np.allclose(U,np.triu(U))==False or np.linalg.det == 0:\n raise TypeError(\"U is not an upper regular triangular matrix\")\n \n elif len(U.shape) != 2 or len(b.shape) != 1:\n raise TypeError(\"unsuitable object\")\n \n else:\n un, um = U.shape\n n, = b.shape\n if un != um or un != n:\n raise TypeError((\"dimensions do not fullfill requirements\"))\n\n # solve \n x = np.zeros(n, dtype=complex)\n x[-1] = (b[-1]) / U[n - 1, n - 1]\n for i in range(1, n):\n t = U[(n - (i + 1)):(n - i)] @ x\n x[-(i + 1)] = (b[-(i + 1)] - t) / U[n - (i + 1), n - (i + 1)]\n\n return x", "def usolve(self, ub):\n raise NotImplementedError", "def kl_gauss(x, y, sig2=1.):\n return (x - y) ** 2 / (2 * sig2)", "def g(x):\n return 5. - x[:, 1] - .5 * x[:, 0] ** 2.", "def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. 
* sigma ** 2)))", "def geglu(x: Tensor) ->Tensor:\n assert x.shape[-1] % 2 == 0\n a, b = x.chunk(2, dim=-1)\n return a * F.gelu(b)", "def gauss(x, gamma):\n return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)", "def gelu(x):\n return f_gelu(x)", "def compute_hessian_vector_product(self, function, arguments):", "def singular_solve(U, e, V, b):\n # Calculate S * V^T * x = U^T * b\n y = np.dot(np.transpose(U), b)\n\n for i in range(len(y)):\n y[i] /= e[i]\n\n # Solve\n x = np.dot(V, y)\n\n return x", "def reconstruct(A, B, z):\n f = factorint(igcd(A, B))\n for p, e in f.items():\n if e != 1:\n raise ValueError('a and b should be square-free')\n z *= p\n return z", "def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)", "def gauss_seidel_solver(self, mat, rhs):\n x = np.zeros_like(rhs)\n for it_count in range(1, self.iterations_number):\n x_new = np.zeros_like(x)\n if self.verbose > 1:\n print(\"Iteration {0}: {1}\".format(it_count, x))\n for i in range(mat.shape[0]):\n s1 = np.dot(mat[i, :i], x_new[:i])\n s2 = np.dot(mat[i, i + 1:], x[i + 1:])\n x_new[i] = (rhs[i] - s1 - s2) / mat[i, i]\n if np.allclose(x, x_new, rtol=1e-8):\n break\n x = x_new\n return x", "def solve(self, x_0, dual_x_0):\n # Sanitize the inputs\n if type(x_0) is not np.ndarray or type(dual_x_0) is not np.ndarray:\n x_0 = np.array(x_0)\n dual_x_0 = np.array(dual_x_0)\n # Make sure that the arrays are column vectors\n x_0 = x_0.reshape(-1, 1)\n dual_x_0 = dual_x_0.reshape(-1, 1)\n\n print (\"Starting SQP minimization...\")\n [x, dual_x, exit_info] = self.globalized_sqp(x_0, dual_x_0)\n conv_criteria = exit_info['val']\n\n print (exit_info['msg'])\n print (\"Exiting with ||grad[L]|| = {0:e}\".format(conv_criteria))\n print (\"x = {0}\".format(x.reshape(-1)))\n print (\"dual_x = {0}\".format(dual_x.reshape(-1)))\n\n return [x, dual_x]", "def fun_gauss_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2)) + p[3] * np.exp(-((r/p[2])**2))", "def gaussian_deconvolve (smaj, smin, spa, bmaj, bmin, bpa):\n from numpy import cos, sin, sqrt, min, abs, arctan2\n\n if smaj < bmaj:\n smaj = bmaj\n if smin < bmin:\n smin = bmin\n\n alpha = ((smaj * cos (spa))**2 + (smin * sin (spa))**2 -\n (bmaj * cos (bpa))**2 - (bmin * sin (bpa))**2)\n beta = ((smaj * sin (spa))**2 + (smin * cos (spa))**2 -\n (bmaj * sin (bpa))**2 - (bmin * cos (bpa))**2)\n gamma = 2 * ((smin**2 - smaj**2) * sin (spa) * cos (spa) -\n (bmin**2 - bmaj**2) * sin (bpa) * cos (bpa))\n\n s = alpha + beta\n t = sqrt ((alpha - beta)**2 + gamma**2)\n limit = 0.5 * min ([smaj, smin, bmaj, bmin])**2\n #limit = 0.1 * min ([smaj, smin, bmaj, bmin])**2\n status = 'ok'\n\n if alpha < 0 or beta < 0 or s < t:\n dmaj = dmin = dpa = 0\n\n if 0.5 * (s - t) < limit and alpha > -limit and beta > -limit:\n status = 'pointlike'\n else:\n status = 'fail'\n else:\n dmaj = sqrt (0.5 * (s + t))\n dmin = sqrt (0.5 * (s - t))\n\n if abs (gamma) + abs (alpha - beta) == 0:\n dpa = 0\n else:\n dpa = 0.5 * arctan2 (-gamma, alpha - beta)\n\n return dmaj, dmin, dpa, status", "def gauss_term_fn(iteration_count, v, z):\n return tf.math.square(z) / 4. 
/ (\n (v + iteration_count - 1) * (v + iteration_count))", "def make_quad_gauss(lmax,alm):\n return libcurvedsky.bispec.make_quad_gauss(lmax,alm)", "def CG(A, b, x0, eps=0.01, imax=50):\n i = 0\n x = x0\n # residue\n r = b - A @ x\n # step in the direction of residue\n d = r\n # initial delta^2\n delta_new = np.dot(r,r)\n delta_0 = delta_new\n while i < i_max and delta_new > eps**2 * delta_0:\n alpha = delta_new / np.einsum('i,ij,j', d,A,d)\n x += alpha * d\n # correct for floating point error at some point\n # not useful for high tolerance but good to keep\n # in mind\n if i % 50 == 0:\n r = b - A@x\n else:\n r -= alpha*q\n delta_old = delta_new\n delta_new = np.dot(r, r)\n beta = delta_new / delta_old\n d = r + beta*d\n i += 1\n return x", "def spectral_laplace(x_values, dd_math_function, sigma, ua, ub):\n B = []\n for x in x_values:\n B += [-dd_math_function(x, sigma)]\n B[0] = ua\n B[len(x_values) - 1] = ub\n #B ferdig\n A=[]\n for i in range (len(x_values)):\n a = []\n for j in range (len(x_values)):\n if i == 0 or i == len(x_values) - 1:\n a.append(lagrange(x_values, j, x_values[i]))\n else:\n a.append(dd_lagrange(x_values, j, x_values[i]))\n A.append(a)\n #A ferdig\n return np.linalg.solve(A, B)", "def housetriang_solve(A, b):\n\n n, _ = A.shape\n b = np.reshape(b.copy(), (n, 1))\n R, c = housetriang(A, b)\n x = np.reshape(rbackwardsolve(R, c, n), (n,))\n\n\n return x", "def gaussum(xdata,*params):\n\tamp = num.zeros(0)\n\tcen = num.zeros(0)\n\tstdv = num.zeros(0)\n\n\tfor i in range(0, len(params), 3): #This section is just unpacking the parameter array into amps, cens, and stdvs\n\t\tx = params[i]\n\t\tamp = num.append(amp,x)\n\t\ty = params[i+1]\n\t\tcen = num.append(cen,y)\n\t\tz = params[i+2]\n\t\tstdv = num.append(stdv,z)\n\tglobal storage #You may not need storage to be global so think about taking this part out. storage stores the data\n\tstorage = [[0 for x in range(1)] for x in range(len(params)/3)] #from each iteration of the gaussian equation into\n\tfor i in range(len(params)/3):#individual rows. 
So row one will be the gaussian solutions to the first peak and so on\n\t\tstorage[i] = gaus(xdata,amp[i],cen[i],stdv[i])\n\tstorage = num.asarray(storage)\n\treturn sum(storage)", "def eucl_alg(a, b):\n if a == 0:\n return b, 0, 1\n else:\n g, x, y = eucl_alg(b % a, a)\n return g, y - (b // a) * x, x", "def gauss_rule(iel, elemType, normal_order, element):\n\n #sctr = element[iel, :] # element connectivity\n\n if ((elemType == 'Q4') and (normal_order <8)):\n W, Q = gpw.gauss_pt_wt(normal_order,'GAUSS',2)\n return W, Q", "def linear_least_squares(M, v):\n \n B = copy(M)\n [m,n] = shape(B)\n if rank(B) != min(m,n):\n print('Warning: can not be solved since the rank of the matrix is not its maximum value')\n return nan\n else:\n \n A = copy(M)\n At = transpose(M)\n b = copy(v)\n b = transpose(b)\n \n AtA = dot(At, A)\n Atb = transpose(dot(At, b))\n print(AtA, Atb)\n \n x = gauss_elimination(AtA, Atb)\n print('x*:')\n return x", "def set_sobol_g_func():\n a = np.zeros(21)\n x = np.zeros(21) \n\n # a[0:2] = 0\n # a[2:9] = [0.005, 0.020, 0.040, 0.060, 0.08, 0.090, 1]\n # a[9:16] = 2\n # a[16:24] = [2.10, 2.25, 2.75, 3, 3.10, 3.15, 3.25, 3.50]\n # a[24:30] = 9\n # a[30:44] = [8, 8.5, 9, 10, 10.5, 11, 12, 12.5, 13, 13.5, 14, 14.5, 15, 16]\n # a[44:] = [70, 75, 80, 85, 90, 99]\n\n a[0:2] = 0\n a[2:4] = [0.005, 0.090]\n a[4:7] = 2\n a[7:11] = [2.10, 2.75, 3, 3.15]\n a[11:15] = [8, 13, 13.5, 16]\n a[15:] = [70, 75, 80, 85, 90, 99]\n\n x_names = ['x' + str(i+1) for i in range(21)]\n len_params = len(x_names)\n x_bounds = np.zeros((21, 2))\n x_bounds[:, 0] = 0\n x_bounds[:, 1] = 1\n\n problem = {\n 'num_vars': len(x),\n 'names': x_names,\n 'bounds': x_bounds\n }\n return a, x, x_bounds, x_names, len_params, problem", "def phi_gauss(self,x,i):\n s = 0.1\n return np.exp(-(x-self.mu[i])**2/(2*s))", "def solve(A, b, pivoting='partial'):\n M, N = A.shape\n Z = len(b)\n\n error_msg = \"[!] A must be square.\"\n assert (M == N), error_msg\n\n error_msg = \"[!] 
b must be {}D\".format(M)\n assert (Z == N), error_msg\n\n solver = LU(A, pivoting=pivoting)\n\n # solve for x\n x = solver.solve(b)\n\n return x", "def gss(f, args, a, b, tol=1e-5):\n invphi = (math.sqrt(5) - 1) / 2 # 1 / phi\n invphi2 = (3 - math.sqrt(5)) / 2 # 1 / phi^2\n\n (a, b) = (min(a, b), max(a, b))\n h = b - a\n if h <= tol:\n return (a, b)\n\n # Required steps to achieve tolerance\n n = int(math.ceil(math.log(tol / h) / math.log(invphi)))\n\n c = a + invphi2 * h\n d = a + invphi * h\n yc = f(c, args)\n yd = f(d, args)\n\n for k in range(n-1):\n if yc < yd:\n b = d\n d = c\n yd = yc\n h = invphi * h\n c = a + invphi2 * h\n yc = f(c, args)\n else:\n a = c\n c = d\n yc = yd\n h = invphi * h\n d = a + invphi * h\n yd = f(d, args)\n\n if yc < yd:\n return (a, d)\n else:\n return (c, b)", "def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)", "def gauss_seidel(L, U, S, x, k):\n\tn = len(x) - 1\n\tm = len(x)//2\n\t\n\t# Leftmost\n\tx[0] = (S[0]/k - U[0, 1]*x[1])/L[0, 0]\n\t# Interior\n\tfor i in range(1, m):\n\t\tx[i] = (S[i]/k - L[i, i - 1]*x[i - 1] - U[i, i + 1]*x[i + 1])/L[i, i]\n\tfor i in range(m, n):\n\t\tx[i] = (S[i]/k - L[i, i - 1]*x[i - 1] - U[i, i + 1]*x[i + 1] -\n\t\t L[i,i-m]*x[i-m]) / L[i,i]\n\t# Rightmost\n\tx[n] = (S[n]/k - L[n, n - 1]*x[n - 1] - L[n, n - m]*x[n - m])/L[n, n]\n\treturn x", "def Ax_b(A, b):\n x = Matrix([x1, x2])\n Ax = A*x\n Ax_b = Ax - b\n x = linsolve([Ax_b[0], Ax_b[1]], x1, x2)\n return tuple(*x)", "def solveLU(A, b):\n utils._checkDimensions(A, b)\n if utils.isSingular(A):\n raise utils.SingularityError(\"Input matrix is singular.\")\n L, U = LU(A)\n x_calculated = _solveX(L, U, b)\n\n acc = 10e-14\n accuracy_achieved = False\n while not accuracy_achieved:\n delb = b - np.matmul(A, x_calculated)\n delX = _solveX(L, U, delb)\n x_calculated = np.subtract(x_calculated, delX)\n if [x < acc for x in x_calculated]:\n accuracy_achieved = True\n return x_calculated", "def subsq(self, a, b):\n return ((a-b)**2)", "def GaussJordanElimination(matrix, vector):\r\n # Pivoting process\r\n matrix, vector = RowXchange(matrix, vector)\r\n # Inverse matrix calculation\r\n invert = InverseMatrix(matrix,vector)\r\n return MulMatrixVector(invert, vector)", "def gaussian_eliminate(coeffs, values):\r\n\r\n # The values num_rows and num_cols are the number of rows\r\n # and columns in the matrix, not the augmented matrix.\r\n num_rows = len(coeffs)\r\n num_cols = len(coeffs[0])\r\n\r\n # Build the agumented array.\r\n aug = []\r\n for r in range(0, num_rows):\r\n aug.append([])\r\n for value in coeffs[r]:\r\n aug[r].append(value)\r\n aug[r].append(float(values[r]))\r\n\r\n # Solve.\r\n for r in range(0, num_rows - 1):\r\n # Zero out all entries in column r after this row.\r\n # See if this row has a non-zero entry in column r.\r\n if abs(aug[r][r]) < TINY:\r\n # Too close to zero. Try to swap with a later row.\r\n for r2 in range(r + 1, num_rows):\r\n if abs(aug[r2][r]) > TINY:\r\n # This row will work. 
Swap them.\r\n for c in range(0, num_cols + 1):\r\n aug[r][c], aug[r2][c] = aug[r2][c], aug[r][c]\r\n break\r\n\r\n # See if aug[r][r] is still zero.\r\n if abs(aug[r][r]) < TINY:\r\n # No later row has a non-zero entry in this column.\r\n raise ValueError(\"There is no unique solution.\")\r\n\r\n # Zero out this column in later rows.\r\n for r2 in range(r + 1, num_rows):\r\n factor = -aug[r2][r] / aug[r][r]\r\n for c in range(r, num_cols + 1):\r\n aug[r2][c] = aug[r2][c] + factor * aug[r][c]\r\n\r\n # See if we have a solution.\r\n if abs(aug[num_rows - 1][num_cols - 1]) < TINY:\r\n # We have no solution.\r\n # See if all of the entries in this row are 0.\r\n all_zeros = True\r\n for c in range(0, num_cols + 2):\r\n if abs(aug[num_rows - 1][c]) > TINY:\r\n all_zeros = False\r\n break\r\n\r\n if all_zeros:\r\n raise ValueError(\"The solution is not unique.\")\r\n else:\r\n raise ValueError(\"There is no solution.\")\r\n\r\n # Back substitute.\r\n xs = [0 for c in range(num_rows)]\r\n for r in range(num_rows - 1, -1, -1):\r\n xs[r] = aug[r][num_cols]\r\n for r2 in range(r + 1, num_rows):\r\n xs[r] -= aug[r][r2] * xs[r2]\r\n xs[r] /= aug[r][r]\r\n return xs", "def gauss(self, X, xm, amp, w):\n return amp * np.exp(-((X - xm) / w) ** 2)", "def gauss2d(param, x, y):\n #2010-01-11 22:46 IJC: Created\n from numpy import array, abs, concatenate, exp\n x = array(x, dtype=float).copy()\n y = array(y, dtype=float).copy()\n p = array(param).copy()\n\n r = abs((x-p[2]) + 1j*(y-p[3]))\n\n if len(p)==4:\n p = concatenate((p, [0]))\n\n z = p[4] + p[0]/(p[1]*4*pi) * exp(-r**2 / (2*p[1]**2))\n \n return z", "def soft_constraint ( self , var , value , name = '' , title = '' ) :\n \n assert isinstance ( var , ROOT.RooAbsReal ) ,\\\n \"Invalid ``v'': %s/%s\" % ( var , type ( var ) ) \n assert isinstance ( value , VE ),\\\n \"Invalid ``value'': %s/%s\" % ( value , type ( value ) )\n\n assert 0 < value.cov2() , 'Invalid error for %s' % value\n \n name = name if name else 'Gauss_%s_%s' % ( var.GetName() , self.name ) \n title = title if title else 'Gaussian Constraint(%s,%s) at %s' % ( var.GetName() , self.name , value )\n \n # value & error as RooFit objects: \n val = ROOT.RooFit.RooConst ( value.value () )\n err = ROOT.RooFit.RooConst ( value.error () )\n \n # Gaussian constrains \n gauss = ROOT.RooGaussian ( self.var_name ( name ) , title , var , val , err )\n \n # keep all the created technical stuff \n self.aux_keep.append ( val )\n self.aux_keep.append ( err )\n self.aux_keep.append ( gauss )\n\n self.info ('Constraint is created %s=%s' % ( var.name , value ) )\n return gauss", "def eigh(a, b):\n a = symmetrize(a)\n b = symmetrize(b)\n b_inv_a = jax.scipy.linalg.cho_solve(jax.scipy.linalg.cho_factor(b), a)\n v, w = jax.jit(jax.numpy.linalg.eig, backend=\"cpu\")(b_inv_a)\n v = v.real\n # with loops.Scope() as s:\n # for _ in s.cond_range(jnp.isrealobj)\n if jnp.isrealobj(a) and jnp.isrealobj(b):\n w = w.real\n # reorder as ascending in w\n order = jnp.argsort(v)\n v = v.take(order, axis=0)\n w = w.take(order, axis=1)\n # renormalize so v.H @ b @ H == 1\n norm2 = jax.vmap(lambda wi: (wi.conj() @ b @ wi).real, in_axes=1)(w)\n norm = jnp.sqrt(norm2)\n w = w / norm\n w = standardize_angle(w, b)\n return v, w", "def test_numbers_can_substitute_scalars(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = Range('D', 0, 2)\n i, j, k, l = symbols('i j k l')\n dr.set_dumms(r, [i, j, k, l])\n v = p.v\n\n orig = dr.sum((i, r), x[i] ** 2 * x[j] * y[k] * v[l])\n\n 
res = orig.subst(x[i], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(x[j], 1, full_balance=full_balance).simplify()\n assert res == dr.sum(2 * y[k] * v[l])\n res = orig.subst(x[k], 2, full_balance=full_balance).simplify()\n assert res == dr.sum(16 * y[k] * v[l])", "def JacobiSolve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n x_new *= 0 #reset x_new\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def solve(self, b: Array, check_finite: Optional[bool] = None) -> Array:\n if check_finite is None:\n check_finite = self.check_finite\n if self.cho_factor:\n fact_solve = lambda x: jsl.cho_solve(self.factor, x, check_finite=check_finite)\n else:\n fact_solve = lambda x: jsl.lu_solve(self.factor, x, trans=0, check_finite=check_finite)\n\n if b.ndim == 1:\n D = self.D\n else:\n D = self.D[:, snp.newaxis]\n N, M = self.A.shape\n if N < M and self.D.ndim == 1:\n w = fact_solve(self.A @ (b / D))\n x = (b - (self.A.T.conj() @ w)) / D\n else:\n x = fact_solve(b)\n\n return x", "def gausspix(x, mean=0.0, sigma=1.0):\n edges = np.concatenate((x-0.5, x[-1:]+0.5))\n integrals = gaussint(edges, mean=mean, sigma=sigma)\n return integrals[1:] - integrals[0:-1]", "def klucb_gauss(x, d, sig2=1., precision=0.):\n return x + sqrt(2*sig2*d)", "def laplace(f, g_inv, g_det, X):\n r = 0\n for i in range(len(X)):\n for j in range(len(X)):\n r += g_inv[i, j]*f.diff(X[i]).diff(X[j])\n for sigma in range(len(X)):\n for alpha in range(len(X)):\n r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \\\n f.diff(X[alpha]) / (2*g_det)\n return r", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to permute x according 
to permutation matrix P\n \n return numpy.dot( P, x )", "def CG(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n r = b - np.dot(A,x)\n s = r.copy()\n while not(converged):\n denom = np.dot(s, np.dot(A,s))\n alpha = np.dot(s,r)/denom\n x = x + alpha*s \n r = b - np.dot(A,x)\n beta = - np.dot(r,np.dot(A,s))/denom\n s = r + beta * s\n relative_change = np.linalg.norm(r)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x", "def eval(self, x):\n y = list(x)\n if not self.biased:\n y.insert(0, 1.0)\n y = np.array(y).reshape((self.Ws[0].shape[1], 1))\n for W, g in zip(self.Ws, self.gs):\n y = g(np.dot(W, y))\n return y.flatten()", "def fun_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2))", "def globalized_sqp(self, x_0, dual_x_0):\n raise Exception(\"Not implemented in {0}\".format(self))", "def gauss_func(x, wid, cen, amp):\n\n return np.exp(-((x-cen)**2.)/(2.*wid**2)) * amp", "def subsquares(x):\n return subsquares.subsquares(x)", "def MatrixFreeBICGSTAB(A, b, x, tol=1e-6, maxiter=5000, quiet=True):\n\n if b.dtype != x.dtype:\n raise TaichiTypeError(f\"Dtype mismatch b.dtype({b.dtype}) != x.dtype({x.dtype}).\")\n if str(b.dtype) == \"f32\":\n solver_dtype = ti.f32\n elif str(b.dtype) == \"f64\":\n solver_dtype = ti.f64\n else:\n raise TaichiTypeError(f\"Not supported dtype: {b.dtype}\")\n if b.shape != x.shape:\n raise TaichiRuntimeError(f\"Dimension mismatch b.shape{b.shape} != x.shape{x.shape}.\")\n\n size = b.shape\n vector_fields_builder = ti.FieldsBuilder()\n p = ti.field(dtype=solver_dtype)\n p_hat = ti.field(dtype=solver_dtype)\n r = ti.field(dtype=solver_dtype)\n r_tld = ti.field(dtype=solver_dtype)\n s = ti.field(dtype=solver_dtype)\n s_hat = ti.field(dtype=solver_dtype)\n t = ti.field(dtype=solver_dtype)\n Ap = ti.field(dtype=solver_dtype)\n Ax = ti.field(dtype=solver_dtype)\n Ashat = ti.field(dtype=solver_dtype)\n if len(size) == 1:\n axes = ti.i\n elif len(size) == 2:\n axes = ti.ij\n elif len(size) == 3:\n axes = ti.ijk\n else:\n raise TaichiRuntimeError(f\"MatrixFreeBICGSTAB only support 1D, 2D, 3D inputs; your inputs is {len(size)}-D.\")\n vector_fields_builder.dense(axes, size).place(p, p_hat, r, r_tld, s, s_hat, t, Ap, Ax, Ashat)\n vector_fields_snode_tree = vector_fields_builder.finalize()\n\n scalar_builder = ti.FieldsBuilder()\n alpha = ti.field(dtype=solver_dtype)\n beta = ti.field(dtype=solver_dtype)\n omega = ti.field(dtype=solver_dtype)\n rho = ti.field(dtype=solver_dtype)\n rho_1 = ti.field(dtype=solver_dtype)\n scalar_builder.place(alpha, beta, omega, rho, rho_1)\n scalar_snode_tree = scalar_builder.finalize()\n succeeded = True\n\n @ti.kernel\n def init():\n for I in ti.grouped(x):\n r[I] = b[I] - Ax[I]\n r_tld[I] = b[I]\n p[I] = 0.0\n Ap[I] = 0.0\n Ashat[I] = 0.0\n rho[None] = 0.0\n rho_1[None] = 1.0\n alpha[None] = 1.0\n beta[None] = 1.0\n omega[None] = 1.0\n\n @ti.kernel\n def reduce(p: ti.template(), q: ti.template()) -> solver_dtype:\n result = solver_dtype(0.0)\n for I in ti.grouped(p):\n result += p[I] * q[I]\n return result\n\n @ti.kernel\n def copy(orig: ti.template(), dest: ti.template()):\n for I in ti.grouped(orig):\n dest[I] = orig[I]\n\n @ti.kernel\n def update_p():\n for I in ti.grouped(p):\n p[I] = r[I] + beta[None] * (p[I] - omega[None] * Ap[I])\n\n @ti.kernel\n def 
update_phat():\n for I in ti.grouped(p_hat):\n p_hat[I] = p[I]\n\n @ti.kernel\n def update_s():\n for I in ti.grouped(s):\n s[I] = r[I] - alpha[None] * Ap[I]\n\n @ti.kernel\n def update_shat():\n for I in ti.grouped(s_hat):\n s_hat[I] = s[I]\n\n @ti.kernel\n def update_x():\n for I in ti.grouped(x):\n x[I] += alpha[None] * p_hat[I] + omega[None] * s_hat[I]\n\n @ti.kernel\n def update_r():\n for I in ti.grouped(r):\n r[I] = s[I] - omega[None] * t[I]\n\n def solve():\n A._matvec(x, Ax)\n init()\n initial_rTr = reduce(r, r)\n rTr = initial_rTr\n if not quiet:\n print(f\">>> Initial residual = {initial_rTr:e}\")\n if sqrt(initial_rTr) >= tol: # Do nothing if the initial residual is small enough\n for i in range(maxiter):\n rho[None] = reduce(r, r_tld)\n if rho[None] == 0.0:\n if not quiet:\n print(\">>> BICGSTAB failed because r@r_tld = 0.\")\n succeeded = False\n break\n if i == 0:\n copy(orig=r, dest=p)\n else:\n beta[None] = (rho[None] / rho_1[None]) * (alpha[None] / omega[None])\n update_p()\n update_phat()\n A._matvec(p, Ap)\n alpha_lower = reduce(r_tld, Ap)\n alpha[None] = rho[None] / alpha_lower\n update_s()\n update_shat()\n A._matvec(s_hat, Ashat)\n copy(orig=Ashat, dest=t)\n omega_upper = reduce(t, s)\n omega_lower = reduce(t, t)\n omega[None] = omega_upper / (omega_lower + 1e-16) if omega_lower == 0.0 else omega_upper / omega_lower\n update_x()\n update_r()\n rTr = reduce(r, r)\n if not quiet:\n print(f\">>> Iter = {i+1:4}, Residual = {sqrt(rTr):e}\")\n if sqrt(rTr) < tol:\n if not quiet:\n print(f\">>> BICGSTAB method converged at #iterations {i}\")\n break\n rho_1[None] = rho[None]\n if rTr >= tol:\n if not quiet:\n print(f\">>> BICGSTAB failed to converge in {maxiter} iterations: Residual = {sqrt(rTr):e}\")\n succeeded = False\n\n solve()\n vector_fields_snode_tree.destroy()\n scalar_snode_tree.destroy()\n return succeeded", "def uncon(f, x0, tol, mu1=1e-6, mu2=0.99996, adef=1, amax=200, method='QN', \\\r\n search='BT'):\r\n f.calls = 0\r\n converged = False\r\n J, g = f(x0)\r\n p = -g.T\r\n gprev = g\r\n gnew = gprev\r\n x = x0\r\n miter = 0\r\n V = np.identity(len(x))\r\n switch = False\r\n n = x0.size\r\n oldhist = np.zeros((n, miter+1))\r\n oldhist[:, miter] = x0\r\n while not converged:\r\n if search == 'SW':\r\n x, J, gnew, a = linesearch(f, x, J, gnew, \\\r\n adef, amax, mu1, mu2, p)\r\n elif search == 'BT':\r\n x, J, gnew, a = backtrack(f, x, J, gnew, adef, 0.8, 1e-6, p)\r\n if method == 'QN':\r\n p, V, broken = quasinewton(x, V, a, p, gprev, gnew)\r\n if broken:\r\n method = 'CG'\r\n switch = True\r\n elif miter > 200:\r\n method = 'CG'\r\n elif method == 'CG':\r\n p = conjgradient(x, p, gprev, gnew)\r\n if switch is True:\r\n switch = False\r\n method = 'QN'\r\n elif method == 'SD':\r\n p = -g.T\r\n TAU = np.amax(np.abs(gnew))\r\n if TAU < tol:\r\n converged = True\r\n miter += 1\r\n newhist = np.zeros((n, miter+1))\r\n newhist[:, :miter] = oldhist\r\n newhist[:, miter] = x\r\n oldhist = newhist\r\n gprev = gnew\r\n output = {'alias': 'N-dimensional_Space_Alien', \\\r\n 'major_iterations': miter, \\\r\n 'objective': J, 'hess_inv': V, 'g-norm': TAU, \\\r\n 'func_calls': f.calls, 'hist': newhist}\r\n return x, J, output", "def _G_integrand(xip, B):\n try:\n from scipy.special import iv as besseli\n except:\n \"\"\" [PHIDL] To run this function you need scipy, please install it with\n pip install scipy \"\"\"\n return besseli(0, B*sqrt(1-xip**2))", "def glaucoma_hard(a=2.5,b=2.5,dprime_fnc=dprime_basic):\n def scotoma_fnc(distance):\n if 
in_ellipse(distance[0],distance[1],a,b):\n return dprime_fnc(distance)\n else:\n return SMALL_FLOAT\n return scotoma_fnc", "def gelu(x):\n return x * 0.5 * (1.0 + F.tanh(F.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))", "def attack(q, A, b, E, S=None):\n m = len(A)\n n = len(A[0])\n gf = GF(q)\n pr = gf[tuple(f\"x{i}\" for i in range(n))]\n gens = pr.gens()\n\n f = []\n for i in range(m):\n p = 1\n for e in E:\n p *= (b[i] - sum(A[i][j] * gens[j] for j in range(n)) - e)\n f.append(p)\n\n if S is not None:\n # Use information about the possible values for s to add more polynomials.\n for j in range(n):\n p = 1\n for s in S:\n p *= (gens[j] - s)\n f.append(p)\n\n s = []\n for p in pr.ideal(f).groebner_basis():\n assert p.nvariables() == 1 and p.degree() == 1\n s.append(int(-p.constant_coefficient()))\n\n return s", "def test_ln_gauss(self):\n Q = likelihood()\n test_object = test_values_for_likelihood()\n experimental_values = [0.9,2.1,3.2]\n errors = [0.5,0.4,0.3]\n reference_q_value = 0.27347222222222267\n reference_q_derivative = np.array([-3.75833333, -3.75833333])\n Q.add_observable(test_object,experimental_values,errors,scale=1.0)\n q_value, q_derivative = Q.compute_ln_gauss(np.array([1.0,1.0]))\n assert np.isclose(q_value,reference_q_value)\n assert np.all(np.isclose(q_derivative,reference_q_derivative))\n return", "def G(self, (k,t), (j,x), **params):\n d = len(x)/2\n q,dq = x[:d],x[d:]\n J = (j == True)\n _J = np.logical_not(J)\n # number of constraints\n n = len(J) \n # number of active constraints\n m = np.sum(J) # = n - len(a)\n a = self.a( (k,t), (_J,q), **params)\n lambda_ = self.lambda_( (k,t), (J,q,dq), **params)\n # unilateral constraint forces\n lambda_ = lambda_[:m] \n g = np.nan*np.zeros(n)\n g[_J] = a\n g[J] = lambda_\n return g" ]
[ "0.7353518", "0.71408343", "0.71251684", "0.70046204", "0.6569199", "0.64925224", "0.6439031", "0.63340366", "0.62848866", "0.6225892", "0.6141184", "0.61222094", "0.6090073", "0.6016392", "0.5969952", "0.5954091", "0.59377366", "0.5927988", "0.59233207", "0.59059817", "0.5830629", "0.5821877", "0.5712208", "0.5692663", "0.56926626", "0.5683645", "0.56747377", "0.5672946", "0.56709397", "0.5662022", "0.5643292", "0.5638223", "0.5635565", "0.56350607", "0.5615289", "0.5597338", "0.55901814", "0.558817", "0.55869734", "0.5586691", "0.5561759", "0.5552836", "0.5545363", "0.55445707", "0.55288917", "0.550888", "0.5490382", "0.548431", "0.5482966", "0.54789686", "0.5475032", "0.5471753", "0.54083544", "0.5400708", "0.5369813", "0.5357138", "0.53560627", "0.53500676", "0.5346413", "0.5345824", "0.5344627", "0.5320689", "0.5311547", "0.5309342", "0.5308962", "0.52910167", "0.5290726", "0.5273614", "0.5270736", "0.526308", "0.5262858", "0.52602714", "0.5254971", "0.52523535", "0.5250753", "0.5247823", "0.5247176", "0.5244168", "0.5235069", "0.52321345", "0.52265644", "0.5218725", "0.52137566", "0.52028984", "0.5202415", "0.5202088", "0.52011377", "0.51924205", "0.5184656", "0.5177722", "0.5176809", "0.51730675", "0.51711315", "0.51668775", "0.5166356", "0.51630884", "0.5161275", "0.5160046", "0.5158405", "0.51573235" ]
0.7767935
0
Solves [L][U]{x} = b, where [a] = [L\U] is the matrix returned from LUdecomp.
def LUsolve(a,b): b=float64(b) n=len(b) LU=LUdecomp(a) y=zeros((n,1)) x=zeros((n,1)) y[0]=b[0] for i in range(1,n): sum=b[i] for j in range(i): sum=sum-LU[i][j]*y[j] y[i]=sum x[n-1]=float(y[n-1])/LU[n-1][n-1] for i in range(n-2,-1,-1): sum=y[i] for j in range(i+1,n): sum=sum-LU[i][j]*x[j] x[i]=float(sum)/LU[i][i] return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LU_solve(A, d, b):\n \n\n L, U = L1U(A, d)\n\n y = rforwardsolve(L, b, d)\n x = rbackwardsolve(U, y, d)\n\n return x", "def lu_decom(A,b):\n # init\n n = len(b)\n L = np.eye(n)\n U = np.zeros((n,n))\n x = np.zeros(n)\n y = np.zeros(n)\n\n # decomposition A = LU\n\n U[0,:] = A[0,:]\n L[1:,0] = A[1:,0] / U[0,0]\n\n for i in range(1,n):\n for j in range(i,n):\n\n U[i,j] = A[i,j] - np.dot(L[i,:i],U[:i,j])\n\n if j != n-1:\n L[j+1,i] = (A[j+1,i] - np.dot(L[j+1,:i],U[:i,i])) / U[i,i]\n\n # solve Ly=b\n y[0] = b[0]\n\n for k in range(1,n):\n y[k] = b[k] - np.dot(L[k,:k],y[:k])\n\n # solve Ux=y\n x[-1] = y[-1] / U[-1,-1]\n\n for k in range(n-2,-1,-1):\n x[k] = (y[k] - np.dot(U[k,k+1:],x[k+1:])) / U[k,k]\n\n return x,L,U", "def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError", "def solveLU(A, b):\n utils._checkDimensions(A, b)\n if utils.isSingular(A):\n raise utils.SingularityError(\"Input matrix is singular.\")\n L, U = LU(A)\n x_calculated = _solveX(L, U, b)\n\n acc = 10e-14\n accuracy_achieved = False\n while not accuracy_achieved:\n delb = b - np.matmul(A, x_calculated)\n delX = _solveX(L, U, delb)\n x_calculated = np.subtract(x_calculated, delX)\n if [x < acc for x in x_calculated]:\n accuracy_achieved = True\n return x_calculated", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def SolveLU(matrix, vector):\r\n matrixU = UMatrix(matrix)\r\n matrixL = LMatrix(matrix)\r\n return MultiplyMatrix(InverseMatrix(matrixU), MultiplyMatrix(InverseMatrix(matrixL), vector))", "def lu_solve(A,b,tol=10**(-14)):\n\n\t# LU decomposition -- raise ValueError for singular matrix A\n\ttry:\n\t\tLU, piv = scipy.linalg.lu_factor(A)\n\n\t\t# enforce magnitude of diagonal values are above given tolernce (round off)\n\t\tfor di in np.diag(LU):\n\t\t\tif abs(di) <= tol: raise ValueError\n\n\texcept ValueError:\n\t\tlogger.error(\"Error 'Singular Matrix' passed method: %s\" % inverse.__name__)\n\n\t# Use decomposition to solve for x\n\tx = scipy.linalg.lu_solve((LU, piv), b)\n\n\t# return solution vector x\n\treturn x", "def LUdecomp(Ainput):\n\n n, m = np.shape(Ainput)\n \n if n != m:\n return 'Error: Please enter an invertible matrix.'\n \n U = Ainput.copy() # make copies so as not to write over 
originals\n L = np.zeros((np.shape(Ainput)))\n \n for i in range(0,n):\n L[i,i] = 1\n for i in range(0,n-1): # loop over pivot rows from row 1 to row n-1 (i to n-2)\n for j in range(i+1,n): # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n\n return (L,U) # return lower and upper decompositions", "def solve(A, b, pivoting='partial'):\n M, N = A.shape\n Z = len(b)\n\n error_msg = \"[!] A must be square.\"\n assert (M == N), error_msg\n\n error_msg = \"[!] b must be {}D\".format(M)\n assert (Z == N), error_msg\n\n solver = LU(A, pivoting=pivoting)\n\n # solve for x\n x = solver.solve(b)\n\n return x", "def decomposeLU(self):\n self.check_square()\n\n N = self.rows\n L = make_matrix(N, N)\n U = make_matrix(N, N)\n A = self #for more math friendly notation\n\n\n for j in range(N):\n L[j, j] = 1.0 #Doolittle factorization\n\n #e.g., if you are in column = 5, you go down 6 rows\n for i in range(j+1):\n U[i, j] = A[i, j] - sum(L[i, k] * U[k, j] for k in range(i))\n #e.g., if you are in column = 5,\n # you start at row 5 and go down for the lower triangular matrix\n for i in range(j, N):\n L[i, j] = (A[i, j] - sum(L[i, k] * U[k, j] for k in range(j))) / U[j, j]\n\n self.L = L\n self.U = U\n return L, U", "def solveU(U, b):\n # validate input\n if np.allclose(U,np.triu(U))==False or np.linalg.det == 0:\n raise TypeError(\"U is not an upper regular triangular matrix\")\n \n elif len(U.shape) != 2 or len(b.shape) != 1:\n raise TypeError(\"unsuitable object\")\n \n else:\n un, um = U.shape\n n, = b.shape\n if un != um or un != n:\n raise TypeError((\"dimensions do not fullfill requirements\"))\n\n # solve \n x = np.zeros(n, dtype=complex)\n x[-1] = (b[-1]) / U[n - 1, n - 1]\n for i in range(1, n):\n t = U[(n - (i + 1)):(n - i)] @ x\n x[-(i + 1)] = (b[-(i + 1)] - t) / U[n - (i + 1), n - (i + 1)]\n\n return x", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def trisolve(l, u, c, b):\n n = shape(b)[0]\n for k in range(1, n):\n b[k] -= l[k-1]*b[k - 1]\n b[n-1] /= u[n-1]\n for k in range(n-2,-1,-1):\n b[k] -= c[k]*b[k + 1]\n b[k] /= u[k]", "def test_LU(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n ML, MU = MA.decomposeLU()\n self.assertEqual(ML*MU, MA)\n self.assertTrue(ML.is_lower_triangular())\n self.assertTrue(MU.is_upper_triangular())", "def test_lu_factor():\n\t#[A, b] = lu_read('test1.txt')\n\t# it is poor form to read an external file into a test function, as above\n\tA = np.array([\n\t\t[ 2., 3., -4., 2.],\n\t\t[-4., -5., 6., -3.],\n\t\t[ 2., 2., 1., 0.],\n\t\t[-6., -7., 14., -4.]])\t\n\tLU,p = lu_factor(A, pivot=False)\n\tLU_soln = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\tassert norm(LU - LU_soln) < 1.e-10\t\n\n\n\t# test 2\n\t[A2, b2] = lu_read('test2.txt')\t\t\t\t\t\t# read a matrix and RHS vector\n\tLU2,p2 = lu_factor(A2) \t\t\t\t\t\t\t\t# change display to False when LU_FACTOR working\n\tLU_soln2 = np.array([\n\t\t 
[0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\tassert norm(LU2 - LU_soln2) < 1.e-10", "def usolve(self, ub):\n raise NotImplementedError", "def lu(matrix):\n SIZE = matrix.shape[0]\n BS = np.BLOCKSIZE\n\n if matrix.shape[0] != matrix.shape[0]:\n raise Exception(\"LU only supports squared matricis\")\n if not matrix.dist():\n raise Exception(\"The matrix is not distributed\")\n\n if(SIZE % np.BLOCKSIZE != 0):\n raise Exception(\"The matrix dimensions must be divisible \"\\\n \"with np.BLOCKSIZE(%d)\"%np.BLOCKSIZE)\n\n (prow,pcol) = matrix.pgrid()\n A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix\n L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n U = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpL = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpU = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n for k in xrange(0,SIZE,BS):\n bs = min(BS,SIZE - k) #Current block size\n kb = k / BS # k as block index\n\n #Compute vertical multiplier\n slice = ((kb,kb+1),(kb,kb+1))\n for a,l,u in zip(A.blocks(slice), L.blocks(slice), U.blocks(slice)):\n (p,tl,tu) = linalg.lu(a)\n if not (np.diag(p) == 1).all():#We do not support pivoting\n raise Exception(\"Pivoting was needed!\")\n #There seems to be a transpose bug in SciPy's LU\n l[:] = tl.T\n u[:] = tu.T\n\n #Replicate diagonal block horizontal and vertical\n for tk in xrange(k+bs,SIZE,BS):\n tbs = min(BS,SIZE - tk) #Current block size\n L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]\n U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]\n\n if k+bs < SIZE:\n #Compute horizontal multiplier\n slice = ((kb,kb+1),(kb+1,SIZE/BS))\n for a,u in zip(A.blocks(slice), U.blocks(slice)):\n u[:] = np.linalg.solve(u.T,a.T).T\n\n #Compute vertical multiplier\n slice = ((kb+1,SIZE/BS),(kb,kb+1))\n for a,l in zip(A.blocks(slice), L.blocks(slice)):\n l[:] = np.linalg.solve(l,a)\n\n #Apply to remaining submatrix\n A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),\n bo=(k,k+bs), co=(k+bs,k+bs))\n\n return (L, U)", "def lu_decomposition(self):\n if self.rows_count() != self.columns_count():\n raise ValueError(\"Matrix needs to me square for LU decomposition.\")\n for i in range(self.rows_count() - 1):\n for j in range(i + 1, self.rows_count()):\n if self[i, i] == 0: # or abs(self[i, i]) <= 0.000001):\n raise ValueError(\"Can't divide by 0\")\n self[j, i] = self[j, i] / self[i, i]\n for k in range(i + 1, self.rows_count()):\n self[j, k] -= self[j, i] * self[i, k]", "def LU(A):\n m, n = A.shape\n L, U = np.zeros([m, n]), np.zeros([m, n])\n for i in range(n):\n L[i][i] = 1\n\n for i in range(n):\n\n # Upper triangular matrix\n for j in range(i, n):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*U[k][j]\n U[i][j] = A[i][j] - summ\n\n # Lower triangular matrix\n for j in range(i+1, n):\n summ = 0\n for k in range(0, i):\n summ += L[j][k]*U[k][i]\n L[j][i] = (A[j][i] - 
summ)/U[i][i]\n return L, U", "def L1U(A, d):\n \n\n n, _ = A.shape\n L = np.eye(n, n, dtype=A.dtype)\n U = np.zeros((n, n), dtype=A.dtype)\n\n U[0, 0] = A[0, 0]\n for k in range(1, n):\n km = max(0, k-d)\n L[k, km : k] = np.transpose(rforwardsolve(np.transpose(U[km:k, km:k]),\\\n np.transpose(A[k, km:k]), d))\n U[km:k+1, k] = rforwardsolve(L[km:k+1, km:k+1], A[km:k+1, k], d)\n return L, U", "def L1U(A, d):\n n = shape(A)[0]\n L = eye(n)\n U = matrix(zeros((n,n))); U[0,0] = A[0,0]\n for k in range(1,n):\n km = array([0, k - d]).max()\n if km < k:\n L[k, km:k] = A[k, km:k]\n rforwardsolve(U[km:k, km:k].T, L[k, km:k].T, d) # L\n U[km:(k + 1), k] = A[km:(k + 1), k]\n rforwardsolve(L[km:(k + 1), km:(k + 1)], U[km:(k + 1), k], d) # U\n return L, U", "def solve_lin(matrix_u,vector_d):\n m_np = np.array(matrix_u)\n v_np = np.array(vector_d)\n\n return np.linalg.solve(m_np, v_np)", "def test_lu_forward_sub():\t\n\t# test 1\n\tL = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\n\tb = np.array([4, -8, 9, 6])\n\n\ty = lu_forward_sub(L, b) \t\t\n\ty_soln = np.array([4,0,5,8])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y - y_soln) < 1.e-10\n\n\t# test 2\n\tL2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\n\tb2 = np.array ([[1.01], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [0.]])\n\n\ty2 = lu_forward_sub(L2, b2) \t\t\n\ty_soln2 = np.array([1.01, -101.99, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 99])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y2 - y_soln2) < 1.e-10", "def solve(self, A, B):\n return tf.matrix_solve_ls(matrix=A, rhs=B)", "def solve_L(L, b):\n n = b.size\n assert L.shape == (n,n)\n x = zeros(n)\n for i in range(n):\n x[i] = (b[i] - dot(x[:i], L[i,:i])) / L[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x", "def housetriang_solve(A, b):\n\n n, _ = A.shape\n b = np.reshape(b.copy(), (n, 1))\n R, c = housetriang(A, b)\n x = np.reshape(rbackwardsolve(R, c, n), (n,))\n\n\n return x", "def solver(mesh, model, ele, nodal_load):\r\n A = kinematics.A_matrix(model, ele)\r\n\r\n Ks = stiffness.Ks_matrix(model, ele)\r\n\r\n K = np.dot(A.T, np.dot(Ks, A))\r\n\r\n P = load.P_vector(model, nodal_load)\r\n\r\n Kf, Pf = index.fdof(model, K, P)\r\n\r\n Uf = np.linalg.solve(Kf, Pf)\r\n\r\n U = index.tdof(model, Uf)\r\n\r\n V = np.dot(A, U)\r\n\r\n Q = np.dot(Ks, V)\r\n\r\n return U, Q", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n 
eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def solve(self):\n is_valid = self.verify_sub_matrixes()\n \n if not is_valid:\n raise ValueError((\n \"El determinante es igual a cero \"\n \"el método no puede continuar\"\n ))\n \n (lower, upper) = self.doolittle_factorization()\n\n lower_solution_vector = lower.solve_matrix(matrix=None, vector=self.vector.vector)\n lower_solution_vector.print_vector()\n upper_solution_vector = upper.solve_matrix(\n matrix=None, vector=lower_solution_vector.vector)\n upper_solution_vector.print_vector()\n\n comprobation = self.matrix.comprobation(upper_solution_vector.vector)\n return comprobation", "def lu_solve(self, rhs):\n if self.shape[0] != rhs.shape[0]:\n raise DMShapeError(\"Shape\")\n if not self.domain.is_Field:\n raise DMNotAField('Not a field')\n sol = self.rep.lu_solve(rhs.rep)\n return self.from_rep(sol)", "def singular_solve(U, e, V, b):\n # Calculate S * V^T * x = U^T * b\n y = np.dot(np.transpose(U), b)\n\n for i in range(len(y)):\n y[i] /= e[i]\n\n # Solve\n x = np.dot(V, y)\n\n return x", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation 
Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def gem_solve(A, b):\r\n\tstart = time()\r\n\tn = len(A)\r\n\tU = [[0.0 for k in range(n)] for k in range(n)]\r\n\tfor k in range(n):\r\n\t\tfor i in range(k+1,n):\r\n\t\t\tA[i][k] = A[i][k]/A[k][k]\r\n\t\t\tb[i] = b[i] - A[i][k]*b[k]\r\n\t\tfor j in range(k+1,n):\r\n\t\t\tfor i in range(k+1, n):\r\n\t\t\t\tA[i][j] = A[i][j]-A[i][k]*A[k][j]\r\n\t\t\t\t\r\n\tfor i in range(n):\r\n\t\tfor j in range(n):\r\n\t\t\tif i>j:\r\n\t\t\t\tU[i][j] = 0\r\n\t\t\telse:\r\n\t\t\t\tU[i][j] = A[i][j]\r\n\t\r\n\tx, place = backward(U, b)\r\n\tend = time()\r\n\treturn x, (end-start)", "def solve_matrix(M, b):\n\n try:\n x = np.linalg.solve(M, b)\n except np.LinAlgError:\n print(\"ERR: Matrix is singular\")\n return None\n\n if not np.allclose(np.dot(M, x), b):\n print(\"ERR: Matrix is inconsistent (most likely with the independent sources)\")\n return None\n \n return x", "def do(self, a, b):\n if csingle == a.dtype or cdouble == a.dtype:\n raise SkipTest\n\n x_lo = gula.chosolve(a, b, UPLO='L')\n x_up = gula.chosolve(a, b, UPLO='U')\n assert_almost_equal(x_lo, x_up)\n # inner1d not defined for complex types\n # todo: implement alternative test\n assert_almost_equal(b, gula.matrix_multiply(a, x_lo))\n assert_almost_equal(b, gula.matrix_multiply(a, x_up))", "def _solve(self, mu=None):\n pass", "def Solve(M, b):\n\tm2 = [row[:]+[right] for row,right in zip(M,b) ]\n\treturn [row[-1] for row in m2] if gauss_jordan(m2) else None", "def solveLinearSystem(aMat, bMat):\n numRow = aMat.rows\n dummyVec = mkVector(\"x\", numRow)\n dummySymbols = [v for v in dummyVec]\n #\n system = aMat, bMat\n result = sympy.linsolve(system, *dummyVec)\n lst = flatten(result)\n # Handle case of multiple solutions\n subs = {s: 1 for s in lst if s in dummySymbols}\n return evaluate(sympy.Matrix(lst), subs=subs)", "def doolittle_factorization(self, matrix=None):\n try:\n local_matrix = self.matrix.matrix if not matrix else matrix.matrix\n except AttributeError:\n local_matrix = matrix\n \n (result, upper, lower, temp_sum) = (0.0, [[]], [[]], 0.0)\n for i in range(len(local_matrix)):\n lower.append([])\n for j in range(len(local_matrix[i])):\n lower[i].append(0)\n \n lower.remove([])\n\n for i in range(len(local_matrix)):\n for j in 
range(len(local_matrix[i])):\n temp_sum = 0.0\n for k in range(i):\n temp_sum += lower[i][k] * upper[k][j]\n result = local_matrix[i][j] - temp_sum\n upper[i].append(round(result, 9))\n upper.append([])\n \n for j in range(len(local_matrix[i])):\n temp_sum = 0.0\n for k in range(i):\n temp_sum += lower[j][k] * upper[k][i]\n result = local_matrix[j][i] - temp_sum\n lower[j][i] = round(result / upper[i][i], 9)\n\n upper.remove([])\n upper = Matrix(upper, name=\"U\")\n lower = Matrix(lower, name=\"L\")\n\n print(\"Las matrices son: \")\n upper.print_matrix()\n lower.print_matrix()\n\n print(\"Al multiplicarlas queda\")\n comprobation = lower.multiply_matrix(upper.matrix)\n comprobation.set_name(\"comprobación de la factorización LU\")\n comprobation.print_matrix()\n\n return (lower, upper)", "def solver(I, f, c, bc, Lx, Ly, nx, ny, dt, tstop,\n user_action=None, \n implementation={'ic': 'vec', # or 'scalar' or 'weave'\n 'inner': 'vec',\n 'bc': 'vec',\n 'storage': 'f77'},\n verbose=True):\n dx = Lx/float(nx)\n dy = Ly/float(ny)\n x = linspace(0, Lx, nx+1) # grid points in x dir\n y = linspace(0, Ly, ny+1) # grid points in y dir\n xv = x[:,newaxis] # for vectorized function evaluations\n yv = y[newaxis,:]\n\n if dt <= 0: # max time step?\n dt = (1/float(c))*(1/sqrt(1/dx**2 + 1/dy**2))\n Cx = c*dt/dx**2; Cy = c*dt/dy**2 # help variables\n\n up = zeros((nx+1,ny+1)) # solution array\n u = up.copy() # solution at t-dt\n\n # use scalar implementation mode if no info from user:\n if 'ic' not in implementation:\n implementation['ic'] = 'scalar'\n if 'bc' not in implementation:\n implementation['bc'] = 'scalar'\n if 'inner' not in implementation:\n implementation['inner'] = 'scalar'\n\n if 'weave' in implementation.itervalues() or \\\n 'f77' in implementation.itervalues():\n # we avoid callback to Python and require f, bc, and I to be\n # string formulas:\n print f, bc, I\n if not isinstance(f, StringFunction) or \\\n not isinstance(bc, StringFunction) or \\\n not isinstance(I, StringFunction):\n raise TypeError, \\\n 'with Weave or F77, f, bc, and I must be StringFunction'\n\n if 'f77' in implementation.itervalues():\n make_f77(f, bc, I) # build F77 module\n import f77\n # unified names with py versions:\n ic_f77 = f77.ic_f77\n bc_f77 = f77.bc_f77\n scheme_f77 = f77.scheme_f77\n \n # turn arrays to column major storage after the init. 
cond.\n\n # set initial condition:\n t0 = time.clock()\n t = 0.0\n print '***', implementation['ic']\n func = 'ic_'+implementation['ic']\n if func == 'ic_vec':\n u = eval(func)(u, I, xv, yv)\n elif func == 'ic_f77':\n u = eval(func)(u, x, y)\n else:\n u = eval(func)(u, I, x, y)\n t_ic = time.clock() - t0\n \n\n if implementation['inner'] == 'f77':\n # turn input arrays to Fortran storage for all arrays\n # that are input arrays in loop subroutine\n # (actually not necessary as up, u, and um are all fed\n # through the f77.loop routine and brought to column\n # major storage in turn - recall um=u, u=up, up=um)\n if implementation.get('storage', 'f77') == 'f77':\n up = asarray(up, order='Fortran')\n u = asarray(u, order='Fortran')\n\n if user_action is not None:\n user_action(u, xv, yv, t) # allow user to plot etc.\n\n t_inner = 0 # CPU time inner loops\n t_bc = 0 # CPU time boundary update\n \n while t <= tstop:\n t_old = t; t += dt\n if verbose:\n print 'solving (%s version) at t=%g' % \\\n (implementation['inner'], t)\n\n t0 = time.clock()\n # update all inner points:\n func = 'scheme_'+implementation['inner']\n if func == 'scheme_vec':\n up = eval(func)(up, u, um, f, xv, yv, t, Cx, Cy, dt2, t_old)\n elif func == 'scheme_f77':\n up = eval(func)(up, u, um, x, y, t, Cx, Cy, dt2, t_old)\n else:\n up = eval(func)(up, u, um, f, x, y, t, Cx, Cy, dt2, t_old)\n\n #id_u = id(u); id_um = id(um)\n #up,u,um = f77.loop(up, u, um, f_array, Cx, Cy, dt2)\n #print 'u changed:', id_u!=id(u),\n #print 'um changed:', id_um!=id(um),\n t_inner += time.clock() - t0\n\n t0 = time.clock()\n # insert boundary conditions:\n func = 'bc_'+implementation['bc']\n if func == 'bc_f77':\n up = eval(func)(up, x, y, t)\n else:\n up = eval(func)(up, bc, x, y, t)\n t_bc += time.clock() - t0\n \n if user_action is not None:\n user_action(up, xv, yv, t)\n # update data structures for next step:\n u, up = u, up\n\n # dt might be computed in this function\n return dt, t_ic, t_inner, t_bc", "def _lapack_linalg(field: Type[Array], a: Array, b: Array, function, out=None, n_sum=None) -> Array:\n assert field._is_prime_field\n\n # Determine the return data-type which is the minimum of the two inputs' data-types\n if np.object_ in [a.dtype, b.dtype]:\n return_dtype = np.object_\n else:\n return_dtype = a.dtype if np.iinfo(a.dtype).max < np.iinfo(b.dtype).max else b.dtype\n\n a = a.view(np.ndarray)\n b = b.view(np.ndarray)\n\n # Determine the minimum dtype to hold the entire product and summation without overflowing\n if n_sum is None:\n n_sum = 1 if len(a.shape) == 0 else max(a.shape)\n max_value = n_sum * (field.characteristic - 1) ** 2\n dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= max_value]\n dtype = np.object_ if len(dtypes) == 0 else dtypes[0]\n a = a.astype(dtype)\n b = b.astype(dtype)\n\n # Compute result using native NumPy LAPACK/BLAS implementation\n if function in [np.inner, np.vdot]:\n # These functions don't have and `out` keyword argument\n c = function(a, b)\n else:\n c = function(a, b, out=out)\n c = c % field.characteristic # Reduce the result mod p\n\n if np.isscalar(c):\n # TODO: Sometimes the scalar c is a float?\n c = field(int(c), dtype=return_dtype)\n else:\n c = field._view(c.astype(return_dtype))\n\n return c", "def solve_lyap_lrcf(A, E, B, trans=False, options=None, default_solver=None):\n\n _solve_lyap_lrcf_check_args(A, E, B, trans)\n if default_solver is None:\n default_solver = 'pymess_lradi' if A.source.dim >= mat_eqn_sparse_min_size() else 'pymess_glyap'\n options = 
_parse_options(options, lyap_lrcf_solver_options(), default_solver, None, False)\n\n if options['type'] == 'pymess_glyap':\n X = solve_lyap_dense(to_matrix(A, format='dense'),\n to_matrix(E, format='dense') if E else None,\n B.to_numpy().T if not trans else B.to_numpy(),\n trans=trans, options=options)\n Z = _chol(X)\n elif options['type'] == 'pymess_lradi':\n opts = options['opts']\n opts.type = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE\n eqn = LyapunovEquation(opts, A, E, B)\n Z, status = pymess.lradi(eqn, opts)\n relres = status.res2_norm / status.res2_0\n if relres > opts.adi.res2_tol:\n logger = getLogger('pymor.bindings.pymess.solve_lyap_lrcf')\n logger.warning(f'Desired relative residual tolerance was not achieved '\n f'({relres:e} > {opts.adi.res2_tol:e}).')\n else:\n raise ValueError(f'Unexpected Lyapunov equation solver ({options[\"type\"]}).')\n\n return A.source.from_numpy(Z.T)", "def solve(self, b: Array, check_finite: Optional[bool] = None) -> Array:\n if check_finite is None:\n check_finite = self.check_finite\n if self.cho_factor:\n fact_solve = lambda x: jsl.cho_solve(self.factor, x, check_finite=check_finite)\n else:\n fact_solve = lambda x: jsl.lu_solve(self.factor, x, trans=0, check_finite=check_finite)\n\n if b.ndim == 1:\n D = self.D\n else:\n D = self.D[:, snp.newaxis]\n N, M = self.A.shape\n if N < M and self.D.ndim == 1:\n w = fact_solve(self.A @ (b / D))\n x = (b - (self.A.T.conj() @ w)) / D\n else:\n x = fact_solve(b)\n\n return x", "def solve(self, A, b):\n if is_sparse(A) or is_sparse(b):\n A, b = A.tocsc(), b.tocsc()\n x = sparse.COO(scipy.sparse.linalg.spsolve(A, b))\n else:\n x = np.linalg.solve(A, b)\n\n return x", "def linsolve(A, b, symmetric=True):\n try:\n F = b.asarray()\n except AttributeError:\n F = np.asarray(b)\n\n use_np_solve = not symmetric or flapack is None\n x, info = None, 1\n if not use_np_solve:\n c, x, info = flapack.dposv(A, F, lower=0, overwrite_a=0, overwrite_b=0)\n if info < 0:\n raise ValueError(\n \"ILLEGAL VALUE IN {0}-TH ARGUMENT OF \" \"INTERNAL DPOSV\".format(-info)\n )\n if info != 0:\n use_np_solve = True\n\n if use_np_solve:\n try:\n x = la.solve(A, F)\n info = 0\n except la.LinAlgError:\n raise RuntimeError(\"ATTEMPTING TO SOLVE UNDER CONSTRAINED SYSTEM\")\n\n if info > 0:\n tty.warn(\"LINSOLVE FAILED, USING LEAST SQUARES \" \"TO SOLVE SYSTEM\")\n x = la.lstsq(A, F)[0]\n\n return x", "def solver(\n kappa, f, u_D, Nx, Ny, degree=1,\n linear_solver='Krylov', # Alternative: 'direct'\n abs_tol=1E-5, # Absolute tolerance in Krylov solver\n rel_tol=1E-3, # Relative tolerance in Krylov solver\n max_iter=1000, # Max no of iterations in Krylov solver\n log_level=PROGRESS, # Amount of solver output\n dump_parameters=False, # Write out parameter database?\n ):\n # Create mesh and define function space\n mesh = UnitSquareMesh(Nx, Ny)\n V = FunctionSpace(mesh, 'P', degree)\n\n def boundary(x, on_boundary):\n return on_boundary\n\n bc = DirichletBC(V, u_D, boundary)\n\n # Define variational problem\n u = TrialFunction(V)\n v = TestFunction(V)\n a = kappa*dot(grad(u), grad(v))*dx\n L = f*v*dx\n\n # Compute solution\n u = Function(V)\n\n if linear_solver == 'Krylov':\n prm = parameters['krylov_solver'] # short form\n prm['absolute_tolerance'] = abs_tol\n prm['relative_tolerance'] = rel_tol\n prm['maximum_iterations'] = max_iter\n print(parameters['linear_algebra_backend'])\n set_log_level(log_level)\n if dump_parameters:\n info(parameters, True)\n solver_parameters = {'linear_solver': 'gmres',\n 'preconditioner': 
'ilu'}\n else:\n solver_parameters = {'linear_solver': 'lu'}\n\n solve(a == L, u, bc, solver_parameters=solver_parameters)\n return u", "def solve_system(self, rhs, factor, u0, t):\n\n me = self.dtype_u(self.init)\n L = splu(sp.eye(self.params.nvars, format='csc') + factor * self.A)\n me[:] = L.solve(rhs)\n return me", "def solve_cholesky(A, b, debug=False):\n L = cholesky(A, reveal_diagonal=debug)\n if debug:\n Optimizer.stat('L', L)\n x = solve_lower(L, b)\n if debug:\n Optimizer.stat('intermediate', x)\n return solve_upper(L.transpose(), x)", "def _solve_lambert(f, symbol, gens):\n nrhs, lhs = f.as_independent(symbol, as_Add=True)\n rhs = -nrhs\n\n lamcheck = [tmp for tmp in gens\n if (tmp.func in [exp, log] or\n (tmp.is_Pow and symbol in tmp.exp.free_symbols))]\n if not lamcheck:\n raise NotImplementedError()\n\n if lhs.is_Mul:\n lhs = expand_log(log(lhs))\n rhs = log(rhs)\n\n lhs = factor(lhs, deep=True)\n # make sure we have inverted as completely as possible\n r = Dummy()\n i, lhs = _invert(lhs - r, symbol)\n rhs = i.xreplace({r: rhs})\n\n # For the first ones:\n # 1a1) B**B = R != 0 (when 0, there is only a solution if the base is 0,\n # but if it is, the exp is 0 and 0**0=1\n # comes back as B*log(B) = log(R)\n # 1a2) B*(a + b*log(B))**p = R or with monomial expanded or with whole\n # thing expanded comes back unchanged\n # log(B) + p*log(a + b*log(B)) = log(R)\n # lhs is Mul:\n # expand log of both sides to give:\n # log(B) + log(log(B)) = log(log(R))\n # 1b) d*log(a*B + b) + c*B = R\n # lhs is Add:\n # isolate c*B and expand log of both sides:\n # log(c) + log(B) = log(R - d*log(a*B + b))\n\n soln = []\n mainlog = _mostfunc(lhs, log, symbol)\n if mainlog:\n if lhs.is_Mul and rhs != 0:\n soln = _lambert(log(lhs) - log(rhs), symbol)\n elif lhs.is_Add:\n other = lhs.subs({mainlog: 0})\n if other and not other.is_Add and [\n tmp for tmp in other.atoms(Pow)\n if symbol in tmp.free_symbols]:\n if not rhs:\n diff = log(other) - log(other - lhs)\n else:\n diff = log(lhs - other) - log(rhs - other)\n soln = _lambert(expand_log(diff), symbol)\n else:\n # it's ready to go\n soln = _lambert(lhs - rhs, symbol)\n\n # 2) d*p**(a*B + b) + c*B = R\n # collect on main pow\n # log(R - c*B) - a*B*log(p) = log(d) + b*log(p)\n\n if not soln:\n mainpow = _mostfunc(lhs, Pow, symbol)\n if mainpow and symbol in mainpow.exp.free_symbols:\n lhs = collect(lhs, mainpow)\n if lhs.is_Mul and rhs != 0:\n soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)\n elif lhs.is_Add:\n # move all but mainpow-containing term to rhs\n other = lhs.subs({mainpow: 0})\n mainterm = lhs - other\n rhs = rhs - other\n diff = log(mainterm) - log(rhs)\n soln = _lambert(expand_log(diff), symbol)\n\n if not soln:\n raise NotImplementedError(f'{f} does not appear to have a solution in '\n 'terms of LambertW')\n\n return list(ordered(soln))", "def lu_solve(self, rhs):\n if not self.domain == rhs.domain:\n raise DMDomainError(\"Domains must match: %s != %s\" % (self.domain, rhs.domain))\n\n # XXX: As for inv we should consider whether to return a matrix over\n # over an associated field or attempt to find a solution in the ring.\n # For now we follow the existing DomainMatrix convention...\n if not self.domain.is_Field:\n raise DMDomainError(\"Field expected, got %s\" % self.domain)\n\n m, n = self.shape\n j, k = rhs.shape\n if m != j:\n raise DMShapeError(\"Matrix size mismatch: %s * %s vs %s * %s\" % (m, n, j, k))\n sol_shape = (n, k)\n\n # XXX: The Flint solve method only handles square matrices. 
Probably\n # Flint has functions that could be used to solve non-square systems\n # but they are not exposed in python-flint yet. Alternatively we could\n # put something here using the features that are available like rref.\n if m != n:\n return self.to_ddm().lu_solve(rhs.to_ddm()).to_dfm()\n\n try:\n sol = self.rep.solve(rhs.rep)\n except ZeroDivisionError:\n raise DMNonInvertibleMatrixError(\"Matrix det == 0; not invertible.\")\n\n return self._new(sol, sol_shape, self.domain)", "def deterministic_solve(LEP):\n # solve state equation\n SE = LEP.SE(LEP.u, LEP.phi_n[0], LEP.v_u, LEP.g)\n solve(lhs(SE) == rhs(SE), LEP.u_n, bcs=LEP.bcSE, solver_parameters={\"linear_solver\": \"umfpack\", \"preconditioner\": \"default\"}, form_compiler_parameters=None)\n\n # solve adjoint equation\n LEP.p_n.assign(LEP.u_n) # p_n = u_n\n\n # solve gradient equation\n GE = LEP.GE(LEP.phi, LEP.phi_n, LEP.v_phi, LEP.p_n, LEP.u_n, LEP.tau[0], LEP.gamma)\n solve(lhs(GE) == rhs(GE), LEP.phi_next, bcs=None, solver_parameters={\"linear_solver\": \"umfpack\", \"preconditioner\": \"default\"}, form_compiler_parameters=None)\n\n LEP.project_phi()\n\n print(\"Phi_n as Vector:\", LEP.phi_next.vector()[:])\n print(\"Länge von Phi:\", len(LEP.phi_next.vector()[:]))\n\n J_eps = LEP.J_eps(LEP.u_n, LEP.phi_n[0])\n\n return J_eps", "def tensorsolve(a, b, axes=None):\n\n return TensorSolve(axes)(a, b)", "def solve(a, b):\n #-> getrf + getrs\n a, _, _ = get_computation_matrix(a)\n b, cv2, isM2 = get_computation_matrix(b)\n if a.get_dtype() != b.get_dtype():\n raise TypeError(\"solve: dtype of a and b are not compatible!\")\n if a.numRows() != a.numCols():\n raise ValueError(\"solve: input a is not a square matrix!\")\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (_, _, x, _) = gesv(a, b, overwrite_a=1, overwrite_b=1, dtype=t_dtype)\n\n if cv2:\n if isM2:\n return x.to_numpy_matrix()\n else:\n return x.to_numpy_array()\n else:\n return x", "def rbackwardsolve(A, b, d):\n\n n = len(b)\n if np.iscomplexobj(A) or np.iscomplexobj(b):\n A = A.astype('complex128')\n b = b.astype('complex128')\n x = b.copy()\n x[n-1] = b[n-1] / A[n-1, n-1]\n\n for k in range(n-2, -1, -1):\n uk = min(n-1, k+d)\n x[k] = (b[k] - np.dot(A[k, k+1:uk+1], x[k+1:uk+1])) / A[k, k]\n\n return x", "def deterministic_NN_solve(LEP,model):\n # solve state equation\n SE = LEP.SE(LEP.u, LEP.phi_n[0], LEP.v_u, LEP.g)\n solve(lhs(SE) == rhs(SE), LEP.u_n, bcs=LEP.bcSE, solver_parameters={\"linear_solver\": \"umfpack\", \"preconditioner\": \"default\"}, form_compiler_parameters=None)\n\n # solve adjoint equation\n LEP.p_n.assign(LEP.u_n) # p_n = u_n\n\n # solve gradient equation\n NN_gradienten_step(LEP,model)\n LEP.project_phi()\n\n J_eps = LEP.J_eps(LEP.u_n, LEP.phi_n[0])\n\n return J_eps", "def gel_solve(\n A,\n y,\n l_1,\n l_2,\n ns,\n b_init=None,\n block_solve_fun=block_solve_agd,\n block_solve_kwargs=None,\n max_cd_iters=None,\n rel_tol=1e-6,\n Cs=None,\n Is=None,\n verbose=False,\n):\n p = len(A)\n m = len(y)\n device = A[0].device\n dtype = A[0].dtype\n y = y.to(device, dtype)\n if block_solve_kwargs is None:\n block_solve_kwargs = dict()\n\n # Create initial values if not specified.\n if b_init is None:\n b_init = 0.0, torch.zeros(p, max(ns), device=device, dtype=dtype)\n\n if not isinstance(ns, torch.Tensor):\n ns = torch.tensor(ns)\n sns = ns.to(device, dtype).sqrt()\n a_1 = l_1 * sns\n ma_1 = m * a_1\n a_2 = 2 * l_2 * sns\n b_0, B = b_init\n b_0_prev, B_prev = b_0, B\n k = 1 # iteration number\n pbar_stats = {} # stats for the outer progress bar\n pbar = 
tqdm.tqdm(\n desc=\"Solving gel with CD (l_1 {:.2g}, l_2 {:.2g})\".format(l_1, l_2),\n disable=not verbose,\n )\n\n while True:\n # First minimize with respect to b_0. This has a closed form solution\n # given by b_0 = 1'@(y - sum_j A_j@b_j) / m.\n b_0 = (y - sum(A[j] @ B[j, : ns[j]] for j in range(p))).sum() / m\n\n # Now, minimize with respect to each b_j.\n for j in tqdm.trange(\n p, desc=\"Solving individual blocks\", disable=not verbose, leave=False\n ):\n r_j = y - b_0 - sum(A[k] @ B[k, : ns[k]] for k in range(p) if k != j)\n\n # Check if b_j must be set to 0. The condition is ||A_j'@r_j|| <=\n # m*a_1.\n if (A[j].t() @ r_j).norm(p=2) <= ma_1[j]:\n B[j] = 0\n else:\n # Otherwise, minimize. First make sure initial value is not 0.\n if len((B[j, : ns[j]].abs() < 1e-6).nonzero()) == ns[j]:\n B[j, : ns[j]] = 1e-3\n\n # Add C_j and I_j to the arguments if using Newton's method.\n if block_solve_fun is block_solve_newton:\n block_solve_kwargs[\"C_j\"] = Cs[j]\n block_solve_kwargs[\"I_j\"] = Is[j]\n\n B[j, : ns[j]] = block_solve_fun(\n r_j,\n A[j],\n a_1[j].item(),\n a_2[j].item(),\n m,\n B[j, : ns[j]],\n verbose=verbose,\n **block_solve_kwargs,\n )\n\n # Compute relative change in b.\n b_0_diff = b_0 - b_0_prev\n B_diff = B - B_prev\n delta_norm = (b_0_diff ** 2 + (B_diff ** 2).sum()).sqrt()\n b_norm = (b_0 ** 2 + (B ** 2).sum()).sqrt()\n\n pbar_stats[\"rel change\"] = \"{:.2g}\".format(delta_norm.item() / b_norm.item())\n pbar.set_postfix(pbar_stats)\n pbar.update()\n\n # Check max iterations exit criterion.\n if max_cd_iters is not None and k == max_cd_iters:\n break\n k += 1\n\n # Check tolerance exit criterion.\n if delta_norm.item() <= rel_tol * b_norm.item() and k > 2:\n break\n b_0_prev, B_prev = b_0, B\n\n pbar.close()\n return b_0.item(), B", "def lup_decomposition(self):\n p = [i for i in range(self.rows_count())]\n for i in range(self.rows_count() - 1):\n pivot = i\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[j], i]) > abs(self[p[pivot], i]):\n pivot = j\n p[pivot], p[i] = p[i], p[pivot]\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[i], i]) < math.pow(10, -6):\n raise ValueError(\"Can't divide by 0\")\n self[p[j], i] /= self[p[i], i]\n for k in range(i + 1, self.rows_count()):\n self[p[j], k] -= self[p[j], i] * self[p[i], k]\n lst = []\n for i in p:\n lst.append(self.value[i])\n return p, Matrix(lst)", "def lu(self):\n L, U, swaps = self.to_ddm().lu()\n return L.to_dfm(), U.to_dfm(), swaps", "def solve_elas(self,x,E_p=None):\n \n if x['Crystal_Structure'] == \"Cubic\":\n self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )\n\n elif x['Crystal_Structure'] == \"HCP\":\n self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )\n\n # Update orientation\n for n in range(9):\n cell_num_list = list((9*self.cell_num)+n)\n self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]\n \n self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx\n \n if E_p:\n # Note use of sym(), assuming E_p to be the \\chi field\n L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx\n else:\n L_elas_rhs = self.L_elas \n\n self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas) \n \n # Attach near nullspace to matrix\n as_backend_type(self.A_elas).set_near_nullspace(self.null_space)\n\n # Set matrix operator\n self.elasticity_solver.set_operator(self.A_elas);\n\n # Compute solution\n self.elasticity_solver.solve(self.ue.vector(), 
self.b_elas);\n \n if E_p:\n self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n else:\n self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n \n self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))\n\n for grain_no in range(self.grains.array().max()):\n # Grain numbering is 1 index origin\n cell_subset = self.grains.array()==(grain_no+1)\n if np.any(cell_subset):\n self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],\n axis=0,weights=self.dVol[cell_subset]) \n \n deps = self.exp_strn - self.sim_avg\n resid = np.linalg.norm(deps.ravel())\n print(resid) #,self.its)\n return resid", "def TriangleForwardSub(L,b):\n C = solve(L,b)\n return C", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def solve_fwd_bkwd(matrix_a, b):\n _L = cholesky(matrix_a) \n _U = transpose_matrix(_L) \n \n n = len(b)\n x = [0 for i in xrange(n)] \n y = [0 for i in xrange(n)] \n\n #forward solve _Ly = b\n for i in xrange(n):\n y[i] = b[i]\n for j in xrange(i):\n\t y[i] -= _L[i][j] * y[j]\n\ty[i] /= _L[i][i]\n\n #backward solve _Ux = y\n for i in xrange(n-1, -1, -1):\n\tx[i] = y[i]\n for j in xrange(i+1, n):\n x[i] -= _U[i][j] * x[j]\n x[i] /= _U[i][i]\n\n return 
x", "def linear_LS_triangulation(u1, P1, u2, P2):\n A = np.zeros((4, 3))\n b = np.zeros((4, 1))\n\n # Create array of triangulated points\n x = np.zeros((3, len(u1)))\n\n # Initialize C matrices\n C1 = np.array(linear_LS_triangulation_C)\n C2 = np.array(linear_LS_triangulation_C)\n\n for i in range(len(u1)):\n # Derivation of matrices A and b:\n # for each camera following equations hold in case of perfect point matches:\n # u.x * (P[2,:] * x) = P[0,:] * x\n # u.y * (P[2,:] * x) = P[1,:] * x\n # and imposing the constraint:\n # x = [x.x, x.y, x.z, 1]^T\n # yields:\n # (u.x * P[2, 0:3] - P[0, 0:3]) * [x.x, x.y, x.z]^T + (u.x * P[2, 3] - P[0, 3]) * 1 = 0\n # (u.y * P[2, 0:3] - P[1, 0:3]) * [x.x, x.y, x.z]^T + (u.y * P[2, 3] - P[1, 3]) * 1 = 0\n # and since we have to do this for 2 cameras, and since we imposed the constraint,\n # we have to solve 4 equations in 3 unknowns (in LS sense).\n #\n # Build C matrices, to construct A and b in a concise way\n C1[:, 2] = u1[i, :]\n C2[:, 2] = u2[i, :]\n\n # Build A matrix:\n # [\n # [ u1.x * P1[2,0] - P1[0,0], u1.x * P1[2,1] - P1[0,1], u1.x * P1[2,2] - P1[0,2] ],\n # [ u1.y * P1[2,0] - P1[1,0], u1.y * P1[2,1] - P1[1,1], u1.y * P1[2,2] - P1[1,2] ],\n # [ u2.x * P2[2,0] - P2[0,0], u2.x * P2[2,1] - P2[0,1], u2.x * P2[2,2] - P2[0,2] ],\n # [ u2.y * P2[2,0] - P2[1,0], u2.y * P2[2,1] - P2[1,1], u2.y * P2[2,2] - P2[1,2] ]\n # ]\n A[0:2, :] = C1.dot(P1[0:3, 0:3]) # C1 * R1\n A[2:4, :] = C2.dot(P2[0:3, 0:3]) # C2 * R2\n\n # Build b vector:\n # [\n # [ -(u1.x * P1[2,3] - P1[0,3]) ],\n # [ -(u1.y * P1[2,3] - P1[1,3]) ],\n # [ -(u2.x * P2[2,3] - P2[0,3]) ],\n # [ -(u2.y * P2[2,3] - P2[1,3]) ]\n # ]\n b[0:2, :] = C1.dot(P1[0:3, 3:4]) # C1 * t1\n b[2:4, :] = C2.dot(P2[0:3, 3:4]) # C2 * t2\n b *= -1\n\n # Solve for x vector\n cv2.solve(A, b, x[:, i:i + 1], cv2.DECOMP_SVD)\n\n return np.transpose(x), np.ones(len(u1), dtype=bool)", "def l1(P, q):\n\n m, n = P.size\n\n # Solve equivalent LP \n #\n # minimize [0; 1]' * [u; v]\n # subject to [P, -I; -P, -I] * [u; v] <= [q; -q]\n #\n # maximize -[q; -q]' * z \n # subject to [P', -P']*z = 0\n # [-I, -I]*z + 1 = 0 \n # z >= 0 \n \n c = matrix(n*[0.0] + m*[1.0])\n h = matrix([q, -q])\n\n def Fi(x, y, alpha = 1.0, beta = 0.0, trans = 'N'): \n if trans == 'N':\n # y := alpha * [P, -I; -P, -I] * x + beta*y\n u = P*x[:n]\n y[:m] = alpha * ( u - x[n:]) + beta*y[:m]\n y[m:] = alpha * (-u - x[n:]) + beta*y[m:]\n\n else:\n # y := alpha * [P', -P'; -I, -I] * x + beta*y\n y[:n] = alpha * P.T * (x[:m] - x[m:]) + beta*y[:n]\n y[n:] = -alpha * (x[:m] + x[m:]) + beta*y[n:]\n\n\n def Fkkt(W): \n\n # Returns a function f(x, y, z) that solves\n #\n # [ 0 0 P' -P' ] [ x[:n] ] [ bx[:n] ]\n # [ 0 0 -I -I ] [ x[n:] ] [ bx[n:] ]\n # [ P -I -W1^2 0 ] [ z[:m] ] = [ bz[:m] ]\n # [-P -I 0 -W2 ] [ z[m:] ] [ bz[m:] ]\n #\n # On entry bx, bz are stored in x, z.\n # On exit x, z contain the solution, with z scaled (W['di'] .* z is\n # returned instead of z). 
\n\n d1, d2 = W['d'][:m], W['d'][m:]\n D = 4*(d1**2 + d2**2)**-1\n A = P.T * spdiag(D) * P\n lapack.potrf(A)\n\n def f(x, y, z):\n\n x[:n] += P.T * ( mul( div(d2**2 - d1**2, d1**2 + d2**2), x[n:]) \n + mul( .5*D, z[:m]-z[m:] ) )\n lapack.potrs(A, x)\n\n u = P*x[:n]\n x[n:] = div( x[n:] - div(z[:m], d1**2) - div(z[m:], d2**2) + \n mul(d1**-2 - d2**-2, u), d1**-2 + d2**-2 )\n\n z[:m] = div(u-x[n:]-z[:m], d1)\n z[m:] = div(-u-x[n:]-z[m:], d2)\n\n return f\n\n\n # Initial primal and dual points from least-squares solution.\n\n # uls minimizes ||P*u-q||_2; rls is the LS residual.\n uls = +q\n lapack.gels(+P, uls)\n rls = P*uls[:n] - q \n\n # x0 = [ uls; 1.1*abs(rls) ]; s0 = [q;-q] - [P,-I; -P,-I] * x0\n x0 = matrix( [uls[:n], 1.1*abs(rls)] ) \n s0 = +h\n Fi(x0, s0, alpha=-1, beta=1) \n\n # z0 = [ (1+w)/2; (1-w)/2 ] where w = (.9/||rls||_inf) * rls \n # if rls is nonzero and w = 0 otherwise.\n if max(abs(rls)) > 1e-10: \n w = .9/max(abs(rls)) * rls\n else: \n w = matrix(0.0, (m,1))\n z0 = matrix([.5*(1+w), .5*(1-w)])\n\n dims = {'l': 2*m, 'q': [], 's': []}\n sol = solvers.conelp(c, Fi, h, dims, kktsolver = Fkkt, \n primalstart={'x': x0, 's': s0}, dualstart={'z': z0})\n return sol['x'][:n]", "def rforwardsolve(A, b, d):\n \n \n\n n = len(b)\n if np.iscomplexobj(A) or np.iscomplexobj(b):\n A = A.astype('complex128')\n b = b.astype('complex128')\n x = b.copy()\n x[0] = x[0] / A[0, 0]\n for k in range(1, n):\n lk = max(0, k-d)\n x[k] = b[k] - np.dot(A[k, lk : k], x[lk : k])\n x[k] = x[k] / A[k, k] \n return x", "def laplace2d(get_A, get_rho, N=Mynum, Te=2):\n # Reduce the row and column of Laplacian matrix by 2 \n # Reduced row and column will be replace with embed in future\n # n = N - 2 for embed\n n = N\n # Solving for the PDE(1)\n h = 1.0/(n-1)\n A = get_A(n) * (1/(h**2))\n b = get_rho(n, Te)\n U = sp.linalg.solve(A, b)\n\n # Reshape the u vector into nxn matrix for heat map plotting\n T = U.reshape((n, n))\n print T\n \n # Embed the surrounding of U matrix into zeros\n Tfull = embed(T, Te)\n\n # Verify that dot function of A matrix and U vector\n # return the same rho value at midpoint\n CheckU = np.dot(A,U)\n\n # Filter very small value into zeros\n for i in range(0,len(CheckU)):\n if (abs(CheckU[i]) < 1e-12):\n CheckU[i] = 0\n\n # Validate that product of A and U matrix is the same as rho vector\n # Will give warning if it is not the same\n # assert np.all(CheckU == b) # working only mynum = 7 and 9 \n\n # Print value of the products at midpoint.\n mid = (n**2-1)/2\n print \"Q1: Value of the dot product A.u1 is %5.3f at (0.5,0.5).\" % (CheckU[mid])\n return Tfull", "def actualSolve(self, lp):\n\t\tif lp.isMIP() and self.mip: return self.solve_CBC(lp)\n\t\telse: return self.solve_CLP(lp)", "def solve_L(centers_i,centers_r):\n\t# The first term is a column vector of size N. 
Each of its rows\n\t# multiplies with the respective row on the x_r with 2 columns and \n\t# N rows.\n\tLy = centers_i[:,1][np.newaxis,:].transpose() * centers_r\n\tLx = - centers_i[:,0][np.newaxis,:].transpose() * centers_r\n\tL = np.concatenate((Ly,centers_i[:,1][np.newaxis,:].transpose(),Lx),\n\t\taxis=1)\n\tb = centers_i[:,0]\n\tprint(\"solving for the rotation and translation coefficients...\")\n\trl,resids,rank,svals = np.linalg.lstsq(L,b)\n\tprint(\"residue:%0.4f\trank:%0.4f\"%(np.sum(resids),rank))\n\treturn rl", "def spectral_laplace(x_values, dd_math_function, sigma, ua, ub):\n B = []\n for x in x_values:\n B += [-dd_math_function(x, sigma)]\n B[0] = ua\n B[len(x_values) - 1] = ub\n #B ferdig\n A=[]\n for i in range (len(x_values)):\n a = []\n for j in range (len(x_values)):\n if i == 0 or i == len(x_values) - 1:\n a.append(lagrange(x_values, j, x_values[i]))\n else:\n a.append(dd_lagrange(x_values, j, x_values[i]))\n A.append(a)\n #A ferdig\n return np.linalg.solve(A, B)", "def sparsetriangularsolvedense(self,transposed_,lnzc,lptrc,lsubc,lvalc,b): # 3\n if not isinstance(transposed_,transpose): raise TypeError(\"Argument transposed has wrong type\")\n n_ = None\n if n_ is None:\n n_ = len(b)\n elif n_ != len(b):\n raise IndexError(\"Inconsistent length of array b\")\n if n_ is None:\n n_ = len(lnzc)\n elif n_ != len(lnzc):\n raise IndexError(\"Inconsistent length of array lnzc\")\n if n_ is None:\n n_ = len(lptrc)\n elif n_ != len(lptrc):\n raise IndexError(\"Inconsistent length of array lptrc\")\n if n_ is None: n_ = 0\n if lnzc is None: raise TypeError(\"Invalid type for argument lnzc\")\n if lnzc is None:\n lnzc_ = None\n else:\n try:\n lnzc_ = memoryview(lnzc)\n except TypeError:\n try:\n _tmparr_lnzc = array.array(\"i\",lnzc)\n except TypeError:\n raise TypeError(\"Argument lnzc has wrong type\")\n else:\n lnzc_ = memoryview(_tmparr_lnzc)\n \n else:\n if lnzc_.format != \"i\":\n lnzc_ = memoryview(array.array(\"i\",lnzc))\n \n if lnzc_ is not None and len(lnzc_) != (n_):\n raise ValueError(\"Array argument lnzc has wrong length\")\n if lptrc is None: raise TypeError(\"Invalid type for argument lptrc\")\n if lptrc is None:\n lptrc_ = None\n else:\n try:\n lptrc_ = memoryview(lptrc)\n except TypeError:\n try:\n _tmparr_lptrc = array.array(\"q\",lptrc)\n except TypeError:\n raise TypeError(\"Argument lptrc has wrong type\")\n else:\n lptrc_ = memoryview(_tmparr_lptrc)\n \n else:\n if lptrc_.format != \"q\":\n lptrc_ = memoryview(array.array(\"q\",lptrc))\n \n if lptrc_ is not None and len(lptrc_) != (n_):\n raise ValueError(\"Array argument lptrc has wrong length\")\n lensubnval_ = None\n if lensubnval_ is None:\n lensubnval_ = len(lsubc)\n elif lensubnval_ != len(lsubc):\n raise IndexError(\"Inconsistent length of array lsubc\")\n if lensubnval_ is None:\n lensubnval_ = len(lvalc)\n elif lensubnval_ != len(lvalc):\n raise IndexError(\"Inconsistent length of array lvalc\")\n if lensubnval_ is None: lensubnval_ = 0\n if lsubc is None: raise TypeError(\"Invalid type for argument lsubc\")\n if lsubc is None:\n lsubc_ = None\n else:\n try:\n lsubc_ = memoryview(lsubc)\n except TypeError:\n try:\n _tmparr_lsubc = array.array(\"i\",lsubc)\n except TypeError:\n raise TypeError(\"Argument lsubc has wrong type\")\n else:\n lsubc_ = memoryview(_tmparr_lsubc)\n \n else:\n if lsubc_.format != \"i\":\n lsubc_ = memoryview(array.array(\"i\",lsubc))\n \n if lsubc_ is not None and len(lsubc_) != (lensubnval_):\n raise ValueError(\"Array argument lsubc has wrong length\")\n if lvalc is None: 
raise TypeError(\"Invalid type for argument lvalc\")\n if lvalc is None:\n lvalc_ = None\n else:\n try:\n lvalc_ = memoryview(lvalc)\n except TypeError:\n try:\n _tmparr_lvalc = array.array(\"d\",lvalc)\n except TypeError:\n raise TypeError(\"Argument lvalc has wrong type\")\n else:\n lvalc_ = memoryview(_tmparr_lvalc)\n \n else:\n if lvalc_.format != \"d\":\n lvalc_ = memoryview(array.array(\"d\",lvalc))\n \n if lvalc_ is not None and len(lvalc_) != (lensubnval_):\n raise ValueError(\"Array argument lvalc has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n _copyback_b = False\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n _copyback_b = True\n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n _copyback_b = True\n if b_ is not None and len(b_) != (n_):\n raise ValueError(\"Array argument b has wrong length\")\n res = self.__obj.sparsetriangularsolvedense(transposed_,n_,lnzc_,lptrc_,lensubnval_,lsubc_,lvalc_,b_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_b:\n b[:] = _tmparr_b", "def forward_substitution(self, b):\n if not self.is_lower_triangular():\n raise ValueError(\"Not a lower triangular matrix\")\n if b.cols != 1:\n raise IndexError(\"Require an Nx1 Matrix: (%i, %i)\"%\n (b.rows, b.cols))\n if b.rows != self.rows:\n raise IndexError(\"Row/column mismatch: (%i, %i) x (%i, %i)\"%\n (self.rows, self.cols, b.rows, b.cols))\n\n L = self\n N = self.rows\n\n y = make_matrix(N, 1)\n for i in range(N):\n y[i, 0] = (b[i, 0] - sum(L[i, k] * y[k, 0] for k in range(i))) / L[i, i]\n\n return y", "def ot_ul2_solve_mu(C, a, b, reg, nitermax=100000, tol=1e-14, P0=None, verbose=False):\n\n if P0 is None:\n P = a[:, None] * b[None, :]\n else:\n P = P0\n abt = np.maximum(a[:, None] + b[None, :] - C / (2 * reg), 0)\n for i in range(nitermax):\n Pold = P.copy()\n P = P * abt / (P.sum(0, keepdims=True) + P.sum(1, keepdims=True) + 1e-16)\n pmax = P.max()\n P = P * (P > pmax * 1e-16)\n if verbose:\n print(np.linalg.norm(P - Pold))\n if np.linalg.norm(P - Pold) < tol:\n break\n return P", "def solve_lyap_dense(A, E, B, trans=False, options=None):\n\n _solve_lyap_dense_check_args(A, E, B, trans)\n options = _parse_options(options, lyap_lrcf_solver_options(), 'pymess_glyap', None, False)\n\n if options['type'] == 'pymess_glyap':\n Y = B.dot(B.T) if not trans else B.T.dot(B)\n op = pymess.MESS_OP_NONE if not trans else pymess.MESS_OP_TRANSPOSE\n X = pymess.glyap(A, E, Y, op=op)[0]\n else:\n raise ValueError(f'Unexpected Lyapunov equation solver ({options[\"type\"]}).')\n\n return X", "def Solver(line1, line2):\n\ta = np.array(line1[0])\n\tb = np.array(line1[1])\n\tu = np.array(line2[0])\n\tv = np.array(line2[1])\n\t#print(a,b,u,v)\n\tc = u[:2]-a[:2]\n\tA = np.vstack((b[:2],-v[:2])).T\n\t#print(A)\n\tx = np.linalg.solve(A,c)\n\t#print(x)\n\tp = a+x[0]*b\n\t#print(p)\n\treturn p", "def solve_triangular(a, b, lower=False):\n # TODO maybe commit this to gvar.linalg\n # TODO can I raise a LinAlgError if a[i,i] is 0, and still return the\n # result and have it assigned to a variable using try...finally inside this\n # function?\n x = np.copy(b)\n a = a.reshape(a.shape + (1,) * len(x.shape[1:]))\n if lower:\n x[0] /= a[0, 0]\n for i in range(1, len(x)):\n x[i:] -= x[i - 1] * a[i:, i - 1]\n x[i] /= a[i, i]\n else:\n x[-1] /= a[-1, -1]\n for i in range(len(x) - 
1, 0, -1):\n x[:i] -= x[i] * a[:i, i]\n x[i - 1] /= a[i - 1, i - 1]\n return x", "def collatz_solve(r, w):\n\tfor s in r:\n\t\ti, j = collatz_read(s)\n\t\tv = collatz_eval(i, j)\n\t\tcollatz_print(w, i, j, v)", "def cforwardsolve(A, b, d):\n A = matrix(A)\n n = len(b)\n for k in range(n-1):\n b[k] /= A[k, k]\n uk = array([n, k + d + 1]).min()\n b[(k+1):uk] -= A[(k+1):uk, k]*b[k]\n b[n - 1] /= A[n - 1,n - 1]", "def test_solver():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n linear_solver = 'direct'\n errors = []\n for method in 'alg_Newton', 'pde_Newton':\n for J_comp in 'manual', 'automatic':\n for degree in 1, 2, 3:\n error_prev = -1\n for divisions in [(10, 10), (20, 20), (40, 40)]:\n u = solver(\n q, Dq, f, divisions, degree,\n method, J_comp,\n linear_solver,\n abs_tol_Krylov=1E-10,\n rel_tol_Krylov=1E-10,\n abs_tol_Newton=1E-10,\n rel_tol_Newton=1E-10)\n\n # Find max error\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n # Expect convergence as h**(degree+1)\n if error_prev > 0:\n frac = abs(error - error_prev/2**(degree+1))\n errors.append(frac)\n error_prev = error\n tol = 4E-5\n for error_reduction in errors:\n assert error_reduction < tol, error_reduction", "def linear_program_eq(self, c, A, b, lb, ub):\n if self.solver == solver_SCIPY:\n c = c.reshape((c.size,))\n b = b.reshape((b.size,))\n return scipy_linear_program_eq(c, A, b, lb, ub)\n elif self.solver == solver_GUROBI:\n return gurobi_linear_program_eq(c, A, b, lb, ub)\n else:\n raise ValueError('QP solver %s not available' % self.solver)", "def jacobian_solver(self,\n u: np.ndarray,\n lmbda: float,\n rhs: np.ndarray) -> np.ndarray:\n A = self.lap - lmbda * dia_matrix((self.mass @ np.exp(u), 0),\n self.mass.shape)\n du = np.zeros_like(u)\n du = solve(*condense(A, rhs, D=self.D))\n return du", "def choleski_solve(A, b, half_bandwidth=None):\n n = len(A[0])\n if half_bandwidth is None:\n elimination(A, b)\n else:\n elimination_banded(A, b, half_bandwidth)\n x = Matrix.empty(n, 1)\n back_substitution(A, x, b)\n return x", "def solve(self, b):\n raise NotImplementedError", "def test_solve_lsap_with_removed_col():\n num_rows = 10\n num_cols = 20\n num_rounds = 1000\n\n for i in range(num_rounds):\n cost_matrix = np.random.randint(10, size=(num_rows, num_cols))\n cost_matrix = cost_matrix.astype(np.double)\n\n row_idx_1, col_idx_1 = linear_sum_assignment(cost_matrix)\n # Note that here we specifically pick a column that appears in the\n # previous optimal assignment.\n removed_col = random.choice(col_idx_1)\n\n # Get the submatrix with the removed col\n sub_cost_matrix = cost_matrix[:, ~one_hot(removed_col, num_cols)]\n sub_row_idx_1, sub_col_idx_1 = linear_sum_assignment(sub_cost_matrix)\n sub_cost_matrix_sum = sub_cost_matrix[sub_row_idx_1, sub_col_idx_1].sum()\n for i in range(len(sub_col_idx_1)):\n if sub_col_idx_1[i] >= removed_col:\n # Need to increment 1 to return these to their original index\n sub_col_idx_1[i] += 1\n\n # Solve the problem with dynamic algorithm\n row4col, col4row, u, v = lap._solve(cost_matrix)\n assert (\n np.array_equal(col_idx_1, col4row)\n or cost_matrix[row_idx_1, col_idx_1].sum()\n == cost_matrix[row_idx_1, col4row].sum()\n )\n\n lap.solve_lsap_with_removed_col(cost_matrix, removed_col, row4col, col4row, v)\n assert (\n np.array_equal(sub_col_idx_1, col4row)\n 
or sub_cost_matrix_sum == cost_matrix[row_idx_1, col4row].sum()\n )", "def solve_linear_system(system, goal):\n if goal.column:\n sol = goal.data.copy()\n else:\n raise ValueError('goal is not a column vector')\n\n if not (len(goal) == system.n_cols):\n raise ValueError('len(goal) != system.n_cols')\n\n if system.n_rows == system.n_cols:\n reduced, ops = system.to_reduced_echelon(True)\n for op in ops:\n if op[0] == 'swap':\n sol[op[1]], sol[op[2]] = sol[op[2]], sol[op[1]]\n elif op[0] == 'multiplication':\n sol[op[1]] = sol[op[1]] * op[2]\n elif op[0] == 'subtract':\n sol[op[2]] = sol[op[2]] - sol[op[1]] * op[3]\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('system is not a square matrix')\n\n for i, row in enumerate(reduced.data):\n if sum(row) == 0 and sol[i] != 0:\n raise ValueError('system is not consistent and \\\n sol is not 0 at pivotless row')\n\n return Vector(sol)", "def solver(I, f, c, U_0, U_L, L, n, dt, tstop,\n user_action=None, version='scalar'):\n import time\n t0 = time.clock()\n \n dx = L/float(n)\n x = linspace(0, L, n+1) # grid points in x dir\n if dt <= 0: dt = dx/float(c) # max time step?\n\n u = zeros(n+1) # NumPy solution array\n rhs = u.copy()\n \n # set initial condition (pointwise - allows straight if-tests):\n t = 0.0\n u = ic_scalar(u, x, I, U_0, U_L)\n with_bc = False #True\n F = WaveRHS(rhs, u, c, f, x, n, dx, version, with_bc, U_0, U_L)\n solver = ODESolver(F, dt, u, user_action, [x])\n solver.timeloop(tstop)\n \n t1 = time.clock()\n return dt, x, t1-t0", "def _solve_complex_unc(self, d, v, a, force):\n nt = force.shape[1]\n pc = self.pc\n if self.rbsize:\n # solve:\n # for i in range(nt-1):\n # drb[:, i+1] = drb[:, i] + G*vrb[:, i] +\n # A*(rbforce[:, i] + rbforce[:, i+1]/2)\n # vrb[:, i+1] = vrb[:, i] + Ap*(rbforce[:, i] +\n # rbforce[:, i+1])\n rb = self.rb\n if self.m is not None:\n if self.unc:\n rbforce = self.imrb * force[rb]\n else:\n rbforce = la.lu_solve(self.imrb, force[rb], check_finite=False)\n else:\n rbforce = force[rb]\n if nt > 1:\n G = pc.G\n A = pc.A\n Ap = pc.Ap\n if self.order == 1:\n AF = A * (rbforce[:, :-1] + rbforce[:, 1:] / 2)\n AFp = Ap * (rbforce[:, :-1] + rbforce[:, 1:])\n else:\n AF = (1.5 * A) * rbforce[:, :-1]\n AFp = (2 * Ap) * rbforce[:, :-1]\n drb = d[rb]\n vrb = v[rb]\n di = drb[:, 0]\n vi = vrb[:, 0]\n for i in range(nt - 1):\n di = drb[:, i + 1] = di + G * vi + AF[:, i]\n vi = vrb[:, i + 1] = vi + AFp[:, i]\n if not self.slices:\n d[rb] = drb\n v[rb] = vrb\n a[rb] = rbforce\n\n if self.ksize and nt > 1:\n self._delconj()\n # solve:\n # for i in range(nt-1):\n # u[:, i+1] = Fe*u[:, i] + Ae*w[:, i] + Be*w[:, i+1]\n Fe = pc.Fe\n Ae = pc.Ae\n Be = pc.Be\n ur_d = pc.ur_d\n ur_v = pc.ur_v\n rur_d = pc.rur_d\n iur_d = pc.iur_d\n rur_v = pc.rur_v\n iur_v = pc.iur_v\n ur_inv_d = pc.ur_inv_d\n ur_inv_v = pc.ur_inv_v\n\n kdof = self.kdof\n if self.m is not None:\n if self.unc:\n imf = self.invm * force[kdof]\n else:\n imf = la.lu_solve(self.invm, force[kdof], check_finite=False)\n else:\n imf = force[kdof]\n w = ur_inv_v @ imf\n if self.order == 1:\n ABF = Ae[:, None] * w[:, :-1] + Be[:, None] * w[:, 1:]\n else:\n ABF = (Ae + Be)[:, None] * w[:, :-1]\n\n y = np.empty((ur_inv_v.shape[0], nt), complex, order=\"F\")\n di = y[:, 0] = ur_inv_v @ v[kdof, 0] + ur_inv_d @ d[kdof, 0]\n for i in range(nt - 1):\n di = y[:, i + 1] = Fe * di + ABF[:, i]\n if self.systype is float:\n # Can do real math for recovery. 
Note that the\n # imaginary part of 'd' and 'v' would be zero if no\n # modes were deleted of the complex conjugate pairs.\n # The real part is correct however, and that's all we\n # need.\n ry = y[:, 1:].real.copy()\n iy = y[:, 1:].imag.copy()\n d[kdof, 1:] = rur_d @ ry - iur_d @ iy\n v[kdof, 1:] = rur_v @ ry - iur_v @ iy\n else:\n d[kdof, 1:] = ur_d @ y[:, 1:]\n v[kdof, 1:] = ur_v @ y[:, 1:]", "def laplace_2d(x_loc, y_loc, u):\n\n assert u.shape == (x_loc.shape[0], y_loc.shape[0])\n\n dx = x_loc[1:] - x_loc[0:-1]\n dy = y_loc[1:] - y_loc[0:-1]\n\n u_sol = u.copy()\n error = 1\n while error > 1e-3:\n u = u_sol.copy()\n u_sol[1:-1, 1:-1] = (dy[0:-1] * dy[1:] * (u_sol[2:, 1:-1] + u_sol[0:-2, 1:-1]) \\\n + dx[0:-1] * dx[1:] * (u_sol[1:-1, 2:] + u_sol[1:-1, 0:-2])) \\\n / (2 * (dy[0:-1] * dy[1:] + dx[0:-1] * dx[1:]))\n\n u_sol[0, :] = 0\n u_sol[-1, :] = y_loc\n u_sol[:, 0] = u_sol[:, 1]\n u_sol[:, -1] = u_sol[:, -2]\n\n error = np.abs(np.sum(np.abs(u_sol[:]) - np.abs([u])) / np.sum(np.abs(u[:])))\n return u_sol", "def test_solve_lsap_with_removed_row():\n num_rows = 10\n num_cols = 500\n num_rounds = 100\n\n for i in range(num_rounds):\n # Note that here we set all costs to integer values, which might\n # lead to existence of multiple solutions.\n cost_matrix = np.random.randint(10, size=(num_rows, num_cols))\n cost_matrix = cost_matrix.astype(np.double)\n\n removed_row = random.randint(0, num_rows - 1)\n row_idx_1, col_idx_1 = linear_sum_assignment(cost_matrix)\n\n # Get the submatrix with the removed row\n sub_cost_matrix = cost_matrix[~one_hot(removed_row, num_rows), :]\n sub_row_idx_1, sub_col_idx_1 = linear_sum_assignment(sub_cost_matrix)\n\n # Solve the problem with dynamic algorithm\n row4col, col4row, u, v = lap._solve(cost_matrix)\n assert (\n np.array_equal(col_idx_1, col4row)\n or cost_matrix[row_idx_1, col_idx_1].sum()\n == cost_matrix[row_idx_1, col4row].sum()\n )\n\n lap.solve_lsap_with_removed_row(cost_matrix, removed_row, row4col, col4row, v)\n assert (\n np.array_equal(sub_col_idx_1, col4row[~one_hot(removed_row, num_rows)])\n or sub_cost_matrix[sub_row_idx_1, sub_col_idx_1].sum()\n == cost_matrix[\n ~one_hot(removed_row, num_rows),\n col4row[~one_hot(removed_row, num_rows)],\n ].sum()\n )", "def resolves_matrix(self):\n self.P = np.linalg.solve(self.M, self.f)", "def solve_cholesky(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b, sym_pos=True)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b.ravel(), sym_pos=True).reshape(*b.shape)\n else:\n raise NotImplementedError", "def __solve_alternative_linear_problem(self, user):\n result = [0] * self.layout_slots\n de_rand_approach = \"greedy\"\n bins_per_category = []\n bins_cardinality = []\n for _ in range(len(self.categories)):\n bins_per_category.append([])\n bins_cardinality.append([])\n\n for cat in range(len(self.categories)):\n for _ in range(len(self.news_row_pivots) + 1):\n bins_per_category[cat].append([])\n bins_cardinality[cat].append([])\n for _ in range(len(self.news_column_pivots) + 1):\n bins_per_category[cat][-1].append([])\n bins_cardinality[cat][-1].append(0)\n\n for news in self.news_pool:\n category_index = self.categories.index(news.news_category)\n x, y = self.__compute_position_in_learning_matrix(user=user, news=news)\n bins_per_category[category_index][x][y].append(news)\n 
bins_cardinality[category_index][x][y] += 1\n\n index = 0\n bin_samples = []\n for cat in range(len(self.categories)):\n for x in range(len(self.news_row_pivots) + 1):\n for y in range(len(self.news_column_pivots) + 1):\n if (y == 0) and (x != 0):\n continue\n self.alt_B[index] = min(bins_cardinality[cat][x][y], self.layout_slots)\n index += 1\n try:\n selected_news = np.random.choice(bins_per_category[cat][x][y])\n self.sample_quality(selected_news, user, interest_decay=True)\n bin_samples += [selected_news.sampled_quality] * self.layout_slots\n except ValueError:\n bin_samples += [0] * self.layout_slots\n\n self.alt_C = np.array(list(np.array(self.alt_lambdas) * bin_samples)) * -1\n linear_problem = opt.linprog(A_ub=self.alt_A, b_ub=self.alt_B, c=self.alt_C)\n\n # FOR EACH SLOT, ISOLATES THE CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n slot_promenances = self.real_slot_promenances.copy()\n slot_promenances_norm = np.array(slot_promenances) / sum(slot_promenances)\n slots_nr = [s for s in range(0, self.layout_slots)]\n for i in range(self.layout_slots):\n if de_rand_approach == \"ordered\":\n k = i\n elif (de_rand_approach == \"greedy\") or (de_rand_approach == \"greedy_max\"):\n k = np.argmax(slot_promenances)\n slot_promenances[k] = 0\n elif de_rand_approach == \"randomized\":\n k = np.random.choice(slots_nr, p=slot_promenances_norm)\n slot_promenances[k] = 0\n else:\n raise RuntimeError(\"De_randomization approach not recognized. 
Try either 'ordered', 'greedy', \"\n \"'randomized' or 'greedy_max'.\")\n\n target_slot_probabilities = [x for x in slots_assegnation_probabilities[k]]\n target_slot_probabilities_norm = np.array(target_slot_probabilities) / sum(target_slot_probabilities)\n if de_rand_approach == \"greedy_max\":\n assigning_bin_index = np.argmax(target_slot_probabilities)\n cat_index = int(assigning_bin_index / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin_index)][0]\n y = self.bins_for_position[int(assigning_bin_index)][1]\n\n else:\n assigning_bin = np.random.choice([x for x in range(len(slots_assegnation_probabilities[k]))], p=target_slot_probabilities_norm)\n cat_index = int(assigning_bin / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin)][0]\n y = self.bins_for_position[int(assigning_bin)][1]\n\n result[k] = np.random.choice(bins_per_category[cat_index][x][y])\n\n return result", "def solve_lsqr(self, b, rho=None, v=None, x_init=None, options=None):\n\n # Add additional linear terms for the rho terms\n sizev = 0\n if rho is not None:\n vf = v.flatten() * np.sqrt(rho / 2.0)\n sizeb = self.K.input_size\n sizev = np.prod(v.shape)\n b = np.hstack((b, vf))\n\n input_data = np.zeros(self.K.input_size)\n output_data = np.zeros(self.K.output_size + sizev)\n\n def matvec(x, output_data):\n if rho is None:\n # Traverse compgraph\n self.K.forward(x, output_data)\n else:\n # Compgraph and additional terms\n self.K.forward(x, output_data[0:0 + sizeb])\n np.copyto(output_data[sizeb:sizeb + sizev],\n x * np.sqrt(rho / 2.0))\n\n return output_data\n\n def rmatvec(y, input_data):\n if rho is None:\n self.K.adjoint(y, input_data)\n else:\n self.K.adjoint(y[0:0 + sizeb], input_data)\n input_data += y[sizeb:sizeb + sizev] * np.sqrt(rho / 2.0)\n\n return input_data\n\n # Define linear operator\n def matvecComp(x):\n return matvec(x, output_data)\n\n def rmatvecComp(y):\n return rmatvec(y, input_data)\n\n K = LinearOperator((self.K.output_size + sizev, self.K.input_size),\n matvecComp, rmatvecComp)\n\n # Options\n if options is None:\n # Default options\n return lsqr(K, b)[0]\n else:\n if not isinstance(options, lsqr_options):\n raise Exception(\"Invalid LSQR options.\")\n return lsqr(K,\n b,\n atol=options.atol,\n btol=options.btol,\n show=options.show,\n iter_lim=options.iter_lim)[0]", "def test_triangular_checks(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n L, U = MA.decomposeLU()\n self.assertTrue(L.is_lower_triangular())\n self.assertTrue(U.is_upper_triangular())", "def lfunc(x,u):\n return mpc.mtimes(u.T, R, u) + mpc.mtimes((x-goal).T, Q, (x-goal))", "def solve(self, x, y):\n\t\tx = np.concatenate((np.ones([x.shape[0], 1]), x), axis=1)\n\t\txtx = np.dot(x.T, x)\n\t\txty = np.dot(y, x)\n\t\tself.w = np.dot(np.linalg.inv(xtx), xty.T)", "def _solve(self):\n B = sp.linalg.solve(self._XtX, self._XtY, assume_a='pos', overwrite_a=False, overwrite_b=False)\n self.coef_ = B[1:]\n self.intercept_ = B[0]\n self.is_fitted_ = True", "def write_ldl_lsolve(f, variables):\n\n data = variables['data']\n priv = variables['priv']\n Lp = priv['L']['p']\n\n f.write(\"void LDL_lsolve(LDL_int n, c_float X [ ], LDL_int Lp [ ]\")\n f.write(\", LDL_int Li [ ], c_float Lx [ ]){\\n\")\n f.write(\"LDL_int p;\\n\")\n\n # Unroll for loop\n for j in range(data['m'] + data['n']):\n if Lp[j+1] > Lp[j]: # Write loop ONLY if necessary\n f.write(\"for (p = %i ; p < %i ; p++){\\n\" % (Lp[j], Lp[j+1]))\n f.write(\"X [Li [p]] -= Lx [p] * X [%i];\\n\" % (j))\n f.write(\"}\\n\")\n\n # Close function\n 
f.write(\"}\\n\\n\")", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to permute x according to permutation matrix P\n \n return numpy.dot( P, x )" ]
[ "0.8204323", "0.77781606", "0.7435162", "0.7326853", "0.71451354", "0.7074888", "0.6962283", "0.69174", "0.6882864", "0.6877735", "0.68558407", "0.6839501", "0.67079276", "0.6632435", "0.6581103", "0.6516492", "0.6455007", "0.63229835", "0.62904537", "0.62402534", "0.6206865", "0.61846966", "0.6057433", "0.60542107", "0.5929366", "0.58872217", "0.5857919", "0.5856226", "0.5809597", "0.5795168", "0.5794811", "0.57896155", "0.57884324", "0.57821834", "0.5753448", "0.5739651", "0.57390624", "0.5732068", "0.5727023", "0.5700012", "0.56929606", "0.5657974", "0.565073", "0.56261116", "0.56234723", "0.5595026", "0.5594619", "0.55940664", "0.55834883", "0.5549564", "0.5544687", "0.5538217", "0.5485445", "0.5465909", "0.54576606", "0.5456828", "0.541641", "0.53993976", "0.539743", "0.5394006", "0.5385716", "0.5378614", "0.53759724", "0.53716606", "0.5366472", "0.53540313", "0.53504926", "0.5343782", "0.5340893", "0.53399247", "0.53370917", "0.5329035", "0.531435", "0.53119457", "0.53065926", "0.5305252", "0.5288922", "0.5287085", "0.5285799", "0.52805096", "0.5276925", "0.5272319", "0.5268464", "0.52556527", "0.5251243", "0.5250765", "0.52489376", "0.5225413", "0.5216995", "0.520225", "0.52022475", "0.5201454", "0.5192061", "0.5182462", "0.5180259", "0.5178878", "0.51763827", "0.5175552", "0.5173067", "0.51681954" ]
0.79200137
1
Starting offset of the segment
def start(self): return self.start_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_offset(self):\n return self.get_info_value(\"D_STARTOFFS\")", "def offset_from_start(self, part):\n index = self.parts.index(part)\n return sum([p.length for p in self.parts[:index]])", "def min_offset(self):\n return self.offset", "def offset_segment(self, i):\n return self.segments[i % len(self.segments)]", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def getStart(self):\n return _libsbml.LineSegment_getStart(self)", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def get_pre_start_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n pre_start_coordinate = (self.__location[0] - 1,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n pre_start_coordinate = (self.__location[0],\r\n self.__location[1] - 1)\r\n return pre_start_coordinate", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def get_start_point(self):\n return self.first_point", "def offset(self, start):\n if self.out_start is not None:\n self.out_start += start\n if self.start is not None:\n self.start += start\n if self.in_start is not None:\n self.in_start += start\n if self.in_end is not None:\n self.in_end += start\n if self.end is not None:\n self.end += start\n if self.out_end is not None:\n self.out_end += start", "def OldStartingIndex(self) -> int:", "def start(self) -> pos.Pos:\n return self.__start", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')", "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def begining_of_line():\r\n set_point(point().begining_of_line())", "def start(self):\n return self.__start_line", "def get_line_start(self):\n return self._line_start", "def starting_position(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"starting_position\")", "def get_offset(self):\n return self.offset", "def _get_start(self, variant, reference_start, cigar, ignore_softclip=False):\n indels = get_indel_from_cigar(cigar, ignore_softclip)\n start = variant.POS - reference_start - 1\n # for pos, val in indels.iteritems(): # python2\n for pos, val in indels.items():\n if pos > start:\n break\n if val[0] == 'I':\n start += val[1]\n elif val[0] == 'D':\n start -= val[1]\n return start", "def _chunk_start(c):\n start = None\n if isinstance(c, list):\n for e in c:\n if start is None or e.offset < start:\n start = e.offset\n else:\n start = c.offset\n return start", "def getStart(self) -> long:\n ...", "def first_log_entry_offset(self):\n return 0x200", "def get_startline(self):\n return self.get_attribute(\"startline\")", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def start(self, start=None):\n return self.bounds(start)[0]", "def get_begin(self):\n return self.__begin", "def get_start(i,v):\n return i-v[i]-1", "def start_point(self) -> Vec3:\n v = list(self.vertices([self.dxf.start_angle]))\n return v[0]", "def _get_next_offset(self):\n return 
self.__offset", "def offset(self):\r\n return self._get_instantiation()[3]", "def get_start_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc", "def getStartVertex(self):", "def affected_start(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based start position of first REF base\n return self.POS - 1 # left of first base", "def startOffset(padFile,sampleRate,dateStart,dataColumns=4):\n #bytesPerRecord = dataColumns * 4\n bb, bj, be = fileTimeRange(padFile)\n #print ' start: %s\\ndateStart: %s\\n stop: %s' % ( unixTimeToString(bb), dateStart, unixTimeToString(be) )\n practicalStart = max(stringTimeToUnix(dateStart), bb)\n dateStartOffset = practicalStart - bb # time to skip in first pad file\n startOffsetSamples = int(dateStartOffset * sampleRate - 0.5)\n #startOffsetBytes = startOffsetSamples * bytesPerRecord\n actualStart = bb + startOffsetSamples/float(sampleRate)\n #print 'START OFFSET: samples: %d, bytes: %d, sec: %f' % ( startOffsetSamples, startOffsetBytes, startOffsetSamples/float(sampleRate) )\n #print 'START OFFSET: samples: %d, sec: %f' % ( startOffsetSamples, startOffsetSamples/float(sampleRate) )\n return startOffsetSamples,actualStart", "def getStartAddressOfRawData(self) -> long:\n ...", "def starting_position(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"starting_position\")", "def elemoffset(self):\n return self.offset // self.itemsize", "def lineOffset(self):\n if self.__lineOffset is None:\n self.__lineOffset = self.__offset - self.__source.rfind(\"\\n\", 0, self.__offset) - 1\n\n return self.__lineOffset", "def offset(self):\n\n return self._offset", "def NewStartingIndex(self) -> int:", "def tell(self):\n return self.offset", "def start_index(self):\r\n return (self.per_page * (self.page_number - 1)) + 1", "def start(self) -> global___Pos:", "def offset(self):\n self._fetch_if_needed()\n return self._offset", "def startIndex(self):\n return self._startIndex", "def locus_start(self):\n return int(open(self.locus_file).read().split('\\t')[3])", "def get_alignment_offset(self):\n\n return 0", "def offset_point(self,base, offset):\r\n return (base[0] + offset[0], base[1] + offset[1])", "def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')", "def start_loc(self) -> str:\n return self._start_loc", "def wm_offset(self):\n return self.get_par(\"offset\")", "def _leaf_start(self):\n return (self._bounds_tree.shape[0] + 1) // 2 - 1", "def page_from(self):\n return 0 if self.page_size == 0 else self.page_slice[0] + 1", "def get_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...", "def _get_start(self):\n return self._start", "def top_offset(self):\n raise NotImplementedError", "def StaggeredStart(self):\n\t\treturn self._get_attribute('staggeredStart')", "def offset(self):\n return _PositionD(self._dx, self._dy)", "def loc0(self):\n return slice(self.start - 1, self.end)", "def first_hbin_offset(self):\n return 0x1000", "def calculate_segment_bin_start(startbin, stopbin, nbin, 
fraction_step=1):\n st = np.arange(startbin, stopbin, int(nbin * fraction_step), dtype=int)\n if st[-1] + nbin > stopbin:\n return st[:-1]\n return st", "def GetCurrentOffset():\r\n return GetData().offsetCurrent", "def starting_mark(height):\n return round(height * LINEAR_RELATION + OFFSET, 2)", "def get(self, offset: int) -> Position:\n line = bisect_right(self.line_starts, offset) - 1\n character = offset - self.line_starts[line]\n return Position(line=line, character=character)", "def seek_to_start_time(self):\n return 0", "def tell(self):\n\n return make_virtual_offset(self._block_start_offset, self._within_block_offset)", "def offset(self, offset):\n raise NotImplementedError(\"This should have been implemented.\")", "def offset(self):\n return self.query.offset", "def offset(self):\n return self.unpack_dword(0x0)", "def get_offset():\n try:\n offset = open(offset_file, 'r+')\n except IOError as e:\n offset = open(offset_file, 'a+')\n o = offset.readline()\n if len(o) == 0 or o == \"\\n\":\n o = 0\n return o\n offset.close()", "def get_start_plus_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n start_plus_coordinate = (self.__location[0] + 1,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n start_plus_coordinate = (self.__location[0],\r\n self.__location[1] + 1)\r\n return start_plus_coordinate", "def _get_offset(self, lnum, offset):\n start, end = self._get_linespan(lnum)\n length = end - start\n if offset < 0 or offset >= length:\n raise IndexError(\"offset not in 0..%d\" % length)\n\n return start + offset", "def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def get_segment(self):\n return self.segment", "def get_segment(self):\n return self.segment", "def move_to_line_start(self) -> None:\n self.index = self.buffer.get_line_start(self.index)", "def smpte_offset(self) -> int:\n return self.__smpte_offset", "def getStartAndEndCoordinates(alignedSegment):\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq)", "def setStart(self, *args):\n return _libsbml.LineSegment_setStart(self, *args)", "def get_start(self):\n return self._start", "def getOffset(self):\n return _libsbml.Unit_getOffset(self)", "def start_word(self):\n return self._start", "def _calculate_chunk_offsets(self):\n offset = 0\n offsets = []\n for chunk in self.data.iterchunks():\n offsets.append(offset)\n offset += len(chunk)\n return np.array(offsets)", "def find_offset(self,value):\n return self.header.find_offset(value)", "def LOWER_START():\n return 7", "def default_start_index(self):\n return self._default_start_index", "def next_section_start_pos(text, start):\n section_re = re.compile(\"^.*\\n-+$\", re.I|re.MULTILINE) \n next_section = section_re.search(text, start)\n return len(text) if next_section is None else next_section.start()" ]
[ "0.75695366", "0.7171375", "0.7162001", "0.71174693", "0.7113862", "0.7081324", "0.7017677", "0.7017677", "0.7017677", "0.69003886", "0.679636", "0.67469805", "0.66896033", "0.6640042", "0.66054326", "0.6576339", "0.6576339", "0.6576339", "0.6549182", "0.6549182", "0.64778554", "0.64558816", "0.64442253", "0.64342797", "0.6418956", "0.64077985", "0.64049786", "0.6402911", "0.64003646", "0.638865", "0.6385033", "0.6385033", "0.6385033", "0.6385033", "0.6385033", "0.6385033", "0.6385033", "0.6371675", "0.63631934", "0.6361697", "0.6351454", "0.63429856", "0.6318677", "0.6312058", "0.6275796", "0.6260594", "0.62519825", "0.62487715", "0.6223022", "0.62132573", "0.61914384", "0.61911964", "0.61734337", "0.6171127", "0.6162422", "0.6157426", "0.6147855", "0.6137292", "0.6121506", "0.61203235", "0.61059994", "0.6104696", "0.6102388", "0.60867345", "0.6081676", "0.60810596", "0.6057521", "0.60522157", "0.6040938", "0.6039531", "0.60309374", "0.60111266", "0.60066885", "0.5987751", "0.5986817", "0.5983677", "0.5983013", "0.59812325", "0.59808517", "0.5972071", "0.59664786", "0.5956798", "0.5943219", "0.5937088", "0.5922044", "0.5912881", "0.590889", "0.59062207", "0.59062207", "0.5903912", "0.58989215", "0.5881209", "0.58795863", "0.58773816", "0.5875343", "0.5866637", "0.58660454", "0.5863863", "0.58333147", "0.5813235", "0.5812998" ]
0.0
-1
End offset of the segment
def end(self):
    return self.end_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEnd(self):\n return _libsbml.LineSegment_getEnd(self)", "def end(self):\n return self.start + self.size", "def endaddr(self):\n return self.startaddr + self.size", "def _get_end(self):\n return self._end", "def get_end(self):\n return self.__end", "def getEnd(self) -> long:\n ...", "def end_address(self):\n return self.address + len(self.data)", "def getEndAddressOfRawData(self) -> long:\n ...", "def end(self):\n return self._t0 + self.length", "def get_end(self):\n return self._end", "def end(self) -> pos.Pos:\n return self.__end", "def end(self):\n return self.__end_line", "def lastaddr(self):\n return self.startaddr + self.size - 1", "def end(self):\n return self.range().end()", "def max_offset(self):\n return self.offset + self.filesize - 1", "def get_end_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc_after(\n self.raw,\n )", "def __len__(self):\n return self.end - self.begin", "def end(self) -> int:\n if self.is_leaf():\n return KEY_SIZE * 8\n\n return self.data_bytes[ProofPath._Positions.LEN_POS]", "def get_line_end(self):\n return self._line_end", "def bottom_offset(self):\n raise NotImplementedError", "def get_endline(self):\n return self.get_attribute(\"endline\")", "def end (self):\n return self._end if self._end != self.inf else self.e", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def end(self, finish=None):\n return self.bounds(finish=finish)[1]", "def i (self):\n\n return self.end - 1", "def tell(self):\n return self.offset", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def p (self):\n\n return self.end - 1", "def offset_segment(self, i):\n return self.segments[i % len(self.segments)]", "def payload_end(self):\n return self.total_length - 4", "def get_end_plus_coordinate(self):\r\n if self.__orientation == Direction.VERTICAL:\r\n end_plus_coordinate = (self.__location[0] + self.__length,\r\n self.__location[1])\r\n if self.__orientation == Direction.HORIZONTAL:\r\n end_plus_coordinate = (\r\n self.__location[0], self.__location[1] + self.__length)\r\n return end_plus_coordinate", "def test_sv_end_svend():\n # Example:\n # 2 321682 . 
T <DEL> 6 PASS SVTYPE=DEL;END=321887;SVLEN=-205;CIPOS=-56,20;CIEND=-10,62 GT:GQ 0/1:12\n end = sv_end(pos=321682, alt=\"<DEL>\", svend=321887, svlen=-205)\n assert end == 321886", "def end(self):\n return self.__end", "def end_point(self) -> Vec3:\n v = list(self.vertices([self.dxf.end_angle]))\n return v[0]", "def length(self):\n if self.is_null():\n return 0\n return self.end - self.begin", "def end(self):\n\t\treturn self._end", "def end(self):\n return self._end", "def end(self):\n return self._end", "def end(self):\n return self._end", "def end(self):\n return self._get('end')", "def tell(self):\n return self._offset", "def last_pos(self):\n return self.locs[self.indices[-1], 2:4]", "def get_end(self):\n return self._start + self._duration", "def get_end(self):\n\n return self.end_cycle", "def last_segment(self):\n\t\tseg_sort = sorted(self.segments, key=lambda x: stringutil.extract_numbers(x.filename))\n\t\tif seg_sort:\n\t\t\treturn seg_sort[-1]\n\t\telse:\n\t\t\treturn None", "def getEndExplicitlySet(self):\n return _libsbml.LineSegment_getEndExplicitlySet(self)", "def end(self):\r\n return conf.lib.clang_getRangeEnd(self)", "def _findExonEnd(self, exonRecs, iBlkStart):\n iBlkEnd = iBlkStart + 1\n while (iBlkEnd < len(exonRecs)) and (self._tGapSize(exonRecs, iBlkEnd) < minIntronSize):\n iBlkEnd += 1\n return iBlkEnd, exonRecs[iBlkEnd - 1].end - exonRecs[iBlkStart].start", "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def affected_end(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based end position, behind last REF base\n return (self.POS - 1) + len(self.REF)", "def end(self):\n self.set_initial_offset(1e6)", "def end(self):\n return self.properties.get(\"end\", DateTimeTimeZone())", "def setEnd(self, *args):\n return _libsbml.LineSegment_setEnd(self, *args)", "def get_basic_block_end_from_ea( ea ): \r\n\tlastea = ea\r\n\twhile get_first_fcref_from( ea ) == BADADDR and ea != BADADDR and \\\r\n\t\tget_first_fcref_to( get_first_cref_from(ea) ) == BADADDR:\r\n\t\tlastea = ea\r\n\t\tea = get_first_cref_from( ea )\r\n\tif ea == BADADDR:\r\n\t\treturn lastea\r\n\treturn ea", "def offset(self, start):\n if self.out_start is not None:\n self.out_start += start\n if self.start is not None:\n self.start += start\n if self.in_start is not None:\n self.in_start += start\n if self.in_end is not None:\n self.in_end += start\n if self.end is not None:\n self.end += start\n if self.out_end is not None:\n self.out_end += start", "def max_pos(self, start, end, header) -> int:", "def symbolic_end(self):\n return self.symbolic_bounds[1]", "def end(self, end: pos.Pos) -> None:\n self.__end = end", "def end(self) -> pdarray:\n return self._ends", "def test_sv_end_bnd():\n # Example:\n # 2\t321681\tbnd_W\tG\tG]17:198982]\t6\tPASS\tSVTYPE=BND;MATEID=bnd_Y\tGT\t0/1\n end = sv_end(pos=321681, alt=ALT, svend=None, svlen=None)\n assert end == 198981", "def GetSRange(self):\n ...", "def segment_n(self):\n return len(self.segment_lengths)", "def at(self):\n return self.data[self.end]", "def page_end(self):\n self._npos = self._npages - 1\n self.display()", "def get_segment(self):\n return self.segment", "def get_segment(self):\n return self.segment", "def end(self):\n if \"end\" in self._prop_dict:\n if 
isinstance(self._prop_dict[\"end\"], OneDriveObjectBase):\n return self._prop_dict[\"end\"]\n else :\n self._prop_dict[\"end\"] = DateTimeTimeZone(self._prop_dict[\"end\"])\n return self._prop_dict[\"end\"]\n\n return None", "def inc_end(self):\n return self._exc_end - TIMERANGE_PRECISION", "def offset_from_start(self, part):\n index = self.parts.index(part)\n return sum([p.length for p in self.parts[:index]])", "def last_hbin_offset(self):\n from warnings import warn\n warn(\"last_hbin_offset is obsolete, use hbins_size instead!\")\n return self.unpack_dword(0x28)", "def last(self):\n return self._make_position(self._trailer._prev)", "def end(self) -> Vertex:\n return self._end", "def get_end_point_marker(self) -> PositionMarker:\n assert self.pos_marker\n return self.pos_marker.end_point_marker()", "def PageEnd(self, default=None):\n return self.data.get('page_end', default)", "def PageEnd(self, default=None):\n return self.data.get('page_end', default)", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def end_of_line():\r\n set_point(point().end_of_line())", "def _chunk_end(c):\n end = None\n if isinstance(c, list):\n for e in c:\n if end is None or e.offset + e.length > end:\n end = e.offset + e.length\n else:\n end = c.offset + c.length\n return end", "def get_end_vertex(self):\n\n return self._end_vertex", "def get_feature_start_end(feature_record):\n return (feature_record.location.start.position+1, feature_record.location.end.position)", "def get_last_seg(*args):\n return _ida_segment.get_last_seg(*args)", "def height(self):\n return abs(self.end[1] - self.start[1])", "def position_last(self):\n return self._position_last", "def start_offset(self):\n return self.get_info_value(\"D_STARTOFFS\")", "def __len__(self):\n if self.e > self.s: return self.e - self.s + self.len_tail()\n else: return self.memory.size - self.s + self.e + self.len_tail()\n\n # overwrite build in functions to add annotations", "def elemoffset(self):\n return self.offset // self.itemsize", "def data_offset(self):\n return self._offset + 0x4", "def get_ends(self): \n return self.last_words", "def _distance_to_last_event(self, step):\n if self.last_off is None:\n raise ValueError('No events in the stream')\n return step - self.offset - self.last_off", "def epoch_end(self):\n pass", "def tell(self):\n\n return make_virtual_offset(self._block_start_offset, self._within_block_offset)" ]
[ "0.73068535", "0.72605735", "0.72010446", "0.699925", "0.6922253", "0.6915124", "0.68964475", "0.6843549", "0.68276006", "0.6826976", "0.67586005", "0.6580891", "0.6480106", "0.64694715", "0.64635617", "0.6447487", "0.64299506", "0.6396373", "0.63472545", "0.63421696", "0.6340457", "0.6335594", "0.62650776", "0.6253964", "0.6220686", "0.61957794", "0.6178688", "0.6178688", "0.6178688", "0.6175778", "0.61500037", "0.6118137", "0.6111716", "0.6086628", "0.6070568", "0.6064888", "0.6051911", "0.6040022", "0.6029637", "0.6029637", "0.6029637", "0.59975", "0.5978997", "0.5977365", "0.59707415", "0.5961127", "0.59542185", "0.5936606", "0.5933485", "0.5929176", "0.59185", "0.59185", "0.5915115", "0.5914364", "0.58788824", "0.586396", "0.5857188", "0.58444566", "0.584204", "0.5821283", "0.5820167", "0.58167976", "0.5809139", "0.57852787", "0.5783505", "0.57786465", "0.57781667", "0.5778083", "0.5778083", "0.57763344", "0.57733864", "0.5763945", "0.57621145", "0.5749069", "0.57377267", "0.57358557", "0.5734165", "0.5734165", "0.57340544", "0.57340544", "0.57340544", "0.57340544", "0.57340544", "0.57340544", "0.57340544", "0.57314277", "0.57184166", "0.57170546", "0.57163614", "0.5698255", "0.56835437", "0.5683414", "0.5681055", "0.5676068", "0.5662634", "0.5659555", "0.5658505", "0.56464314", "0.56447357", "0.56344956" ]
0.6141078
31
Name of the tag
def name(self):
    return self.name_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag(self) -> str:\n return self.name or ''", "def get_tag_name(self):\n\n pass", "def tag_name(self) -> str:\n return pulumi.get(self, \"tag_name\")", "def get_name(self):\n return self.tagnode", "def tag_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"tag_name\")", "def tag(self) -> str:\n return pulumi.get(self, \"tag\")", "def tag(self) -> str:\n return self._tag", "def name(self):\r\n pass", "def name(self):\n pass", "def get_name_tag(obj):\n if 'Name' in obj.tags:\n return obj.tags['Name']\n else:\n return \"\"", "def tag(cls):\n pass", "def name(self, name):\n pass", "def tag(self):\n return self._tag", "def wantsNametag(self):\n return 0", "def tag(self):\n return self._tag", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n return self.doc.get('name', self.identifier())", "def name_type(self):\n return self.tag(\"name_type\")", "def tag(self,name):\n return self._tags.get(name,None)", "def name(self):\n ...", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def tag(self):\n return self.tag_", "def name ( self ) :\n return self.__name if self.__name else ''", "def get_tag_name(self, xml):\r\n tag = etree.fromstring(xml).tag\r\n return tag", "def get_name_tag(obj):\n if 'Name' in obj.tags:\n return obj.tags['Name']\n else:\n return \"\"", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self):\n raise NotImplementedError # pragma: no cover", "def name(self): \n\t\treturn self._name", "def tag(self) -> 'genmod.Tag':\n return self._generation.tag", "def name(self):\n\t\treturn self.name_", "def name(self):\n raise NotImplementedError()", "def name(self):\n raise NotImplementedError()", "def name(self, name):\n return self.name", "def name(self) -> \"str\":\n return self._attrs.get(\"name\")", "def name(self) -> \"str\":\n return self._attrs.get(\"name\")", "def name(self) -> \"str\":\n return self._attrs.get(\"name\")", "def get_name(self):\n pass", "def get_name(self):\n pass", "def name(self) -> str: # pragma: no cover", "def name(self):\n return self._name", "def name(self):\n\t\treturn self._name", "def name(self):\n\t\treturn self._name", "def name():\n pass", "def name():\n pass", "def getName(self):", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self) -> str:\n raise NotImplementedError", "def name(self):\r\n return None", "def 
Name(self):\r\n\t\treturn self._get_attribute('name')", "def get_name(self):", "def get_name(self):", "def name(self):\n cSld = self._element.cSld\n return cSld.get('name', default='')", "def name(self):\n return None", "def name(self) -> str:\n\t\traise NotImplementedError", "def tag(self):\n\n return self._tag", "def getName(self):\n return _libsbml.XMLToken_getName(self)", "def get_name(self):\n return", "def name(self):\n return self[\"name\"]", "def name(self):\r\n return self._name", "def name(self):\r\n return self._name", "def name(self):\r\n return self._name", "def name(self):\r\n return self._name", "def name(self):\r\n return self._name", "def name(self):\r\n return self._name" ]
[ "0.8629432", "0.856235", "0.84365237", "0.80975956", "0.8038163", "0.7732518", "0.764949", "0.76234204", "0.7597434", "0.7510624", "0.74376386", "0.74048376", "0.7393784", "0.73702985", "0.73563516", "0.7353813", "0.7353813", "0.7353813", "0.7353813", "0.7344953", "0.73428065", "0.7325516", "0.73175645", "0.73166806", "0.73166806", "0.73166806", "0.73166806", "0.73166806", "0.73166806", "0.731018", "0.731018", "0.7276557", "0.7276557", "0.7276557", "0.7276557", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72753125", "0.72688204", "0.7262401", "0.7244092", "0.7233988", "0.7219562", "0.7219562", "0.7219562", "0.7219562", "0.7219562", "0.719159", "0.7169646", "0.7147433", "0.71352446", "0.71174514", "0.71174514", "0.71131676", "0.71100366", "0.71100366", "0.71100366", "0.71058166", "0.71058166", "0.7103272", "0.7102933", "0.710225", "0.710225", "0.70979863", "0.70979863", "0.70973885", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7063864", "0.7048014", "0.70208365", "0.7020709", "0.70171666", "0.70171666", "0.7015356", "0.7004238", "0.70017296", "0.6999793", "0.69914037", "0.69906926", "0.6988755", "0.6985948", "0.6985948", "0.6985948", "0.6985948", "0.6985948", "0.6985948" ]
0.0
-1
Value of the tag
def tag(self):
    return self.tag_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tag_value\")", "def get(self):\n return self.tag.get()", "def value(self):\n return self.raw.get_attribute(\"value\")", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\n return self.get_attribute(\"value\", str(self.children))", "def getValue(self):\n return self.text()", "def value(self):\n\n return self.element().get_attribute('value') if self.exists() else ''", "def value(self):\n return self.get_data(\"value\")", "def value(self):\n return self.node_value", "def read_value(self):\n return self.load_attr(\"value\")", "def value(self, tag, default=None):\n element = self._root.find(tag)\n if element is not None:\n return convert_to_primitive(element.text)\n return default", "def value(self) -> str:\n return self[\"Value\"]", "def _value(self):\n if self.data:\n # Display tags as a comma-separated list.\n return ','.join([tag.name for tag in self.data])\n\n return ''", "def Value(self) -> str:", "def tag(self):\n return self._tag", "def value(self) -> str:\n return self.textContent", "def value(self):\r\n return self._data['value']", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self) -> str:\n return pulumi.get(self, \"value\")", "def value(self):\n return self.string", "def _get_tagged_value(self, key):\n return self._tagged_values_dict[key]", "def tag(self):\n return self._tag", "def get_value(self):\n pass", "def value(self):\n\n\t\treturn self.__value", "def getval(self):\r\n return self.value", "def value(self) -> Sequence[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Sequence[str]:\n return pulumi.get(self, \"value\")", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def getValue(self):\n return self.field.text()", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def getValue(self):\n return self.value", "def value(self):\n return self.__value", "def value(self):\n return self.__value", "def get_value(self):", "def get_tag(self):\n return self.tag", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def tag(self) -> str:\n return pulumi.get(self, \"tag\")", "def value(self):\n return self.value()._value", "def value(self):\n if hasattr(self, '_m_value'):\n return self._m_value if hasattr(self, '_m_value') else None\n\n self._m_value = 
self.lookup_table[(self.tag - 75)]\n return self._m_value if hasattr(self, '_m_value') else None", "def value (self):\r\n return self.entry.get()", "def getValue(self):\n return self.field.currentText()", "def value (self) :\n\n return self.__value__", "def get_value(self, data):\n value = data['value']\n return value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def getValue(self):\n return self.value", "def get(self):\n return self.match.group(\"value\")", "def getvalue(self):\n return str(self.data)", "def value(self):\n return self['input'].value()", "def value(self):\n return self._value_", "def value(self):\n return self._val", "def value(self):\n return str(self.input.text())", "def get_value(self):\n return self._value" ]
[ "0.7868648", "0.764538", "0.76248944", "0.7556161", "0.7556161", "0.7556161", "0.747307", "0.74238324", "0.74004674", "0.7335146", "0.730605", "0.7223917", "0.72173995", "0.7187691", "0.71812", "0.7105039", "0.70779276", "0.7067446", "0.70370305", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.70255345", "0.7017001", "0.69997185", "0.69717973", "0.6970137", "0.6952174", "0.6924803", "0.69167507", "0.69167507", "0.6908055", "0.6908055", "0.6908055", "0.68991095", "0.68797874", "0.68797874", "0.68797874", "0.6876776", "0.68737537", "0.68737537", "0.685913", "0.684301", "0.6841991", "0.6841991", "0.6840795", "0.6840795", "0.68347377", "0.6833244", "0.6815617", "0.68145835", "0.6808624", "0.67988294", "0.67983615", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67882293", "0.67865705", "0.6780483", "0.67772865", "0.67707044", "0.6768658", "0.67572635", "0.6744409", "0.672599" ]
0.6916473
41
Computes the Jacobian of y wrt x assuming minibatch mode.
def _get_minibatch_jacobian(y, x, create_graph=False):
    assert y.shape[0] == x.shape[0]
    y = y.view(y.shape[0], -1)

    # Compute Jacobian row by row.
    jac = []
    for j in range(y.shape[1]):
        dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,
                                      create_graph=True)[0].view(x.shape[0], -1)
        jac.append(torch.unsqueeze(dy_j_dx, 1))
    jac = torch.cat(jac, 1)
    return jac
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_minibatch_jacobian(y, x):\n assert y.shape[0] == x.shape[0]\n y = y.view(y.shape[0], -1)\n\n # Compute Jacobian row by row.\n jac = []\n for j in range(y.shape[1]):\n dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,\n create_graph=True)[0].view(x.shape[0], -1)\n jac.append(torch.unsqueeze(dy_j_dx, 1))\n jac = torch.cat(jac, 1)\n return jac", "def jacobian(self, x):\n pass", "def fd_jacobian(self,y):\n res0 = self.residual(y)\n eps = 1e-6\n dofs = y.shape[0]\n jac_approx = np.zeros((dofs,dofs))\n for i in range(dofs):\n y_temp = np.copy(y)\n y_temp[i]+=eps\n\n r2 = self.residual(y_temp)\n dr = (r2-res0)/eps\n for j in range(dofs):\n jac_approx[j,i] = dr[j]\n \n return jac_approx", "def jacobian(self, x):\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std / self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad", "def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])", "def jacobian(self,x,y,l,a):\n J = np.zeros([*x.shape,2,2])\n\n J = _jacobian(x,y,l,a,J)\n\n return J", "def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx", "def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx,\n extra_feed_dict):\n # Complex vectors are treated as vectors of twice as many reals.\n if x.dtype.is_complex:\n x_shape = tuple(x_shape) + (2,)\n dy_factor = 2 if dy.dtype.is_complex else 1\n\n # To compute the jacobian, we treat x and y as one-dimensional vectors.\n x_size = _product(x_shape)\n x_val_size = _product(x_shape[1:]) # This is used for sparse gradients\n dy_size = _product(dy_shape) * dy_factor\n\n # Allocate 2-D Jacobian, with x dimensions smashed into the first\n # dimension and y dimensions smashed into the second.\n jacobian = np.zeros((x_size, dy_size),\n dtype=x.dtype.real_dtype.as_numpy_dtype)\n\n # For each of the entry of dy, we set this to be 1 and\n # everything else to be 0 and compute the backprop -- this will give us one\n # one column of the Jacobian matrix.\n dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)\n dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)\n sess = tf.get_default_session()\n for col in range(dy_size):\n dy_data_flat[col] = 1\n if isinstance(dx, tf.IndexedSlices):\n backprop_indices, backprop_values = sess.run(\n [dx.indices, dx.values],\n feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n for i, v in zip(backprop_indices, backprop_values):\n r_begin = i * x_val_size\n r_end = r_begin + x_val_size\n jacobian[r_begin:r_end, col] += v.flat\n else:\n assert isinstance(dx, tf.Tensor), \"dx = \" + str(dx)\n backprop = sess.run(\n dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n jacobian[:, col] = backprop.ravel().view(jacobian.dtype)\n dy_data_flat[col] = 0\n\n # If the output is empty, run the gradients at least once and make sure\n # they produce zeros.\n if not dy_size:\n backprop = 
sess.run(\n dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n if backprop.shape != x_data.shape:\n raise ValueError(\"Empty gradient has wrong shape: expected %s, got %s\" %\n (x_data.shape, backprop.shape))\n if np.any(backprop):\n raise ValueError(\"Empty tensor with nonzero gradients\")\n\n return jacobian", "def jacobian(f, x, epsilon = 1e-10):\n f_ = f(x)\n value = np.zeros((len(f_), len(x)))\n \n for i in range(len(x)):\n f_ = partial_derivative(f, x, i, epsilon)\n value[:,i] = f_\n\n return value", "def jacobian_c(self, x, out=None, **kwargs):\n return empty_matrix(0, self.nx)", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def jacobian(self, x1, x2, out=None):\n raise NotImplementedError", "def jacobian_c(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_c(x, out=out, **kwargs)", "def jacobian(f, x):\n\n B, N = x.shape\n x.requires_grad = True\n in_ = torch.zeros(B, 1)\n \n y = f(in_, x)\n jacobian = list()\n \n for i in range(N):\n v = torch.zeros_like(y)\n v[:, i] = 1.\n dy_i_dx = torch.autograd.grad(y,\n x,\n grad_outputs=v,\n retain_graph=True,\n create_graph=True,\n allow_unused=True)[0] # shape [B, N]\n jacobian.append(dy_i_dx)\n\n jacobian = torch.stack(jacobian, dim=2).requires_grad_()\n\n return jacobian", "def jacobian(self, dt):\n return self._F_cache", "def jacobian(self, dt):\n raise NotImplementedError", "def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def numerical_jacobian (fhandle, x, **args):\n \n y = fhandle (x, **args)\n numRows, numCols = (len (y), len (x))\n J = np.zeros ((numRows, numCols))\n\n for col in range (0, numCols):\n xPrime = x.copy ()\n deltaX = max (1e-4*x[col], 1e-6)\n xPrime[col] += deltaX\n yPrime = fhandle (xPrime, **args)\n J[:, col] = (yPrime - y) / deltaX\n\n return J", "def jacobian(f, x, dx):\n x = np.atleast_1d(x)\n dx = np.atleast_1d(dx)\n nx = len(x)\n ny = 0\n jacobi = None\n e = np.zeros(nx)\n for ix in xrange(nx):\n e *= 0\n e[ix] = 1\n deriv = np.atleast_1d((f(x + e * dx) - f(x - e * dx)) / (2 * dx[ix]))\n if ix == 0:\n ny = len(deriv)\n jacobi = np.empty((ny, nx))\n jacobi[:, ix] = deriv\n return jacobi", "def jacobian_g(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out)", "def jacobian(self, c):\n\n raise NotImplementedError", "def EvaluateJacobian(x):\n j = np.zeros((NOBSERVATIONS, 3))\n\n for i in range(NOBSERVATIONS):\n base = np.exp(-x[0] * t[i]) / (x[1] + x[2] * t[i])\n\n j[i][0] = t[i] * base\n j[i][1] = base / (x[1] + x[2] * t[i])\n j[i][2] = base * t[i] / (x[1] + x[2] * t[i])\n\n return j", "def __update_jacobian(self, x, F):\n old_err = _n.seterr(divide='raise')\n\n try:\n y = F - self.F\n s = x - self.x\n\n zt = None\n if self.update_type == BroydenSolver.UPDATE_ICUM:\n maxi = abs(_n.ravel(y)).argmax()\n zt = _n.transpose(_n.zeros((1,self.n), _n.float_))\n zt[0, maxi] = 1\n elif self.update_type == BroydenSolver.UPDATE_GOOD_BROYDEN:\n # (Good) Broyden update\n zt = _n.dot(_n.transpose(s), self.H)\n elif self.update_type == BroydenSolver.UPDATE_BAD_BROYDEN:\n # (Bad) Broyden update\n zt = _n.transpose(y)\n else:\n raise ValueError(\"Unknown update type %s\" % (self.update_type))\n\n self.H = self.H \\\n + _n.dot(s - _n.dot(self.H, y), zt) / _n.dot(zt, y)\n except FloatingPointError:\n warnings.warn(\"%% Broyden reset: singular\", 
BroydenWarning)\n self.H = _n.identity(self.n) / self.initial_scale\n\n _n.seterr(**old_err)", "def jacobian(x, u):\n yaw = x[2, 0]\n v = u[0, 0]\n jac = np.array([\n [1.0, 0.0, -dt * v * math.sin(yaw), dt * math.cos(yaw)],\n [0.0, 1.0, dt * v * math.cos(yaw), dt * math.sin(yaw)],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n\n return jac", "def jacobian(self, xs):\n rx_list = []\n for nx,x in enumerate(xs):\n \n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n return wrapped_functions.jacobian(self.tape_tag, self.x)", "def compute_jacobian(self):\n \n d = len(self.theta)\n n,p = self.b.shape\n \n if not self.quiet:\n print \"Running jacobian computation.\"\n print \"D will be a {}x{}x{} array\".format(p,n,d)\n \n if self.x is None:\n raise ValueError('Can not compute Jacobian. self.x is None.')\n \n #print \"n={},n={}\".format(n,d);\n \n D = numpy.zeros((p,n,d))\n \n \n for k in range(d):\n A_k, b_k = self.get_diff_A_b(k)\n \n for i in range(p):\n D[i,:,k] = - self.solver.backsolve(A_k.dot(self.x[:,i]) - b_k[:,i])\n \n return D", "def jacobian_g(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_g(x, out=out, **kwargs)", "def jacobian(self,var,g=None):\n if (g==None):g=self.g\n jac=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n for j in range(self.n):\n if(i==j): jac[i][j]=2.*(var[i]+1.)-g*np.sum([self.XXZ.Z(i,k) for k in range(self.n) if k!=i])\n else: jac[i][j]=g*self.XXZ.Z(i,j)\n for i in range(self.n):\n jac[self.n][i]=1.\n return jac", "def cost(self, X, y) :\n ### ========== TODO : START ========== ###\n # part d: compute J(theta)\n #we know for linear/polynomial regression, the cost is the square of the errors\n X = self.generate_polynomial_features(X)\n y_pred_vector = np.dot(X, self.coef_)\n cost = np.dot((y-y_pred_vector).transpose(),(y-y_pred_vector))#write in the matrix form\n ### ========== TODO : END ========== ###\n return cost", "def transform_and_compute_jacobian(self, xj):\n x = xj[:, :self.d].detach()\n log_j = xj[:, -1]\n\n x.requires_grad = True\n y = self.flow_(x)\n\n n_batch = xj.shape[0]\n\n jx = torch.zeros(n_batch, self.d, self.d).to(log_j.device)\n directions = torch.eye(self.d).to(log_j).unsqueeze(0).repeat(n_batch, 1, 1)\n\n for i in range(self.d):\n jx[:, i, :] = torch.autograd.grad(y, x, directions[:, i, :],\n allow_unused=True, create_graph=True, retain_graph=True)[0]\n x.requires_grad = False\n x.grad = None\n\n log_det_j = torch.log(torch.abs(torch.det(jx)))\n return torch.cat([y.detach(), (log_j + log_det_j).unsqueeze(1)], 1)", "def jacobian(self, b):\n \n # Substitute parameters in partial derivatives\n subs = [pd.subs(zip(self._b, b)) for pd in self._pderivs]\n # Evaluate substituted partial derivatives for all x-values\n vals = [sp.lambdify(self._x, sub, \"numpy\")(self.xvals) for sub in subs]\n # Arrange values in column-major order\n return np.column_stack(vals)", "def jacobian(self, theta, force=False):\n \n # Update the internal solution\n self.solution_update(theta, force)\n \n # Run the internal jacobian calculation\n return self.compute_jacobian()", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def get_jacobian_spatial(self, qs=None) -> np.ndarray:\n if qs is None:\n qs = self.get_current_joint_position()\n return self.robot.jacob0(qs)", "def costFunction(self, x, y ):\n self.yEst = 
self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def dJ(theta, x_b, y):\n return x_b.T.dot(self._sigmoid(x_b.dot(theta)) - y) / len(x_b)", "def _calculate_jacobian(self,\n x0: np.ndarray,\n step: float = 10 ** (-6)) -> np.ndarray:\n y0 = self._calculate_residual(x0)\n\n jacobian = []\n for i in enumerate(x0):\n x = x0.copy()\n x[i] += step\n y = self._calculate_residual(x)\n derivative = (y - y0) / step\n jacobian.append(derivative)\n jacobian = np.array(jacobian).T\n\n return jacobian", "def evaluate_jacobian(self, x, V):\n jac = self._numeric_jacobian(x, V, *self.model.params.values())\n return jac", "def calc_jacobian_numerical(model, x, dim, device, eps=1e-6):\n\n # set to eval mode but remember original state\n in_training: bool = model.training\n model.eval() # otherwise we will get 0 gradients\n\n # clone input to avoid problems\n x = x.clone().requires_grad_(True)\n\n # init jacobian\n J = torch.zeros(dim, x.shape[1])\n\n # iterate over input dims and perturb\n for j in range(dim):\n delta = torch.zeros(dim).to(device)\n delta[j] = eps\n J[:, j] = (model(x + delta) - model(x)).abs().mean(0) / (2 * eps)\n\n # reset to original state\n if in_training is True:\n model.train()\n\n return J", "def jacobin(y):\n\n df = np.zeros((3,3))\n\n df[0,0] = 77.27*(1.0 - y(1) -2.*8.375e-6*y(0))\n df[0,1] = 77.27*(1.0 -y(0) )\n df[0,2] = 0.0;\n df[1,0] = -1.0/77.27;\n df[1,1] = (-1.0/77.27)*(1.0+y(0))\n df[1,2] = 1.0/77.27\n df[2,0] = 0.161\n df[2,1] = 0.0\n df[2,2] = -0.161\n\n return df", "def jacobian(kernel: Kern, variable_points: ndarray, fixed_points: ndarray) -> ndarray:\n if isinstance(kernel, RBF):\n lengthscale = kernel.lengthscale.values[0]\n k = kernel.K(variable_points, fixed_points)\n\n # The (i, j, k)-th element of this is the k-th component of X_i - D_j.\n differences = variable_points[:, newaxis, :] - fixed_points[newaxis, :, :]\n\n return -k[:, :, newaxis] * differences / (lengthscale ** 2)\n else:\n raise NotImplementedError", "def _approx_jacobian(func, xbar, epsilons):\n\n n = xbar.shape[0]\n ybar = func(xbar)\n m = ybar.shape[0]\n\n J = np.zeros((m, n))\n \n for i in range(n):\n # Forward evaluation\n xf = np.copy(xbar)\n xf[i] = xbar[i] + epsilons[i]\n yf = func(xf)\n\n # Backward evaluation\n xb = np.copy(xbar)\n xb[i] = xbar[i] - epsilons[i]\n yb = func(xb)\n \n # Slope\n delta = yf - yb\n\n J[:, i] = delta / (2.0 * epsilons[i])\n\n return J", "def jacobian(self, A, B):\r\n\r\n # Compute the derivatives spectrally\r\n A_x_hat = self.calc_derivative(A, 'x')\r\n A_y_hat = self.calc_derivative(A, 'y')\r\n B_x_hat = self.calc_derivative(B, 'x')\r\n B_y_hat = self.calc_derivative(B, 'y')\r\n\r\n # Compute the values in realspace for multiplication\r\n A_x = self.inverse_fft(self.dealias_pad(A_x_hat))\r\n A_y = self.inverse_fft(self.dealias_pad(A_y_hat))\r\n B_y = self.inverse_fft(self.dealias_pad(B_y_hat))\r\n B_x = self.inverse_fft(self.dealias_pad(B_x_hat))\r\n\r\n # Compute the Jacobian\r\n J_canonical = (A_x*B_y) - (B_x*A_y)\r\n\r\n # Return to spectral space the return\r\n return self.dealias_unpad(self.forward_fft(J_canonical))", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def jacobian(self, dt):\n if dt not in 
self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]", "def newton_jacobian(f, x0, Jf, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Calculate Jacobian\n J = Jf(x)\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin backtracking linesearch\n lamb = 2.0\n while True: \n lamb /= 2\n y = x + Dx * lamb\n fy = f(y)\n\n fynorm = np.linalg.norm(fy)\n fxnorm = np.linalg.norm(fx)\n\n if (fynorm < (1 - lamb / 2) * fxnorm) or (lamb < (1 / 128.0)):\n break\n\n # Save latest approximation\n x = y\n fx = fy\n\n fxnorm = np.linalg.norm(fx)\n if fxnorm < eps:\n break\n\n return x", "def jacobian1(self,A):\r\n\r\n # Compute second derivatives in spectral space\r\n A_x_x_hat = self.calc_derivative(A, 'x', 'x')\r\n A_y_y_hat = self.calc_derivative(A, 'y', 'y')\r\n A_x_y_hat = self.calc_derivative(A, 'x', 'y')\r\n A_y_x_hat = self.calc_derivative(A, 'y', 'x')\r\n\r\n # Compute realspace representations for multiplication\r\n A_x_x = self.inverse_fft(self.dealias_pad(A_x_x_hat))\r\n A_y_y = self.inverse_fft(self.dealias_pad(A_y_y_hat))\r\n A_x_y = self.inverse_fft(self.dealias_pad(A_x_y_hat))\r\n A_y_x = self.inverse_fft(self.dealias_pad(A_y_x_hat))\r\n\r\n # Multiply in realspace\r\n J_canonical = (A_x_x*A_y_y) - (A_x_y*A_y_x)\r\n\r\n # Return to Fourier space and return spectrum\r\n return self.dealias_unpad(self.forward_fft(J_canonical))", "def fit(self, x, y):\n # *** START CODE HERE ***\n num_examples = x.shape[0]\n num_features = x.shape[1]\n iteration = 1\n if self.theta == None:\n self.theta = np.zeros((num_features,))\n while iteration <= self.max_iter:\n h_theta = np.dot(x, self.theta)\n g_theta = self.sigmoid(h_theta)\n J_cost = -np.mean(y*np.log(g_theta) + (1 - y)*np.log(1 - g_theta))\n H = 1/num_examples*(np.dot(np.transpose(g_theta*(1-g_theta))*np.transpose(x), x))\n J_prime = - 1/num_examples*np.dot(np.transpose(y - g_theta), x)\n d_theta = - np.linalg.solve(H, J_prime)\n self.theta += d_theta\n if np.linalg.norm(d_theta, 1) < self.eps:\n break\n if self.verbose:\n print(\"Loss value: \", J_cost)\n iteration += 1\n # *** END CODE HERE ***", "def jacobF(x, u):\n v_x =u[0, 0] \n v_y =u[1, 0] \n jF = np.matrix([ \n [1.0, 0.0, 1, 0],\n [0.0, 1.0, 0, 1],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n return jF", "def dynamax_jaccard(x, y):\n # feature generation\n u = np.vstack((x, y))\n m_x = fuzzify(x, u)\n m_y = fuzzify(y, u)\n # fuzzy jaccard\n m_inter = np.sum(np.minimum(m_x, m_y))\n m_union = np.sum(np.maximum(m_x, m_y))\n return m_inter / m_union", "def _get_jacobian(self):\n srcs, recs = self.srcs, self.recs\n if not self.sparse:\n jac = numpy.array(\n [ttime2d.straight([cell], '', srcs, recs, velocity=1.)\n for cell in self.mesh]).T\n else:\n shoot = ttime2d.straight\n nonzero = []\n extend = nonzero.extend\n for j, c in enumerate(self.mesh):\n extend((i, j, tt)\n for i, tt in enumerate(shoot([c], '', srcs, recs,\n velocity=1.))\n if tt != 0)\n row, col, val = numpy.array(nonzero).T\n shape = (self.ndata, self.nparams)\n jac = scipy.sparse.csr_matrix((val, (row, col)), shape)\n return jac", "def jacobian(self, t, x, u, w):\n a= u[0]\n theta = x[2]\n v = x[3]\n fx = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-v*np.sin(theta), v*np.cos(theta), 0, 
0],\n [np.cos(theta), np.sin(theta), 0, 0]])\n fu = np.array([[0, 0, 0, 1],\n [0, 0, 1, 0]])\n w = w * self.w_scale\n fw = np.array([[np.cos(theta), - np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, v, 0],\n [0, 0, 0, v]])\n return [fx, fu, fw]", "def calculate_jacobian(robot_position, landmark_pos):\n\n return None", "def jacobian(Lfrac, Lstar_10, qlf):\n D = np.tile(qlf.c_B*Lstar_10**qlf.k_B, [len(Lfrac),1])\n Lfrac_2D = np.tile(Lfrac, [len(qlf.c_B),1]).T\n return np.sum(-D*Lfrac_2D**qlf.k_B,axis=1) / np.sum(D*(qlf.k_B -1)*Lfrac_2D**qlf.k_B,axis=1)\n #return np.sum(D*(1.+qlf.k_B)*Lfrac_2D**qlf.k_B, axis=1)/np.sum(D*Lfrac_2D**qlf.k_B, axis=1)", "def objective_function(theta, X, y):\n # m number of training instances\n m = X.shape[0]\n jtheta = sum((np.dot(X, theta) - y)**2) / (2.0*m)\n return jtheta", "def _get_J(self, omega, y):\n x = y[:-1]\n newt_lambda = y[-1]\n J = np.zeros([len(x)+1, len(x)+1])\n J[:-1, :-1] = omega + newt_lambda*np.diagflat(1/(x**2))\n J[:-1, -1] = -1/x.ravel()\n J[-1, :-1] = 1\n return J", "def _compute_jacobian(self):\n q_sum = np.cumsum(self._q)\n self._sines = np.sin(q_sum)\n self._cosines = np.cos(q_sum)\n (s_1, s_12, s_123) = self._sines\n (c_1, c_12, c_123) = self._cosines\n self._jacobian = np.array([\n np.cumsum([\n self._jnt_lengths[2] * c_123,\n self._jnt_lengths[1] * c_12,\n self._jnt_lengths[0] * c_1\n ])[::-1], # compute jacobian 1st row\n np.cumsum([\n -self._jnt_lengths[2] * s_123,\n -self._jnt_lengths[1] * s_12,\n -self._jnt_lengths[0] * s_1\n ])[::-1] # jacobian 2nd row\n ])\n self._jacobian_psinv = np.matmul(\n self._jacobian.T,\n np.linalg.inv(np.matmul(self._jacobian, self._jacobian.T))\n )", "def jacobian(self, p):\n delta = 1.\n props = {'density': self.density}\n xp, zp = self.x, self.z\n verts = self.verts\n x, z = p\n jac = np.transpose([\n (talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)])\n ) / (2. * delta),\n (talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)]) -\n talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)])\n ) / (2. * delta)])\n return jac", "def softmax_jacobian_analytic(x, dim):\n y = F.softmax(x, dim)\n y[y != y] = 0 # replace nan-s with zeros\n J = torch.zeros((x.shape[dim],) + tuple(x.shape), dtype=x.dtype, device=x.device)\n si = [slice(None)] * len(y.shape)\n sj = [slice(None)] * len(y.shape)\n s = [slice(None)] * len(J.shape)\n for i in range(y.shape[dim]):\n si[dim] = i\n s[dim + 1] = i\n yi = y[tuple(si)]\n for j in range(y.shape[dim]):\n sj[dim] = j\n s[0] = j\n if i == j:\n J[tuple(s)] = yi * (1 - yi)\n else:\n yj = y[tuple(sj)]\n J[tuple(s)] = - yi * yj\n sj[dim] = slice(None)\n si[dim] = slice(None)\n s[dim + 1] = slice(None)\n return J", "def _reduce(self, x, y):\n coef = self._update_parallel_coef_constraints(x)\n self.coef_ = coef.T", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. 
the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n # dX/dp is simply the Jacobian of the model\n dX_dp = self.pdm.model.jacobian\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def costFunction(theta, X, y):\n\n # Initialize some useful values\n m = y.size # number of training examples\n J = np.sum(np.array([inner(theta, xi, yi) for xi, yi in zip(X, y)]))\n J /= m\n\n\n return J", "def convertBetaToJacobi(self,y):\n u = 0.5*(self.high+self.low)\n s = 0.5*(self.high-self.low)\n return (y-u)/(s)", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def newtonJacobian(self,r):\n #x_vec=np.array(r)\n x=r[0]\n y=r[1]\n jacobi=np.zeros([2,2], float)\n \n \n jacobi[0][0]=(4.0*(self.x_0-x)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][1]=(4.0*(self.y_0-y)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][0]=4.0*(self.x_0-x)*(self.y_0-y)*self.sfunc(x,y)\n jacobi[0][1]=jacobi[1][0]\n #print \"newton jacobian is \",jacobi\n try:\n return mat.inv(jacobi)\n except:\n print \"singular jacobi not invertable\"\n return 0", "def C(self, y, x):\n return self.minor(y,x).det()*(-1.0)**(y+x+2.0)", "def jacobian(self, p):\n verts = self.p2vertices(p)\n delta = np.array([0, 1])\n jac = np.empty((self.ndata, self.nparams))\n for i in range(self.nparams):\n diff = Polygon([verts[i + 2], verts[i + 1] - delta,\n verts[i], verts[i + 1] + delta], self.props)\n jac[:, i] = talwani.gz(self.x, self.z, [diff])/(2*delta[1])\n return jac", "def y(x):\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4", "def jeffreys(self, x):\n return 1./np.sqrt(x*(1.-x))", "def compute_jacobian(self, finger_id, q0):\n frame_id = self.tip_link_ids[finger_id]\n return pinocchio.computeFrameJacobian(\n self.robot_model,\n self.data,\n q0,\n frame_id,\n pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,\n )", "def __calc_jacobian_matrix(self):\n\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n self.jacobian_matrix = [diff(tf_matrix_first_to_last[:3, -1], self.q[i]).reshape(1, 3) for i in range(len(self.q))]\n self.jacobian_matrix = Matrix(self.jacobian_matrix).T # .T returns the transpose of matrix.", "def jacobian(self, v):\n from scipy.special import erf, erfcx\n def integrand(u_arr):\n \"\"\"Integrand of self-consistency equation\"\"\"\n integrand_all = erfcx(-u_arr)\n #integrand_all = np.zeros(u_arr.shape)\n #u_mask = u_arr < -4.0\n #u = u_arr[u_mask]\n #integrand_all[u_mask] = -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n #3.0 / (4.0 * u**5) - \n #15.0 / (8.0 * u**7))\n #integrand_all[~u_mask] = np.exp(u_arr[~u_mask]**2) * (1. + erf(u_arr[~u_mask]))\n return integrand_all\n\n\n mu_v = self.mu(v)\n sd_v = self.sd(v)\n low = (self.V_r - mu_v) / sd_v # reduced resting potential\n up = (self.theta - mu_v) / sd_v # reduced threshold\n f_low = integrand(low)\n f_up = integrand(up)\n jac_mat_1 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_mu\n jac_mat_2 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_var / (2. * sd_v**2)\n\n jac_T = np.diag(1. 
/ v**2) - \\\n jac_mat_1.T * (f_up - f_low) + \\\n jac_mat_2.T * (f_up * up - f_low * low)\n return jac_T.T", "def get_jac(wcs, cenx, ceny):\n import galsim\n\n crpix = wcs.crpix\n galsim_pos = galsim.PositionD(x=crpix[0], y=crpix[1])\n\n galsim_jac = wcs.jacobian(image_pos=galsim_pos)\n\n return ngmix.Jacobian(\n x=cenx,\n y=ceny,\n dudx=galsim_jac.dudx,\n dudy=galsim_jac.dudy,\n dvdx=galsim_jac.dvdx,\n dvdy=galsim_jac.dvdy,\n )", "def compute_cost(x, y, theta=[[0], [0]]):\n m = y.size\n h = x.dot(theta)\n j = 1 / (2 * m) * np.sum(np.square(h - y))\n return j", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n\n # compute dX/dp\n\n # dX/dq is the Jacobian of the global transform evaluated at the\n # mean of the model.\n dX_dq = self._global_transform_jacobian(points)\n # dX_dq: n_points x n_global_params x n_dims\n\n # by application of the chain rule dX_db is the Jacobian of the\n # model transformed by the linear component of the global transform\n dS_db = model_jacobian\n dX_dS = self.pdm.global_transform.jacobian_points(points)\n dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)\n # dS_db: n_points x n_weights x n_dims\n # dX_dS: n_points x n_dims x n_dims\n # dX_db: n_points x n_weights x n_dims\n\n # dX/dp is simply the concatenation of the previous two terms\n dX_dp = np.hstack((dX_dq, dX_db))\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def fit_mult_linear(X,y):\n # TODO complete the following code\n nsamp,natt=X.shape\n\n ones=np.ones((nsamp,1))\n X_orig=X\n X=np.hstack((ones,X_orig))\n X.shape\n Xt=np.transpose(X)\n\n beta = np.dot(np.linalg.inv(np.dot(Xt,X)),np.dot(Xt,y))\n return beta", "def fit(self, X, y):\n\n if issparse(X):\n X = X.tocsc()\n X_data = X.data\n X_indices = X.indices\n X_indptr = X.indptr\n\n (\n beta_hat_cyclic_cd_true,\n residuals,\n primal_hist,\n dual_hist,\n gap_hist,\n r_list,\n n_active_features_true,\n theta_hat_cyclic_cd,\n P_lmbda,\n D_lmbda,\n G_lmbda,\n safe_set,\n ) = sparse_cd(\n X_data,\n X_indices,\n X_indptr,\n y,\n self.lmbda,\n self.epsilon,\n self.f,\n self.n_epochs,\n self.screening,\n self.store_history,\n )\n else:\n X = np.asfortranarray(X)\n (\n beta_hat_cyclic_cd_true,\n residuals,\n primal_hist,\n dual_hist,\n gap_hist,\n r_list,\n n_active_features_true,\n theta_hat_cyclic_cd,\n P_lmbda,\n D_lmbda,\n G_lmbda,\n safe_set,\n ) = cyclic_coordinate_descent(\n X,\n y,\n self.lmbda,\n self.epsilon,\n self.f,\n self.n_epochs,\n self.screening,\n self.store_history,\n )\n\n self.slopes = beta_hat_cyclic_cd_true\n self.residuals = residuals\n self.G_lmbda = G_lmbda\n self.P_lmbda = P_lmbda\n self.r_list = r_list\n self.safe_set = safe_set\n\n return self", "def backprop(self, x, y):\n\n\t\tnabla_b = [np.zeros(b.shape) for b in self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\t# feedforward\n\t\tactivation = self.board_to_input(x)\n#\t\tactivation = np.reshape(activation,(len(activation),1))\n\t\tactivations = [activation] # list to store all the activations, layer by layer\n\t\tzs = [] # list to store all the z 
vectors, layer by layer\n\t\tfor b, w in zip(self.biases, self.weights):\n\t\t\tz = np.dot(w, activation)+b\n\t\t\tzs.append(z)\n\t\t\tactivation = self.act_func(z)\n\n#\t\t\tactivation = np.reshape(activation,(len(activation),1))\n\t\t\tactivations.append(activation)\n\t\t# backward pass\n\t\tdelta = self.cost_derivative(activations[-1], np.reshape(y,(81,1))) * \\\n\t\t\tself.act_grad(zs[-1])\n\n\t\t#delta = np.reshape(delta,(len(delta),1))\n\t\t\n\t\tnabla_b[-1] = delta\n\n\t\tnabla_w[-1] = np.dot(delta, activations[-2].transpose())\n\t\t\n\t\t# Note that the variable l in the loop below is used a little\n\t\t# differently to the notation in Chapter 2 of the book. Here,\n\t\t# l = 1 means the last layer of neurons, l = 2 is the\n\t\t# second-last layer, and so on. It's a renumbering of the\n\t\t# scheme in the book, used here to take advantage of the fact\n\t\t# that Python can use negative indices in lists.\n\t\tfor l in xrange(2, self.num_layers):\n\t\t\tz = zs[-l]\n\t\t\tsp = self.act_grad(z)\n\t\t\tdelta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n\t\t\tnabla_b[-l] = delta\n\t\t\tnabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n\t\t\t#nabla_w[-l] = delta[0] * activations[-l-1].transpose()\n\t\treturn (nabla_b, nabla_w)", "def jacobian_information(self):\n has_jacobian = False\n jacobian_free_solvers = []\n return has_jacobian, jacobian_free_solvers", "def deriv_costFunction(self, x, y):\n self.yEst = self.forward_propagate(x)\n\n delta_o = np.multiply( ( self.yEst - y ), self.deriv_sigmoid(self.z_o) )\n #partial deriv of cost wrt hidden -> output weights\n partial_J_w_ho = np.dot( self.a_h.T, delta_o )\n\n ones_o = np.ones( delta_o.shape[0] )\n #partial deriv of cost wrt output biases\n partial_J_b_o = np.dot( ones_o, delta_o )\n\n delta_h = np.dot( delta_o, self.w_ho.T ) * self.deriv_sigmoid( self.z_h )\n #partial deriv of cost wrt input -> hidden weights\n partial_J_w_ih = np.dot( x.T, delta_h )\n \n ones_h = np.ones( delta_h.shape[0] )\n #partial deriv of cost wrt hidden biases\n partial_J_b_h = np.dot( ones_h, delta_h)\n\n return partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o", "def calc_jacobian(*args, **kwargs):\n try:\n tag = kwargs[\"tag\"]\n except:\n tag = 0\n\n try:\n sparse = kwargs[\"sparse\"]\n except:\n sparse = True\n\n if sparse:\n try:\n shape = kwargs[\"shape\"]\n except:\n raise ValueError(\"'shape' should be passed to calculate sparse jacobian!\")\n\n \n options = np.array([0,0,0,0],dtype=int)\n result = ad.colpack.sparse_jac_no_repeat(tag, *args, options=options)\n nnz = result[0]\n ridx = result[1]\n cidx = result[2]\n values = result[3]\n assert nnz > 0\n jac = sp.csr_matrix((values, (ridx, cidx)), shape=shape)\n jac = jac.toarray()\n else:\n jac = ad.jacobian(tag, *args)\n return jac", "def fun(_, y):\n return np.array([-self.r * self.beta * y[1] * y[0] / self.N,\n self.r * self.beta * y[1] * y[0] / self.N - self.gamma * y[1],\n self.gamma * y[1]])", "def J(theta, x, y):\n m = len(y)\n z = theta.dot(x.T) #argument for hypothesis function\n return 1. / m * np.sum(-y * np.log(g(z)) - (1. 
- y) * np.log(1 - g(z)))", "def jacobianTransformedParameters(self, x):\n temp = self.invLogit(x)\n return (self.upper - self.lower) * temp * (1.0 - temp)", "def log_jacobian_tensor(self, x):\n pass", "def minfunc(beta, yvec, xmat ):\n return yvec - exp(dot(xmat, beta))", "def jacobianstructure(self):\n pass", "def f(self, x: np.array) -> np.array:\n return self.m * x + self.c", "def jacobian(expression, wrt, consider_constant=None, disconnected_inputs=\"raise\"):\n\n if not isinstance(expression, Variable):\n raise TypeError(\"jacobian expects a Variable as `expression`\")\n\n if expression.ndim > 1:\n raise ValueError(\n \"jacobian expects a 1 dimensional variable as `expression`.\"\n \" If not use flatten to make it a vector\"\n )\n\n using_list = isinstance(wrt, list)\n using_tuple = isinstance(wrt, tuple)\n\n if isinstance(wrt, (list, tuple)):\n wrt = list(wrt)\n else:\n wrt = [wrt]\n\n if expression.ndim == 0:\n # expression is just a scalar, use grad\n return as_list_or_tuple(\n using_list,\n using_tuple,\n grad(\n expression,\n wrt,\n consider_constant=consider_constant,\n disconnected_inputs=disconnected_inputs,\n ),\n )\n\n def inner_function(*args):\n idx = args[0]\n expr = args[1]\n rvals = []\n for inp in args[2:]:\n rval = grad(\n expr[idx],\n inp,\n consider_constant=consider_constant,\n disconnected_inputs=disconnected_inputs,\n )\n rvals.append(rval)\n return rvals\n\n # Computing the gradients does not affect the random seeds on any random\n # generator used n expression (because during computing gradients we are\n # just backtracking over old values. (rp Jan 2012 - if anyone has a\n # counter example please show me)\n jacobs, updates = aesara.scan(\n inner_function,\n sequences=aesara.tensor.arange(expression.shape[0]),\n non_sequences=[expression] + wrt,\n )\n assert not updates, \"Scan has returned a list of updates; this should not happen.\"\n return as_list_or_tuple(using_list, using_tuple, jacobs)", "def jeffreys(self, x):\n return np.sqrt(1. 
/ x)", "def regress_origin(x, y):\r\n x, y = array(x, 'Float64'), array(y, 'Float64')\r\n return sum(x * y) / sum(x * x), 0", "def _evaluate(self, x, y):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n f = (1 - alpha) * self.xInterpolators[y_pos - 1](\n x\n ) + alpha * self.xInterpolators[y_pos](x)\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n f = np.zeros(m) + np.nan\n if y.size > 0:\n for i in range(1, self.y_n):\n c = y_pos == i\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n f[c] = (1 - alpha) * self.xInterpolators[i - 1](\n x[c]\n ) + alpha * self.xInterpolators[i](x[c])\n return f", "def AB_zero_Jy(self):\n return self._get_mean_and_samples_attribute('AB_zero_Jy')", "def fit(self, x, y):\n extra_ones = np.array([1.0 for i in range(len(x))],dtype='f').reshape(-1,1) # create an extra column of ones (neutral elements)\n \n x = np.concatenate((extra_ones,x),1) # append extra ones to x with axis 1 because we want a matrix \n xt = x.transpose()\n \n # multiply matrices\n self.betas = np.linalg.inv(xt.dot(x)).dot(xt).dot(y)", "def jacobian(theta, event, parameters_to_fit):\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)", "def objective(beta, lambdat, X, y):\n return 1/len(y) * (np.sum(\n (np.maximum(0, 1-((y[:, np.newaxis]*X).dot(beta)))**2)))\\\n + lambdat * np.linalg.norm(beta)**2", "def _derY(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c])\n return dfdy", "def cost_matrix(x, y, p=2):\n xc = tf.expand_dims(x, 1)\n yr = tf.expand_dims(y, 0)\n d = tf.math.pow(tf.abs(xc - yr), p)\n return tf.reduce_sum(d, axis=-1)", "def jacobian(self, dmin, src=None, rec=None, u=None, U=None, vp=None, **kwargs):\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or self.geometry.rec\n\n # Create the forward wavefields u and U if not provided\n u = u or TimeFunction(name='u', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n U = U or TimeFunction(name='U', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n # Execute operator and return wavefield and receiver data\n summary = self.op_born().apply(dm=dmin, u=u, U=U, src=src, rec=rec,\n vp=vp, dt=kwargs.pop('dt', self.dt), **kwargs)\n return rec, u, U, summary" ]
[ "0.8049506", "0.7586799", "0.7340082", "0.7092742", "0.70245093", "0.6984471", "0.6948992", "0.68618804", "0.6859211", "0.6838144", "0.6793028", "0.67266047", "0.6696106", "0.66701853", "0.6601887", "0.6596935", "0.65616024", "0.65616024", "0.6518664", "0.6505477", "0.6503522", "0.6448854", "0.64345807", "0.64069396", "0.6390168", "0.636842", "0.6347055", "0.63409215", "0.631703", "0.62610084", "0.6229196", "0.6214093", "0.620291", "0.6165825", "0.6126349", "0.6120626", "0.6120235", "0.61111534", "0.6078835", "0.6046641", "0.60140955", "0.596486", "0.5948192", "0.59437406", "0.59409016", "0.59247327", "0.59172976", "0.5897023", "0.5896898", "0.58882475", "0.58856094", "0.58811283", "0.58631694", "0.58585906", "0.5847809", "0.58405274", "0.5824085", "0.58017355", "0.5801266", "0.5790084", "0.57841396", "0.5779733", "0.5771831", "0.5767078", "0.5756869", "0.57385665", "0.572344", "0.57195836", "0.57050794", "0.5703148", "0.5703016", "0.5702324", "0.5686612", "0.5681273", "0.56695753", "0.5624606", "0.5624318", "0.562408", "0.5579041", "0.5572592", "0.5537406", "0.5534478", "0.5527271", "0.5516447", "0.55127466", "0.5511916", "0.54888904", "0.5486261", "0.5477655", "0.5475522", "0.5472235", "0.5472024", "0.5470377", "0.5462253", "0.546196", "0.5458835", "0.5458026", "0.5457421", "0.5452423", "0.5451759" ]
0.7515247
2
Return an Expression_obj whose name is gene_id
def __init__(self): # self.organism = [] self.weighting_dict = defaultdict(list) # self.codon_obj_dict = {} self.codon_dict = { 'UUU':'F','UUC':'F', 'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L', 'AUU':'I','AUC':'I','AUA':'I', 'AUG':'M', 'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V', 'UCU':'S','UCC':'S','UCA':'S','UCG':'S', 'CCU':'P','CCC':'P','CCA':'P','CCG':'P', 'ACU':'T','ACC':'T','ACA':'T','ACG':'T', 'GCU':'A','GCC':'A','GCA':'A','GCG':'A', 'UAU':'Y','UAC':'Y', 'UAA':'X','UAG':'X', 'CAU':'H','CAC':'H', 'CAA':'Q','CAG':'Q', 'AAU':'N','AAC':'N', 'AAA':'K','AAG':'K', 'GAU':'D','GAC':'D', 'GAA':'E','GAG':'E', 'UGU':'C','UGC':'C', 'UGA':'X', 'UGG':'W', 'CGU':'R','CGC':'R','CGA':'R','CGG':'R', 'AGU':'S','AGC':'S', 'AGA':'R','AGG':'R', 'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G' }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_gene(self, gene_id, source=\"eid\"):\n\n gene_id = str(gene_id)\n\n \n try:\n valid_gene = self.gene_cache[self.source_cache[source][gene_id]]\n return valid_gene\n except KeyError:\n pass\n \n valid_eid = None\n\n if source == \"eid\":\n try:\n eid = int(gene_id)\n except ValueError:\n raise ValueError(\"gene_id must be an integer if source \" + \\\n \"is \\\"Entrez\\\"\")\n\n self.cursor.execute(\"\"\"\n SELECT EXISTS(\n SELECT * \n FROM genes \n WHERE entrez_id = %(eid)s\n )\"\"\", {'eid': eid})\n if self.cursor.fetchone()[0] == 1:\n valid_eid = eid\n\n else:\n\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM discontinued_genes\n WHERE discontinued_id = %(eid)s\"\"\", {'eid': eid})\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n raise KeyError(\"Entrez ID %d was not found in the database\" % eid)\n\n elif source == \"symbol\":\n\n args = {\"symbol\": gene_id}\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM genes\n WHERE symbol = %(symbol)s\"\"\", args)\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM discontinued_genes\n WHERE discontinued_symbol = %(symbol)s\"\"\", args)\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM gene_synonyms\n WHERE symbol = %(symbol)s\"\"\", args)\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n raise KeyError(\"Symbol %s not found in the database\" % gene_id)\n else:\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM gene_xrefs\n WHERE Xref_db = %(db)s\n AND Xref_id = %(id)s\"\"\", {'db': source, 'id': gene_id})\n row = self.cursor.fetchone()\n if row is not None:\n valid_eid = row[0]\n else:\n raise KeyError((\"Gene ID %s from source %s was not found \" + \\\n \"in the database\") % (gene_id, source))\n\n if valid_eid is None:\n raise KeyError(\"Unable to find a valid Entrez ID for %s from %s\" % (gene_id, source))\n\n valid_eid = int(valid_eid)\n if source not in self.source_cache:\n self.source_cache[source] = {}\n self.source_cache[source][gene_id] = valid_eid\n self.gene_cache[valid_eid] = Gene(valid_eid, self)\n\n return self.gene_cache[valid_eid]", "def id(self):\n return self.gene_id", "def find_gene_by_name(self, gene_name: str) -> Gene:\n return self.influence_graph.find_gene_by_name(gene_name)", "def get_gene_by_id(gene_query):\n\n\tgene_query_wildcard = [ gene+'%' for gene in gene_query ]\n\tsql_query = \"SELECT * FROM genes WHERE \" + \"gene_id LIKE %s OR \" * len(gene_query_wildcard)\n\tsql_query = sql_query[:-3]\n\n\tdf = pd.read_sql(sql_query, params=(gene_query_wildcard,), con=db.get_engine(current_app, 'methylation_data'))\n\n\t#reorder genes to original order since SQL doesn't keep order.\n\tnew_index = []\n\tfor index, row in df.iterrows():\n\t\tfor i, gene_id in enumerate(gene_query):\n\t\t\tif gene_id in row['gene_id']:\n\t\t\t\tnew_index.append(i)\n\t\t\t\tbreak\n\tdf.index = new_index\n\tdf.sort_index(inplace=True)\n\n\treturn df.to_dict('records')", "def gene(self, idx):\r\n return self.genes[idx]", "def getId(self):\n return _libsbml.GeneProduct_getId(self)", "def get_gene(self):\n return self._gene", "def name(self):\n return self.gene_name", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n 
start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def get_gene(identifier):\n for store in [GENES, ALIASES]:\n genes = store.get(identifier, None)\n if genes and len(genes) == 1:\n return genes\n else:\n raise ValueError('gene reference does not exist or refers to multiple genes')", "def get_gene_by_name(gene_query):\n\n\tgene_query = [ gene.lower()+\"%\" for gene in gene_query ]\n\n\tsql_query = \"SELECT * FROM genes WHERE \" + \"lower(gene_name) LIKE %s OR \" * len(gene_query)\n\tsql_query = sql_query[:-3]\n\n\tdf = pd.read_sql(sql_query, params=(gene_query,), con=db.get_engine(current_app, 'methylation_data'))\n\n\treturn df.to_dict('records')", "def entrez_gene_id(self) -> int:\n return self._entrez_gene_id", "def resolve_gene_id(cls, query):\n result = cls.query.filter(or_(cls.locus == query, cls.sequence_name == query)).first()\n if result:\n return result.gene_id", "def getId(self):\n return _libsbml.GeneProductRef_getId(self)", "def createGeneProductRef(self):\n return _libsbml.FbcOr_createGeneProductRef(self)", "def createGeneProductRef(self):\n return _libsbml.FbcAnd_createGeneProductRef(self)", "def entrez_gene_id(gene: GeneInfo):\n if (gene.identifiers is not None and gene.identifiers.entrez is not None):\n if (gene.identifiers.entrez.startswith('NCBIGene:')):\n return gene.identifiers.entrez[9:]\n else:\n return gene.identifiers.entrez\n return None", "def entrez_gene_id(self, entrez_gene_id: int):\n\n self._entrez_gene_id = entrez_gene_id", "def from_symbol_to_entrez_gene_id(row):\r\n\tgene_entry = annotation_client.get_entrez_gene_id_from_symbol(row['symb'])\r\n\t# import pdb; pdb.set_trace()\r\n\tegid = str(gene_entry['entrez_gene_id'][0]) if gene_entry is not None else \"0\"\r\n\treturn egid", "def get_symbol(self, entrez_id):\n\n try:\n entrez_id = int(entrez_id)\n except ValueError:\n raise ValueError(\"entrez_id must be an integer\")\n\n\n self.cursor.execute(\"\"\"\n SELECT symbol\n FROM genes\n WHERE entrez_id = %(eid)s\"\"\", {'eid': entrez_id})\n row = self.cursor.fetchone()\n if row is not None:\n return row[0]\n raise KeyError(\"Entrez ID %d was not found in the database\" % entrez_id)", "def createGeneProductRef(self):\n return _libsbml.GeneProductAssociation_createGeneProductRef(self)", "def get_gene(gene):\n\n return copy.deepcopy(gene)", "def get_gene_by_name_exact(gene_query):\n\n\tgene_query = [ gene.lower() for gene in gene_query ]\n\tplaceholders_str = \"%s, \" * len(gene_query)\n\tplaceholders_str = placeholders_str[:-2]\n\tsql_query = \"SELECT * FROM genes WHERE \" + \"lower(gene_name) IN (\" + placeholders_str+ \")\"\n\tsql_query += \" ORDER BY CASE lower(gene_name) \"\n\n\tfor i, gene in enumerate(gene_query):\n\t\tsql_query += \"WHEN '{}' THEN {} \".format(gene, i+1)\n\tsql_query += \"END\"\n\n\tdf = pd.read_sql(sql_query, params=(gene_query,), con=db.get_engine(current_app, 'methylation_data'))\n\n\treturn df.to_dict('records')", "def isgene(s, gene):\n 
test = s.query(Genes).filter(Genes.name.ilike(gene)).first()\n if test is None:\n gene_list = check_gene_name(gene)\n if len(gene_list) == 0:\n return None\n else:\n for g in gene_list:\n print(g)\n test = s.query(Genes).filter(Genes.name.ilike(str(g))).first()\n if test is not None:\n return test.name\n return None\n else:\n return test.name", "def getGeneProduct(self, *args):\n return _libsbml.FbcModelPlugin_getGeneProduct(self, *args)", "def get_entrezid(gene):\n entrezurl = \"http://mygene.info/v3/query?q=\"\n entrezurl = entrezurl+gene\n\n res = requests.get(entrezurl)\n results = pandas.read_json(StringIO(res.text))\n\n entrezid = []\n if results.empty:\n return entrezid\n\n for i in results.ix[:, 0]:\n key = i.keys()\n value = i.values()\n for cntr, k in enumerate(key):\n if k == 'entrezgene':\n entrezid.append(value[cntr])\n return entrezid", "def createGeneProduct(self):\n return _libsbml.ListOfGeneProducts_createGeneProduct(self)", "def get_id(self, expr):\n return self.table.inv[expr]", "def expression_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression_id\")", "def createGeneProduct(self):\n return _libsbml.FbcModelPlugin_createGeneProduct(self)", "def getGeneProduct(self):\n return _libsbml.GeneProductRef_getGeneProduct(self)", "def get_simple_node_gene(key, config):\n gene1 = SimpleNodeGene(key, config)\n gene1.activation = 'a'\n gene1.aggregation = 'a'\n gene1.bias = 0\n gene2 = SimpleNodeGene(key, config)\n gene2.activation = 'b'\n gene2.aggregation = 'b'\n gene2.bias = 1\n return gene1, gene2", "def limk2_gene_context():\n return {\n \"id\": \"normalize.gene:LIMK2\",\n \"type\": \"GeneDescriptor\",\n \"label\": \"LIMK2\",\n \"gene_id\": \"hgnc:6614\",\n \"xrefs\": [\n \"ncbigene:3985\",\n \"ensembl:ENSG00000182541\"\n ],\n \"extensions\": [\n {\n \"type\": \"Extension\",\n \"name\": \"symbol_status\",\n \"value\": \"approved\"\n },\n {\n \"name\": \"approved_name\",\n \"value\": \"LIM domain kinase 2\",\n \"type\": \"Extension\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"associated_with\",\n \"value\": [\n \"refseq:NM_016733\",\n \"ccds:CCDS33637\",\n \"ccds:CCDS13892\",\n \"ena.embl:D45906\",\n \"uniprot:P53671\",\n \"pubmed:10591208\",\n \"vega:OTTHUMG00000151251\",\n \"omim:601988\",\n \"iuphar:2055\",\n \"pubmed:8537403\",\n \"ccds:CCDS13891\",\n \"ucsc:uc003akh.4\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.IoyhTh4PxvPx8yF9P3IecXDVs_XVbDe9\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"22\",\n \"interval\": {\n \"end\": \"q12.2\",\n \"start\": \"q12.2\",\n \"type\": \"CytobandInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VSL.Mat3OiuXCd7vNWAGB5lOKaSf_ESc_xK9\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.7B7SHsmchAR0dFcDCuSFjJAo7tX87krQ\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 31212238},\n \"end\": {\"type\": \"Number\", \"value\": 31280080},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.IoyhTh4PxvPx8yF9P3IecXDVs_XVbDe9\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"22\",\n \"interval\": {\n \"end\": \"q12.2\",\n \"start\": \"q12.2\",\n \"type\": \"CytobandInterval\"\n }\n },\n {\n \"_id\": \"ga4gh:VSL.N1dI0SlDciU-8OsmdjEfSKdVDp-Q5_lf\",\n \"type\": 
\"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.7B7SHsmchAR0dFcDCuSFjJAo7tX87krQ\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 31212297},\n \"end\": {\"type\": \"Number\", \"value\": 31280080},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locus_type\",\n \"value\": \"gene with protein product\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_gene_type\",\n \"value\": \"protein-coding\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_biotype\",\n \"value\": \"protein_coding\"\n }\n ]\n }", "def _get_obj_geneset(self, obj):\n obj_geneset = set(obj.input.get(\"mutations\", []))\n if not obj_geneset:\n # Geneset is given via geneset input:\n gs = self.resolwe.geneset.get(obj.input[\"geneset\"])\n obj_geneset = set(gs.genes)\n\n # Convert to gene symbols in case genes are given as feature ID's\n if gs.output[\"source\"] != \"UCSC\":\n qs = self.resolwe.feature.filter(feature_id__in=list(obj_geneset))\n id_2_name = {obj.feature_id: obj.name for obj in qs}\n obj_geneset = set([id_2_name[gene] for gene in obj_geneset])\n\n return obj_geneset", "def get_entity(obj):\n return obj.or_expression.and_expression.cmp_expression.arith_expression. \\\n mul_expression.unary_expression.pow_expression.primary_expression. \\\n entity", "def createGene(self, reference=\"\"):\n return _libsbml.Association_createGene(self, reference)", "def get_Entrez_id(gid,conn):\n\n get_Entrez = ('SELECT DISTINCT dx.accession '\n 'FROM feature f, feature_dbxref fd, db, dbxref dx '\n 'WHERE f.feature_id = fd.feature_id AND fd.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'EntrezGene\\' AND '\n 'fd.is_current = \\'t\\' AND f.uniquename = %s')\n Entrez_id = connect(get_Entrez,gid,conn)\n if Entrez_id:\n id = Entrez_id[0][0]\n else:\n id = None\n return(id)", "def getName(self):\n return _libsbml.GeneProduct_getName(self)", "def setId(self, *args):\n return _libsbml.GeneProduct_setId(self, *args)", "def process_gene_line(self, line):\n kwargs = self.extract_gene_args(line)\n if not kwargs:\n return\n gene_id = kwargs['identifier']\n self.genes[gene_id] = Gene(**kwargs)", "def get_expression(data_series, probes_to_genes):\n with open(data_series, 'r') as mtx:\n stage_columns = {'all_stages': {'sample_ids': []}} # will always need an average, other stages are determined by the file\n sample_ids = None\n for line in mtx:\n if line.startswith('!Sample_title'):\n sample_stages = [x.strip().replace('\"','').split(\",\")[0] for x in line.split(\"\\t\")[1:]] # this line is likely dataset specific.\n elif line.startswith('\"ID_REF\"'): # this comes after the sample titles\n sample_ids = [x.strip().replace('\"','') for x in line.split(\"\\t\")[1:]]\n # now have the ids and their stages, convert to dict\n \"\"\"\n if named differently, may need to modify this.\n ultimately, stage_columns should be a dictionary with the following properties:\n - the keys are the stage names. 
\n - each 'stage' dict should have a key 'sample_ids' that has a list the sample_ids belonging to that stage.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn']\n },\n 'stage2': {\n 'sample_ids': ['sample_idn+1', ...]\n },\n ...\n }\n \"\"\"\n for i in range(0, len(sample_stages)):\n if sample_stages[i] not in stage_columns:\n stage_columns[sample_stages[i]] = {'sample_ids': []}\n stage_columns[sample_stages[i]]['sample_ids'].append(sample_ids[i])\n stage_columns['all_stages']['sample_ids'].append(sample_ids[i]) # add every sample to this\n elif sample_ids is not None:\n row = [x.strip().replace('\"','') for x in line.split('\\t')]\n \"\"\"\n here, the stage_columns dictionary is being updated with the expression data for each gene.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn'],\n 'genes': { <- **NEW KEY**\n 'entrezID-1': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n 'entrezID-2': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n ... (if PERCENTILE_RANK is True, all in dataset are recorded otherwise, just the genes of interest )\n }\n },\n ...\n }\n \"\"\"\n if row[0] in probes_to_genes:\n # get gene from probe\n entrez_id = probes_to_genes[row[0]]\n # add the average expression for all the samples in a stage for the gene\n for stage, stage_data in stage_columns.items():\n stage_data['genes'] = {} if 'genes' not in stage_data else stage_data['genes'] # initialize\n for sample_id in stage_data['sample_ids']:\n # get the index of the sample_id in the row\n sample_idx = sample_ids.index(sample_id) + 1\n if entrez_id not in stage_data['genes']:\n stage_data['genes'][entrez_id] = [float(row[sample_idx])]\n else:\n stage_data['genes'][entrez_id].append(float(row[sample_idx]))\n\n return stage_columns", "def name_value(self) -> global___Expression.NameValue:", "def getName(self):\n return _libsbml.GeneProductRef_getName(self)", "def get_gene(search_text):\n gene_search = re.search(\"BRCA1|BRCA2|CHD1|PTEN|STK11|TP53|ATM|BRIP1|CHEK2|PALB2|RAD51C|RAD51D\", search_text)\n if gene_search:\n gene = gene_search.group(0)\n else:\n gene = \"\"\n\n return gene", "def name2Id(self, classExpression):\n out = classExpression\n name_id = self.gen_name_id()\n for k, v in name_id.items():\n out = re.sub(\"\\'\"+k+\"\\'\", v, out) # Suspect this not Pythonic. Could probably be done with a fancy map lambda combo. 
\n return out", "def official_gene_id(self):\n\n return self._official_gene_id", "def find_gene_name(qualifiers):\n if not isinstance(qualifiers, dict):\n raise TypeError(\"Expected qualifier dictionary\")\n for tag in [\"protein_id\", \"locus_tag\", \"id\", \"gene\", \"name\", \"label\"]:\n if tag in qualifiers:\n return qualifiers[tag][0]\n return \"N.A.\"", "def createGeneProductRef(self):\n return _libsbml.ListOfFbcAssociations_createGeneProductRef(self)", "def get_id(self, refobj):\n return cmds.getAttr(\"%s.identifier\" % refobj)", "def get_output_node_gene(key, config):\n gene1 = OutputNodeGene(key, config)\n gene1.aggregation = 'a'\n gene1.bias = 0\n gene2 = OutputNodeGene(key, config)\n gene2.aggregation = 'b'\n gene2.bias = 1\n return gene1, gene2", "def case_get_by_id(self, refresh_db_before):\n names_with_id = {\"tag1\": 1, \"tag2\": 2, \"tag3\": 3}\n\n for name in names_with_id.keys():\n TagOp.add(name)\n\n for exp_name, exp_id in names_with_id.items():\n tag_obj = TagOp.get(id=exp_id)\n self.assertTrue(len(tag_obj) is 1)\n self.assertEqual(exp_name, tag_obj[0].name)\n self.assertEqual(exp_id, tag_obj[0].id)", "def gene_detail(request, pk):\n\n try:\n gene = Gene.objects.get(pk=pk)\n except Gene.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'POST':\n serializer = GeneSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n gene.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n elif request.method == 'PUT':\n serializer = GeneSerializer(gene, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == 'GET':\n serializer = GeneSerializer(gene)\n return Response(serializer.data)", "def aqGetExpName(self):\n return self._expname", "def __init__(self, geneId, gtfFeature):\n\n self.geneId = geneId\n self.features = {}", "def geneProcess(self, name):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n positions = self.geneFeatures[name]\n exons = []\n for position in positions:\n self.mm.seek(position)\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n attributes = row[-1].split(\"; \")\n for attribute in attributes:\n if attribute.startswith(\"gene_type\"):\n _gt = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_id\"):\n _gid = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_name\"):\n _gn = attribute.split(\" \")[-1][1:-1]\n exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))\n self.fileHandle.close()\n exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',\n 'strand', 'gene_type', 'gene_id', 'gene_name'])\n\n for record in self.geneExonicRegions(exons_df):\n yield record", "def getGeneProductByLabel(self, *args):\n return _libsbml.FbcModelPlugin_getGeneProductByLabel(self, *args)", "def __id__(self):\n return \"e_id=\" + str(self.__id) + \" e_value=\" + str(self.__edge_value)", "def setId(self, *args):\n return _libsbml.GeneProductRef_setId(self, *args)", "def get_identifier(self):", "def get_by_eid(cls, eid):\r\n results = execute_query('g.v(eid)', {'eid':eid})\r\n if not results:\r\n raise cls.DoesNotExist\r\n return Element.deserialize(results[0])", "def 
gene(self):\n\t\tif self._record is None:\n\t\t\treturn []\n\t\tgene_list =[i for i in self._record.features if i.type == 'gene']\n\t\treturn gene_list", "def convertId(sigma, op):\n return NodeSubexpression(sigma.parse_definition_byname(op.matched))", "def clone(self):\n return _libsbml.GeneProduct_clone(self)", "def get_by_eid(cls, eid):\r\n results = execute_query('g.e(eid)', {'eid':eid})\r\n if not results:\r\n raise cls.DoesNotExist\r\n return Element.deserialize(results[0])", "def identifier(self):", "def get_exoid(runid):\n cpu_ws = np.array([0])\n io_ws = np.array([0])\n exoname = runid + \".exo\"\n exoid, ierr = exolib.py_excre(exoname, EX_CLOBBER, cpu_ws, io_ws)\n if ierr:\n raise ExodusIIWriterError(\"Error creating exodus output\")\n return exoname, exoid", "def handle_gene_expression( event, model, arguments = DEFAULT_ARGUMENTS):\n # Transcription RNA from nothing- Caused by Gene\n # Translation Protein from nothing - Caused by RNA\n reaction = add_reaction( event, model, arguments = arguments);\n # for translation proteins are products (everything else is modifier)\n if event.type_lower == \"translation\":\n for theme in event.get_roles(\"theme\"):\n if theme.type == \"Protein\":\n add_product( theme.id, reaction, model, arguments = arguments);\n else:\n add_modifier( theme.id, reaction, model, arguments = arguments);\n # for gene_expression and transcription - Rna and proteins are products\n else:\n for theme in event.get_roles(\"theme\"):\n if theme.type_lower == \"rna\" or theme.type_lower == \"protein\":\n add_product( theme.id, reaction, model, arguments = arguments);\n else:\n add_modifier( theme.id, reaction, model, arguments = arguments);", "def expid(val,expt_name=None):\n global experiment_name\n if not expt_name:\n assert experiment_name, \"Must set experiment name\"\n expt_name = experiment_name\n return \"{}_{}\".format(expt_name, val)", "def getElementName(self):\n return _libsbml.GeneProduct_getElementName(self)", "def getElementName(self):\n return _libsbml.GeneProductRef_getElementName(self)", "def computed_identifier(o):\n\n pfx = vmc_model_prefixes[type(o)]\n dig = digest(o)\n accession = \"{pfx}_{dig}\".format(pfx=pfx, dig=dig)\n ir = models.Identifier(namespace=namespace, accession=accession)\n return ir", "def get_gene_ids():\n key = ('materialized', 'hugo')\n if data.exists(*key):\n return data.load(*key)\n d = hugo.get_huge_genes()\n d = d[~d['Approved Name'].str.lower().str.contains('symbol withdrawn|entry withdrawn')]\n d = d['Approved Symbol'].unique()\n data.save(*key, d)\n return d", "def expression(self) -> Expression:\n ...", "def get_genre(self, id):\n for row in self.db.cursor().execute('SELECT genre_id, name FROM genres WHERE genre_id=' + str(id)):\n genre = {\n 'id' : row[0],\n 'name' : row[1]\n }\n\n return genre", "def clone(self):\n return _libsbml.GeneProductRef_clone(self)", "def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], 
row[3], row[4], row[5], row[6], row[7], row[8])", "def get_expressions( useful_genes, expr_file):\n\n\t#open expressions file\n\texpression_stream = gzip.open(expr_file, \"r\")\n \n\t#reset line number\n\tlinenum = 0\n\n\texpressions_dict = {}\n\n\texpressions_header = [] \n\n\t#initialize progress bar\n\tfor line in expression_stream:\n\n\t\tlinenum += 1\n \n\t\t#skip first line, as those are the labels\n\n\n\t\tif isinstance(line, bytes) and not isinstance(line, str):\n\n\t\t\t\t\tline = line.decode()\n\t\tif line[0] != \"#\":\n\n\t\t\t#parse line\n\t\t\tline_content = line.rstrip().split(\",\")\n\t\t\t#if variant pos and gene match some value\n\t\t\tif line_content[0].split(\".\")[0] in useful_genes :\n\n\t\t\t\t#save the expression data for all the samples\n\n\t\t\t\tvar_expr = line_content[1:]\n\t\t\t\texpressions_dict[line_content[0].split(\".\")[0]] = var_expr\n\t\t\t\t#processed another variant\n\n\n\n\n\t\t\telif line.split(',')[0] == 'Name':\n \n\t\t\t\t#this is our header\n\t\t\t\texpressions_header = line.replace(\"\\n\",\"\").split(',')\n\n\treturn [expressions_dict, expressions_header]", "def __init__(self, eid: str, name: str):\n self.eid = eid\n self.name = name", "def __getitem__(self, key: str) -> float64:\n\n # extract the gene\n geno_id, allele_variant = geno_to_idx(key)\n\n return self._store[geno_id][allele_variant]", "def FindProIDfromGeneID(geneName, strainName, mRNA_protein_dict=mRNA_protein):\n\n # in the first step here, we must find the right gene id based on the part gene id from raven\n dir0 = '../0_332yeast_genomes/332_genome_annotations/proteome_old_species_id/'\n # strain1 = 'candida_sorboxylosa'\n strain_dir = dir0 + strainName + '.max.pep'\n protein_faa = open(strain_dir, 'r').readlines()\n protein_faa_id = [x for x in protein_faa if '>' in x]\n # next based on the above gene0, we find the related right mRNAid\n gene1 = [x.replace('>', '').strip('\\n') for x in protein_faa_id if geneName in x]\n protein_id = []\n for x in gene1:\n print(mRNA_protein_dict[x])\n protein_id.append(mRNA_protein_dict[x])\n return protein_id", "def get_genome(self):\n genes_dict = OrderedDict()\n for name in self.protogenes:\n gene = self.protogenes[name].get_gene()\n genes_dict[name] = gene\n return Genome(genes_dict, **self.options)", "def __init__(self, taxid, species_name = None, lineage=None):\n self.genes = dict()\n self.taxid = taxid\n self.species = species_name\n self.lineage = lineage", "def get_cross_id(self, entrez_id, xref_db):\n \n try:\n entrez_id = int(entrez_id)\n except ValueError:\n raise ValueError(\"entrez_id must be an integer\")\n\n self.cursor.execute(\"\"\"\n SELECT entrez_id\n FROM gene_xrefs\n WHERE Xref_db = %(db)s\n AND entrez_id = %(eid)s\"\"\", {'db': xref_db, 'eid': entrez_id})\n row = self.cursor.fetchone()\n if row is not None:\n return row[0]\n \n raise KeyError(\"Unable to find an external identifer for database \" + \\\n \"%s using Entrez ID %d\" % (xref_db, entrez_id))", "def get_genre(id_genre):\n genre = factory.get_elem_solo(Genre, id_genre)\n return genre", "def getId(self):\n return _libsbml.GeneAssociation_getId(self)", "def split_gene_name(gene):\n gene_info = gene.replace(\"ID=\", \"\").split()[0]\n gene_info = gene_info.split(\".t\")[0]\n if \"-T\" in gene_info:\n gene_info = gene_info.split(\"-T\")[0] # funannotate models\n gene_info = gene_info.replace(\";\", \"\")\n gene_info = gene_info.replace(\"Parent=\", \"\")\n gene_info = gene_info.split(\"Note=\")[0]\n gene_info = gene_info.split(\"Name=\")[0]\n return gene_info", "def 
queryYM( self, geneName, level ):\n result = [] \n \n # Get a new query on the table\n query = service.new_query(\"Gene\")\n query.add_constraint(\"interactions.participant2\", \"Gene\") \n \n query.add_view( \"primaryIdentifier\", \"symbol\", \"secondaryIdentifier\", \"sgdAlias\", \"name\",\n \"organism.shortName\", \"interactions.details.annotationType\",\n \"interactions.details.phenotype\", \"interactions.details.role1\",\n \"interactions.details.experimentType\", \"interactions.participant2.symbol\",\n \"interactions.participant2.secondaryIdentifier\",\n \"interactions.details.experiment.name\",\n \"interactions.details.relationshipType\" )\n \n query.add_constraint(\"organism.shortName\", \"=\", \"S. cerevisiae\", code = \"B\")\n query.add_constraint(\"Gene\", \"LOOKUP\", geneName, code = \"A\")\n \n for row in query.rows():\n data = level, row[\"primaryIdentifier\"], row[\"symbol\"], row[\"secondaryIdentifier\"], row[\"sgdAlias\"], \\\n row[\"name\"], row[\"organism.shortName\"], row[\"interactions.details.annotationType\"], \\\n row[\"interactions.details.phenotype\"], row[\"interactions.details.role1\"], \\\n row[\"interactions.details.experimentType\"], row[\"interactions.participant2.symbol\"], \\\n row[\"interactions.participant2.secondaryIdentifier\"], \\\n row[\"interactions.details.experiment.name\"], row[\"interactions.details.relationshipType\"]\n item = list(data)\n result.append(item) \n \n return result", "def extract_gene_data(info):\n gene_id = None\n gene_type = None\n for i in info:\n if i.startswith('gene_id'):\n gene_id = i.split(\" \", 1)[1].replace('\"', '')\n elif i.startswith('gene_type'):\n gene_type = i.split(\" \", 1)[1].replace('\"', '')\n\n assert gene_id is not None, 'No gene_id found {0}'.format(info)\n assert gene_type is not None, 'No gene_type found {0}'.format(info)\n return gene_id, gene_type", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def get_gene_values(hotel_ids):\n hotel_genes = {}\n subcats = get_subcat_axes()\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, genome\n FROM hotel_genome\n WHERE hotel_id in (%s)\n \"\"\" % \",\".join([str(h) for h in hotel_ids])\n )\n for hotel_id, genome_str in cursor.fetchall():\n genome = [float(g.strip()) for g in genome_str.split(\",\")]\n hotel_genes[hotel_id] = get_hotel_genes_by_subcat(\n subcats, genome)\n return subcats, hotel_genes", "def genre_key(genre_name=DEFAULT_GENRE):\n return ndb.Key('Genre', genre_name.lower())", "def oeid_to_existing_extid(self, cr, uid, referential_id, openerp_id, context=None):\n return self.get_extid(cr, uid, openerp_id, referential_id, context=context)", "def expand(self, individual: Dict[str, Union[str, Dict[str, List[int]], Callable]]):\n genes = individual[\"genome\"]\n\n pattern = 
[\"<expr_0>\"] # starts the pattern as the root symbol\n\n current_index = {i: 0 for i in self.grammar.keys()} # initializes the indexes for each gene respective\n # to a non terminal in the grammar\n\n i = 0\n while i < len(pattern): # while we have not reached the end of the expansion\n key = pattern[i]\n\n if key in self.grammar.keys():\n current_option = genes[key][current_index[key]] # option set by the gene\n\n out = self.grammar[key][current_option] \n out = out.split(\" \")\n \n pattern = pattern[:i] + out + pattern[i + 1:] # inserts the expantion into the current pattern\n\n current_index[key] += 1 # sets the index to look for the next gene\n continue\n i += 1\n\n individual[\"fenotype\"] = eval(\"lambda X1, X2: \" + \" \".join(pattern)) # generates the function as a lambda function\n # the idea is to speed up the evaluation process\n # while still having the flexibility of the\n # eval function in python", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def case_get_by_name_one_result(self, refresh_db_before):\n names_with_id = {\"tag1\": 1, \"tag2\": 2, \"tag3\": 3}\n\n for name in names_with_id.keys():\n TagOp.add(name)\n\n for exp_name, exp_id in names_with_id.items():\n tag_obj = TagOp.get(name=exp_name)\n self.assertTrue(len(tag_obj) is 1)\n self.assertEqual(exp_name, tag_obj[0].name)\n self.assertEqual(exp_id, tag_obj[0].id)", "def _join_gene(query, gene_name, gene_symbol, gene_id):\n if gene_name or gene_symbol:\n query = query.join(models.Gene)\n\n if gene_symbol:\n query = query.filter(models.Gene.gene_symbol.like(gene_symbol))\n\n if gene_name:\n query = query.filter(models.Gene.gene_name.like(gene_name))\n\n if gene_id:\n query = query.filter(models.Gene.gene_id.like(gene_id))\n\n return query", "def getElementName(self):\n return _libsbml.ListOfGeneProducts_getElementName(self)", "def getGeneAssociation(self, *args):\n return _libsbml.FbcModelPlugin_getGeneAssociation(self, *args)", "def identity(self, *args, **kwargs):\n return {\n 'id': self.drone_id,\n }", "def id(obj):\n return obj" ]
[ "0.6313475", "0.63014066", "0.6045709", "0.5961227", "0.5909269", "0.5827519", "0.5769331", "0.5765576", "0.57507443", "0.57040155", "0.56940395", "0.5685017", "0.5614033", "0.55961704", "0.55855423", "0.5572768", "0.5556484", "0.5548169", "0.5545586", "0.5535128", "0.5527104", "0.5472447", "0.54660803", "0.5400741", "0.538702", "0.53777444", "0.5357266", "0.5348458", "0.5336783", "0.533067", "0.5320141", "0.53149325", "0.5308728", "0.5298447", "0.5290384", "0.52130634", "0.5194515", "0.51539165", "0.5146083", "0.5138999", "0.50609094", "0.50607157", "0.50581056", "0.50492775", "0.50300056", "0.5015322", "0.50120497", "0.5005112", "0.5001091", "0.49775535", "0.49638507", "0.49545595", "0.49407685", "0.49354243", "0.49286377", "0.49270964", "0.4907738", "0.48908922", "0.48847803", "0.48717067", "0.48658773", "0.48608312", "0.48603538", "0.4856984", "0.4847404", "0.48442593", "0.48437068", "0.4833064", "0.48294923", "0.48184317", "0.48182666", "0.48137218", "0.4807339", "0.48059613", "0.47737733", "0.47665155", "0.47639182", "0.47623962", "0.47513592", "0.47477922", "0.4724791", "0.472125", "0.4705552", "0.46961993", "0.4694616", "0.46939114", "0.46921894", "0.46905196", "0.46800318", "0.4677803", "0.46655512", "0.4656494", "0.46551993", "0.46547794", "0.46476156", "0.4639806", "0.46344638", "0.46327287", "0.46306977", "0.46297458", "0.46199232" ]
0.0
-1
Constructs an Octave ResNet26 model.
def pre_act_oct_resnet26(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet34(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n return model", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def resnet18(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [2, 2, 2, 2], shortcut_type, num_classes, in_channels)\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model", "def __init__(self, pretrained=True, freeze_weights=True):\n super(RaisinNet34, self).__init__()\n # Define the model's name for it's output files\n # Load a pre-trained ResNet-34 model and turn off autograd\n # so its weights won't change.\n architecture = resnet34(pretrained=pretrained)\n if freeze_weights:\n for layer in architecture.parameters():\n layer.requires_grad = False\n # Copy the convolutional layers of the model.\n self.conv1 = architecture.conv1\n self.bn1 = architecture.bn1\n self.relu = architecture.relu\n self.maxpool = architecture.maxpool\n self.layer1 = architecture.layer1\n self.layer2 = architecture.layer2\n self.layer3 = architecture.layer3\n self.layer4 = architecture.layer4\n # Copy the average pooling layer of the model.\n self.avgpool = architecture.avgpool\n # Redefine the classification block of ResNet-34.\n # Use LeakyReLU units instead of ReLU units.\n # Output layer has 2 nodes only for the 2 classes in the PCam dataset.\n in_ftrs = architecture.fc.in_features\n self.fc = nn.Linear(in_features=in_ftrs, out_features=2, bias=True)\n # Define a LogSoftmax layer for converting outputs to probabilities\n # Not needed in `forward()` because included in nn.CrossEntropyLoss\n self.log_softmax = nn.LogSoftmax(dim=1)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 
2],\n num_classes=num_classes)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.pooling = nn.MaxPool2d(2,stride = 2)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def __init__(self, embed_size):\n super(Encoder, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules) \n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif 
arch == 'wide_resnet50_2':\n model = torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def ResNet34(num_classes=10):\n return ResNet(BasicBlock, \n [3,4,6,3], \n num_classes=num_classes)", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNNFeatures, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n\n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnext101(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def get_model():\n # Load the pretrained model.\n model = torchvision.models.resnet34(pretrained=True)\n\n # Resize model for our task.\n model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n model.avgpool = torch.nn.AvgPool2d(2)\n model.fc = torch.nn.Linear(in_features=512, out_features=10, bias=True)\n\n return model", "def resnext152(**kwargs):\n model = ResNeXt(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def resnet200(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 24, 36, 3], shortcut_type, num_classes, in_channels)\n return model", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, 
self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def ffc_resnet26(pretrained=False, **kwargs):\n model = FFCResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def __init__(self,\n image_channels,\n num_classes):\n super().__init__()\n\n self.model = torchvision.models.resnet18(pretrained=True)\n self.model.fully_connected = nn.Linear(224, 10)", "def resnet18(num_classes, pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1000)\n if pretrained:\n logger.info('Resnet18: Loading pretrained')\n _model_loaded = try_load_model('resnet18')\n model.load_state_dict(_model_loaded)\n if num_classes != 1000:\n model.reinit_fc(num_classes)\n\n layers = [model.fc, model.layer4, model.layer3]\n\n return model, layers", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def __init__(self,embedding_size):\n super(ResNetEncoder,self).__init__()\n resnet = models.resnet50(pretrained=True)\n modules = list(resnet.children())[:-1]\n #Create a sequential models upto top fc layer add a custom fc layer compatible with embedding size of decoder RNN\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features,embedding_size)\n self.bn = nn.BatchNorm1d(embedding_size,momentum=0.01)\n self.init_weights()", "def resnet101(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 4, 23, 3], shortcut_type, num_classes, in_channels)\n return model", "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def resnet34(num_classes, pretrained=False, **kwargs):\n model = 
ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model", "def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model", "def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model", "def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):\n super(ResnetEncoder, self).__init__()\n\n self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2\n self.drop_p = drop_p\n self.inter = 64\n self.final = 6\n\n self.resnet = models.resnet50(pretrained=True)\n modules = list(self.resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.fc1 = nn.Linear(resnet.fc.in_features, fc_hidden1)\n self.bn1 = nn.BatchNorm1d(fc_hidden1, momentum=0.01)\n self.fc2 = nn.Linear(fc_hidden1, fc_hidden2)\n self.bn2 = nn.BatchNorm1d(fc_hidden2, momentum=0.01)\n self.fc3 = nn.Linear(fc_hidden2, CNN_embed_dim)\n self.fc4 = nn.Linear(CNN_embed_dim, self.inter)\n self.fc5 = nn.Linear(self.inter, self.final)", "def __init__(self, num_models: int, num_classes: int):\n self.nun_models = num_models\n self.num_classes = num_classes\n self.model: keras.Model = self.init_model()", "def __init__(self):\n self.model = Sequential()\n self.model.add(AveragePooling2D(pool_size=(4, 4), input_shape=(224, 224, 3)))\n self.model.add(Conv2D(16, (9, 9)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(16, (5, 5)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Flatten())\n self.model.add(Dropout(0.5))\n self.model.add(Dense(1, activation='sigmoid'))\n self.model.compile(loss=binary_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self, embed_size):\n super(ImgEncoder, self).__init__()\n model = models.vgg19(pretrained=True)\n in_features = model.classifier[-1].in_features # input size of feature vector\n model.classifier = nn.Sequential(\n *list(model.classifier.children())[:-1]) # remove last fc layer\n\n self.model = model # loaded model without last fc layer\n self.fc = nn.Linear(in_features, embed_size) # feature vector of image", "def resnet152(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 8, 36, 3], shortcut_type, num_classes, in_channels)\n return model", "def __init__(self, embedding_dim, n_classes, use_vmf, learn_temp, init_temp,\n kappa_confidence, n_digits):\n super(NDigitMNISTArch,\n self).__init__(embedding_dim, n_classes, use_vmf, learn_temp,\n init_temp, kappa_confidence)\n self.n_digits = n_digits\n self.encoder = self._create_encoder()", "def pre_act_oct_resnet101(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnet34(pretrained=False, mode='rgb', **kwargs):\n 
if mode == 'flow':\n model = ResNet(BasicBlock, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def build_model(cfg, char_voca, word_voca=None, gazet=None, pos_voca=None):\n\n # Build Embedder\n embedder = Embedder(\n window=cfg.window,\n char_voca=char_voca,\n word_voca=word_voca,\n jaso_dim=cfg.jaso_dim,\n char_dim=cfg.char_dim,\n word_dim=cfg.word_dim,\n gazet=gazet,\n gazet_embed=True,\n pos_enc=True,\n phoneme=True,\n pos_voca_size=len(pos_voca),\n pos_dim=cfg.pos_dim)\n\n print('Total Embedding_size: ', embedder.embed_dim)\n\n\n encoder_name, decoder_name = cfg.model_name.lower().split('-')\n\n # Build Encoder\n if encoder_name == 'fnn5':\n encoder = models.Fnn5(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn7':\n encoder = models.Cnn7(in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn8':\n encoder = models.Cnn8(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name in ['gru', 'lstm', 'sru']:\n encoder = models.RnnEncoder(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n out_dim=cfg.hidden_dim,\n cell=encoder_name)\n else:\n raise ValueError('unknown model name: %s' % cfg.model_name)\n\n # Build Decoder\n if decoder_name.lower() == 'fc':\n decoder = models.FCDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags)\n elif decoder_name in ['gru', 'lstm', 'sru']:\n decoder = models.RnnDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags,\n num_layers=cfg.num_layers,\n cell=decoder_name)\n\n model = models.Ner(embedder, encoder, decoder)\n\n return model", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def resnet110g8r(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, indices=\"random\", **kwargs)", "def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)", "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def model_wrapper(self):\n original = self.args.rnn_type\n if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def ffc_resnet18(pretrained=False, **kwargs):\n model = FFCResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def 
define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet18']))\n return model", "def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,\n resnet_version=resnet_model.DEFAULT_VERSION,\n dtype=resnet_model.DEFAULT_DTYPE):\n if resnet_size % 6 != 2:\n raise ValueError('resnet_size must be 6n + 2:', resnet_size)\n\n num_blocks = (resnet_size - 2) // 6\n\n super(Model, self).__init__(\n resnet_size=resnet_size,\n bottleneck=False,\n num_classes=num_classes,\n num_filters=16,\n kernel_size=3,\n conv_stride=1,\n first_pool_size=None,\n first_pool_stride=None,\n block_sizes=[num_blocks] * 3,\n block_strides=[1, 2, 2],\n final_size=64,\n resnet_version=resnet_version,\n data_format=data_format,\n dtype=dtype\n )", "def build_examples():\n build_models([\n \"VGG_16\",\n \"VGG_19\",\n \"RESNET_50\",\n \"MOBILENET\",\n #\"INCEPTION_V3\",\n #\"INCEPTION_RESNET\",\n #\"DENSENET_121\",\n #\"DENSENET_169\",\n #\"DENSENET_201\"])\n ])", "def resnet18(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet18(pretrained=pretrained)\n if freeze:\n set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model", "def attention_resnet32(**kwargs):\r\n model = CifarAttentionResNet(CifarAttentionBasicBlock, 5, **kwargs)\r\n return model", "def build(self) -> KM.Model:\n\n # For decoder number of features in opposite order of encoder\n decoder_features = self.encoder_features.copy()\n decoder_features.reverse()\n\n # build the encoder model\n self.encoder_model = self.encoder(\n features=self.encoder_features, name=\"encoder\"\n )\n\n # build the decoder model\n decoder = self.decoder(features=decoder_features, name=\"decoder\")\n\n input_tensor = KL.Input(\n shape=(32, 32, 3)\n ) # shape of images for cifar10 dataset\n\n # Encode the images\n encoded = self.encoder_model(input_tensor)\n # Decode the image\n decoded = 
decoder(encoded[-1])\n\n return KM.Model(inputs=input_tensor, outputs=decoded, name=\"AutoEncoder\")", "def __init__(\n self,\n encoder: Union[str, nn.Module] = \"resnet18\",\n head: Optional[nn.Module] = None,\n representation_size: int = 128,\n num_negatives: int = 65536,\n encoder_momentum: float = 0.999,\n temperature: float = 0.07,\n exclude_bn_bias: bool = False,\n optimizer: Type[optim.Optimizer] = optim.SGD,\n optimizer_params: Optional[Dict[str, Any]] = None,\n lr_scheduler: Type[LRScheduler] = optim.lr_scheduler.CosineAnnealingLR,\n lr_scheduler_params: Optional[Dict[str, Any]] = None,\n ) -> None:\n super().__init__()\n\n self.num_negatives = num_negatives\n self.encoder_momentum = encoder_momentum\n self.temperature = temperature\n self.exclude_bn_bias = exclude_bn_bias\n self.optimizer_class = optimizer\n if optimizer_params is not None:\n self.optimizer_params = optimizer_params\n else:\n self.optimizer_params = {\"lr\": 0.03, \"momentum\": 0.9, \"weight_decay\": 1e-4}\n self.lr_scheduler_class = lr_scheduler\n if lr_scheduler_params is not None:\n self.lr_scheduler_params = lr_scheduler_params\n else:\n self.lr_scheduler_params = {\"T_max\": 100}\n\n if isinstance(encoder, str):\n template_model = getattr(torchvision.models, encoder)\n self.encoder_q = template_model(num_classes=representation_size)\n else:\n self.encoder_q = encoder\n self.encoder_k = deepcopy(self.encoder_q)\n for param in self.encoder_k.parameters():\n param.requires_grad = False\n\n if head is not None:\n self.head_q: Optional[nn.Module] = head\n self.head_k: Optional[nn.Module] = deepcopy(head)\n for param in self.head_k.parameters():\n param.requires_grad = False\n else:\n self.head_q = None\n self.head_k = None\n\n # Two different queues of representations are needed, one for training and one for validation data.\n self.queue = RepresentationQueue(representation_size, num_negatives)\n self.val_queue = RepresentationQueue(representation_size, num_negatives)", "def __init__(self, version='ResNet50', dilation=None, **kwargs):\n super(ResNet, self).__init__(**kwargs)\n params = {'ResNet50': [2, 3, 5, 2],\n 'ResNet101': [2, 3, 22, 2],\n 'ResNet152': [2, 7, 35, 2]}\n self.version = version\n assert version in params\n self.params = params[version]\n\n if dilation is None:\n self.dilation = [1, 1]\n else:\n self.dilation = dilation\n assert len(self.dilation) == 2", "def init_model(\n sample_length: int, base_model: str, num_classes: int = None\n ) -> torchvision.models.video.resnet.VideoResNet:\n if base_model not in (\"ig65m\", \"kinetics\"):\n raise ValueError(\n f\"Not supported model {base_model}. 
Should be 'ig65m' or 'kinetics'\"\n )\n\n # Decide if to use pre-trained weights for DNN trained using 8 or for 32 frames\n model_name = f\"r2plus1d_34_{sample_length}_{base_model}\"\n\n print(f\"Loading {model_name} model\")\n\n model = torch.hub.load(\n TORCH_R2PLUS1D,\n model_name,\n num_classes=MODELS[model_name],\n pretrained=True,\n )\n\n # Replace head\n if num_classes is not None:\n model.fc = nn.Linear(model.fc.in_features, num_classes)\n\n return model, model_name", "def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):\n super(ResCNNEncoder, self).__init__()\n\n self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2\n self.drop_p = drop_p\n\n resnet = models.resnet50(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.fc1 = nn.Linear(resnet.fc.in_features, fc_hidden1)\n self.bn1 = nn.BatchNorm1d(fc_hidden1, momentum=0.01)\n self.fc2 = nn.Linear(fc_hidden1, fc_hidden2)\n self.bn2 = nn.BatchNorm1d(fc_hidden2, momentum=0.01)\n self.fc3 = nn.Linear(fc_hidden2, CNN_embed_dim)", "def __init__(self, embed_size):\n super(ImgAttentionEncoder, self).__init__()\n vggnet_feat = models.vgg19(pretrained=True).features\n modules = list(vggnet_feat.children())[:-2]\n self.cnn = nn.Sequential(*modules)\n self.fc = nn.Sequential(nn.Linear(self.cnn[-3].out_channels, embed_size),\n nn.Tanh()) # feature vector of image", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def 
oct_resnet101(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 23, 3], **kwargs)", "def __init__(self):\n super(BaseRNNEncoder, self).__init__()", "def resnet34(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet34']))\n return model" ]
[ "0.65736985", "0.6366523", "0.6366468", "0.6340577", "0.6290355", "0.62448686", "0.6211965", "0.62079406", "0.6202202", "0.61859906", "0.617765", "0.6167639", "0.6150246", "0.6131579", "0.61293316", "0.6126124", "0.60872185", "0.60808474", "0.60243255", "0.6022623", "0.6018663", "0.6012224", "0.60103196", "0.59991026", "0.5980352", "0.59795797", "0.5978402", "0.5970605", "0.5970605", "0.5970605", "0.5970605", "0.5970605", "0.5967633", "0.5954504", "0.5918928", "0.5918928", "0.5918928", "0.5911149", "0.5898257", "0.5897255", "0.5897255", "0.5897255", "0.5866359", "0.58650285", "0.58613294", "0.58604795", "0.5854079", "0.5852248", "0.58485234", "0.5844048", "0.5841845", "0.5831329", "0.5815051", "0.5802969", "0.5800922", "0.5799978", "0.5787661", "0.577999", "0.5759582", "0.5757672", "0.575083", "0.57487744", "0.57487744", "0.57487744", "0.5741802", "0.5736683", "0.57308155", "0.5729301", "0.57290316", "0.57184535", "0.5715245", "0.56919456", "0.5686511", "0.5680565", "0.5679594", "0.5674349", "0.5672322", "0.56632525", "0.566292", "0.5660622", "0.5659613", "0.565923", "0.56590164", "0.5656093", "0.5647468", "0.5641001", "0.5638767", "0.56286687", "0.5623067", "0.5622588", "0.5619312", "0.5615829", "0.5607118", "0.56038064", "0.56038064", "0.56038064", "0.56038064", "0.560301", "0.5597648", "0.55959773" ]
0.64773536
1
Constructs an Octave ResNet50 model.
def pre_act_oct_resnet50(pretrained=False, **kwargs):
    model = PreActOctResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def ResNet50_model(input_shape, pooling):\n from keras.applications.resnet import ResNet50\n return ResNet50(include_top=False, weights='imagenet', input_shape=input_shape, pooling=pooling)", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnext50(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet('resnet50', Bottleneck, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet('resnet50', Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def oct_resnet50(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 6, 3], **kwargs)", "def resnet50(scale=1, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], scale=scale, **kwargs)\n return model", "def resnet50(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model", "def resnet50(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model", "def resnet50(num_classes, pretrained=False, **kwargs):\n model = RetinaNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model", "def resnext50(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, 
model_zoo.load_url(model_urls['resnext50_32x4d']))\n return model", "def resnet50(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet(Bottleneck, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(Bottleneck, [3, 4, 6, 3]) #, **kwargs)\n return model", "def resnet50():\n initializer = K.initializers.he_normal(seed=None)\n\n X = K.Input(shape=(224, 224, 3))\n\n # conv1\n layer = K.layers.Conv2D(filters=64,\n kernel_size=(7, 7),\n strides=(2, 2),\n padding='same',\n kernel_initializer=initializer,\n )(X)\n\n layer = K.layers.BatchNormalization(axis=3)(layer)\n\n layer = K.layers.Activation('relu')(layer)\n\n # conv2_x\n layer = K.layers.MaxPool2D(pool_size=(3, 3),\n strides=(2, 2),\n padding='same')(layer)\n\n layer = projection_block(layer, [64, 64, 256], 1)\n for _ in range(2):\n layer = identity_block(layer, [64, 64, 256])\n\n # conv3_x\n layer = projection_block(layer, [128, 128, 512])\n for _ in range(3):\n layer = identity_block(layer, [128, 128, 512])\n\n # conv4_x\n layer = projection_block(layer, [256, 256, 1024])\n for _ in range(5):\n layer = identity_block(layer, [256, 256, 1024])\n\n # conv5_x\n layer = projection_block(layer, [512, 512, 2048])\n for _ in range(2):\n layer = identity_block(layer, [512, 512, 2048])\n\n layer = K.layers.AveragePooling2D(pool_size=(7, 7),\n padding='same')(layer)\n\n layer = K.layers.Dense(units=1000,\n activation='softmax',\n kernel_initializer=initializer,\n )(layer)\n\n model = K.models.Model(inputs=X, outputs=layer)\n return model", "def resnet50(pretrained=True, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n print(\"=> using pre-trained model '{}'\".format('resnet_50'))\n pretrained_state = model_zoo.load_url(model_urls['resnet50'])\n model_state = model.state_dict()\n pretrained_state = { k:v for k,v in pretrained_state.items() if k in model_state and v.size() == model_state[k].size() }\n model_state.update(pretrained_state)\n model.load_state_dict(model_state)\n return model", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. 
Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def resnet():\n return models.resnet152(pretrained=True)", "def model_fn(model_dir):\n model = models.resnet50(pretrained=True)\n\n _ = model.eval()\n\n modules=list(model.children())[:-1]\n model=nn.Sequential(*modules)\n for p in model.parameters():\n p.requires_grad = False\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n\n model = model.to(device)\n\n return model", "def resnet50(num_classes=1000, pretrained='imagenet'):\n model = resnet50_srelu(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet50'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model", "def resnet50_cifar10(pretrained=True, progress=True, use_data_parallel=False, **kwargs):\n return _model('resnet50_cifar10', resnet50, pretrained, progress, use_data_parallel, **kwargs)", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def resnet50(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet50(pretrained)\n if freeze:\n set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def get_model():\n # Load the pretrained model.\n model = torchvision.models.resnet34(pretrained=True)\n\n # Resize model for our task.\n model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n model.avgpool = torch.nn.AvgPool2d(2)\n model.fc = torch.nn.Linear(in_features=512, out_features=10, bias=True)\n\n return model", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n 
parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def create_basic_cnn_model(num_classes: int):\n model = Sequential()\n\n # Convolutional + spooling layers\n model.add(Conv2D(64, (5, 5), input_shape=(config.ROI_IMG_SIZE['HEIGHT'], config.ROI_IMG_SIZE['WIDTH'], 1)))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Conv2D(32, (5, 5), padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Flatten())\n\n # Dropout\n model.add(Dropout(0.5, seed=config.RANDOM_SEED, name=\"Dropout_1\"))\n\n # FC\n model.add(Dense(1024, activation='relu', name='Dense_2'))\n\n # Output\n if num_classes == 2:\n model.add(Dense(1, activation='sigmoid', kernel_initializer=\"random_uniform\", name='Output'))\n else:\n model.add(Dense(num_classes, activation='softmax', kernel_initializer=\"random_uniform\", name='Output'))\n\n # Print model details if running in debug mode.\n if config.verbose_mode:\n print(model.summary())\n\n return model", "def ResNeXt50(\n include_top=True,\n weights=\"imagenet\",\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n use_mixstyle=False,\n **kwargs\n):\n\n def stack_fn(x):\n x = stack1(x, 64, 3, stride1=1, groups=32, base_width=4, name=\"conv2\")\n if use_mixstyle:\n x = MixStyle(p=0.5, alpha=0.1, name=\"conv2_mixstyle\")(x)\n x = stack1(x, 128, 4, groups=32, base_width=4, name=\"conv3\")\n if use_mixstyle:\n x = MixStyle(p=0.5, alpha=0.1, name=\"conv3_mixstyle\")(x)\n x = stack1(x, 256, 6, groups=32, base_width=4, name=\"conv4\")\n if use_mixstyle:\n x = MixStyle(p=0.5, alpha=0.1, name=\"conv4_mixstyle\")(x)\n return stack1(x, 512, 3, groups=32, base_width=4, name=\"conv5\")\n\n return ResNet(\n stack_fn,\n False,\n \"resnext50\",\n include_top,\n weights,\n input_tensor,\n input_shape,\n pooling,\n False,\n None,\n classes,\n **kwargs\n )", "def define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n 
num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def make_model():\n # create the base pre-trained model\n base_model = efn.EfficientNetB0(input_shape=(img_width, img_height, 3), include_top=False)\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n return base_model, model", "def pre_act_oct_resnet200(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def resnet50(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained:\n load_dict = torch.load('./resnet50.pth')\n load_state_dict(model, load_dict)\n else:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n return model", "def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def resnet50(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n return model", "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def __init__(self, x, num_classes=15, is_training=False):\n\n super(resnet_v2_50, self).__init__()\n\n self.x = x\n self.num_classes = num_classes\n\n # populating the tensorflow graph\n with slim.arg_scope(arg_scopes_map['resnet_v2_50']()):\n net, 
end_points = networks_map['resnet_v2_50'](\n x, num_classes=num_classes,\n is_training=is_training, reuse=None)\n\n self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_50/logits')\n self.variables_to_restore = slim.get_variables_to_restore(exclude=[])", "def resnet50():\n\n X = K.Input(shape=(224, 224, 3))\n init = K.initializers.he_normal(seed=None)\n\n conv1 = K.layers.Conv2D(\n filters=64,\n kernel_size=(\n 7,\n 7),\n padding='same',\n strides=2,\n kernel_initializer=init)(X)\n\n bn1 = K.layers.BatchNormalization(axis=3)(conv1)\n\n activation1 = K.layers.Activation('relu')(bn1)\n\n maxpool1 = K.layers.MaxPooling2D(\n pool_size=(\n 3, 3), strides=(\n 2, 2), padding='same',)(activation1)\n\n Projection1 = projection_block(maxpool1, [64, 64, 256], s=1)\n IdenBlock1 = identity_block(Projection1, [64, 64, 256])\n IdenBlock2 = identity_block(IdenBlock1, [64, 64, 256])\n\n Projection2 = projection_block(IdenBlock2, [128, 128, 512])\n IdenBlock3 = identity_block(Projection2, [128, 128, 512])\n IdenBlock4 = identity_block(IdenBlock3, [128, 128, 512])\n IdenBlock5 = identity_block(IdenBlock4, [128, 128, 512])\n\n Projection3 = projection_block(IdenBlock5, [256, 256, 1024])\n IdenBlock6 = identity_block(Projection3, [256, 256, 1024])\n IdenBlock7 = identity_block(IdenBlock6, [256, 256, 1024])\n IdenBlock8 = identity_block(IdenBlock7, [256, 256, 1024])\n IdenBlock9 = identity_block(IdenBlock8, [256, 256, 1024])\n IdenBlock10 = identity_block(IdenBlock9, [256, 256, 1024])\n\n Projection4 = projection_block(IdenBlock10, [512, 512, 2048])\n IdenBlock11 = identity_block(Projection4, [512, 512, 2048])\n IdenBlock12 = identity_block(IdenBlock11, [512, 512, 2048])\n\n avgpool = K.layers.AveragePooling2D(\n pool_size=(\n 1, 1), strides=(\n 7, 7), padding='same',)(IdenBlock12)\n\n SoftMax = K.layers.Dense(\n units=1000,\n kernel_initializer=init,\n activation='softmax',\n )(avgpool)\n\n Keras = K.Model(inputs=X, outputs=SoftMax)\n\n return Keras", "def ResNet20(inputShape):\n inputs = Input(shape=inputShape)\n x = resLayer(inputs) # resLayer1\n\n # resBlocks\n for nStage in range(3):\n for nBlock in range(3):\n x = resBlock(x, nStage, nBlock)\n\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(10, activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Generate model\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def eca_resnet50(pretrained=False, k_size=(3, 3, 3, 3), **kwargs):\n model = EcaResnet(Bottleneck, [3, 4, 6, 3], k_size=k_size, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['eca_resnet50']), strict=False)\n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif arch == 'wide_resnet50_2':\n model = torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = 
torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def model_fn(model_dir):\n \n model = resnet18Basic(num_classes=10)\n net.load_params('%s/model.params' % model_dir, ctx=mx.cpu())\n return net", "def ffc_resnet50(pretrained=False, **kwargs):\n model = FFCResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def raw_model():\n model = cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model", "def __init__(self, path, epochs, batch_size):\n\n\t\tX_train, X_val, Y_train, Y_val = self._load_dataset(path)\n\n\t\tmodel = self._Resnet50(input_shape = (64, 64, 3), classes = 10)\n\t\tmodel.summary()\n\t\tcheckpointer = ModelCheckpoint(filepath=\"./data/model.h5\", verbose=0, save_best_only=True)\n\t\ttensorboard = TensorBoard(log_dir='data/./logs', histogram_freq=0, write_graph=True, write_images=True)\n\t\tmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\t\thistory = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,shuffle=True, \n\t\tvalidation_data=(X_val, Y_val), verbose=1, callbacks=[checkpointer, tensorboard]).history", "def Non_Local_50(last_stride, pretrained=False, **kwargs):\n model = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def create_vision_model(image_dims=(224, 224, 3), weights='imagenet'):\n base_model = tf.keras.applications.ResNet50V2(\n weights=weights, include_top=False, input_shape=image_dims)\n inp = tf.keras.layers.Input(shape=image_dims)\n x = base_model(inp)\n x = tfkl.GlobalAveragePooling2D()(x)\n len_visual_description = x.shape[-1]\n vision_model = tf.keras.Model(inp, x)\n return vision_model, len_visual_description", "def resnet34(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n 
model = ResNet(BasicBlock, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def make_model():\n model = Sequential()\n model.add(Dense(1000, input_shape=(INPUT_SIZE,), activation='relu'))\n model.add(Dense(1000, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', metrics=['accuracy'])\n return model", "def keras_resnet50_imagenet_model(logits=False, input_range_type=1):\n input_shape = (224, 224, 3)\n # if scaling:\n # x = x + 0.5\n # x_bgr = scaling_tf(x)\n model = ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=input_shape, pooling=None, classes=1000, logits=logits, input_range_type=input_range_type)\n # predictions = model.outputs[0]\n # return predictions\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet50_rpn(base_model, weight_regularizer=None, bias_regularizer=None, include_conv=False,\n anchors_per_loc=DEFAULT_ANCHORS_PER_LOC):\n net = Conv2D(512, (3, 3), padding='same', activation='relu',kernel_initializer='normal',\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n name='rpn_conv1')(base_model.output)\n\n gaussian_initializer = TruncatedNormal(stddev=0.01)\n x_class = Conv2D(anchors_per_loc, (1, 1), activation='sigmoid', kernel_initializer=gaussian_initializer,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n name='rpn_out_cls')(net)\n x_regr = Conv2D(anchors_per_loc * 4, (1, 1), activation='linear', kernel_initializer=gaussian_initializer,\n kernel_regularizer=weight_regularizer, 
bias_regularizer=bias_regularizer,\n name='rpn_out_bbreg')(net)\n\n outputs = [x_class, x_regr]\n if include_conv:\n outputs.append(base_model.output)\n\n rpn_model = Model(inputs = base_model.inputs, outputs = outputs)\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n rpn_model.load_weights(weights_path, by_name=True)\n return rpn_model", "def __init__(self, version='ResNet50', dilation=None, **kwargs):\n super(ResNet, self).__init__(**kwargs)\n params = {'ResNet50': [2, 3, 5, 2],\n 'ResNet101': [2, 3, 22, 2],\n 'ResNet152': [2, 7, 35, 2]}\n self.version = version\n assert version in params\n self.params = params[version]\n\n if dilation is None:\n self.dilation = [1, 1]\n else:\n self.dilation = dilation\n assert len(self.dilation) == 2", "def pre_act_oct_resnet101(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def pre_act_oct_resnet152(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def get_model(self, number_of_classes, weight_path):\n\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\n\n img_input = Input(shape=input_shape_img, name=\"image_input\")\n\n # Define ResNet50 model Without Top\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer=\"he_normal\", activation='softmax', name='fc')(\n model_resnet50)\n\n # Define the model\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n # In the summary, weights and layers from ResNet50 part will be hidden, but they will be fit during the training\n model.summary()\n\n # Load pre-trained weights for ResNet50\n try:\n print(\"Start loading Weights\")\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\n\n except Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format(\n 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5',\n 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n ))\n raise Exception(e)\n\n print('Finished successfully loading Model')\n return model", "def SimpleRNN(self):\n # Model.\n model = Sequential()\n model.add(SimpleRNN(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,\n resnet_version=resnet_model.DEFAULT_VERSION,\n dtype=resnet_model.DEFAULT_DTYPE):\n if resnet_size % 6 != 2:\n raise ValueError('resnet_size must be 6n + 2:', resnet_size)\n\n num_blocks = (resnet_size - 2) // 6\n\n super(Model, self).__init__(\n resnet_size=resnet_size,\n bottleneck=False,\n num_classes=num_classes,\n num_filters=16,\n kernel_size=3,\n conv_stride=1,\n first_pool_size=None,\n first_pool_stride=None,\n block_sizes=[num_blocks] * 3,\n block_strides=[1, 2, 2],\n final_size=64,\n resnet_version=resnet_version,\n data_format=data_format,\n dtype=dtype\n )", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 
64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def resnet50_earlyexit(**kwargs):\n model = ResNetEarlyExit(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def create_simple_model():\n\n input_shape = (160, 320, 3)\n \n m = Sequential()\n\n # 1. Add Normalization\n m.add(Lambda(lambda x: x/255.0 - 0.5,\n input_shape=input_shape,\n ))\n\n # 2. Flatten + 1 fully connected layer\n m.add(Flatten())\n m.add(Dense(10, activation='relu', init=my_init))\n \n # 3. 
Output Layer is a Dense layer with no activation function\n m.add(Dense(1))\n \n return m", "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def pre_act_oct_resnet26(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def ResNet34(num_classes=10):\n return ResNet(BasicBlock, \n [3,4,6,3], \n num_classes=num_classes)", "def create_original_model():\n model = Sequential()\n model.add(Embedding(max_features,\n embedding_dims,\n input_length=maxlen))\n model.add(Dropout(0.2))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "def get_model():\n \n # load a model pre-trained pre-trained on COCO\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained = True)\n \n # replace the classifier with a new one, that has num_classes which is user-defined\n num_classes = 2 # 1 class (person) + background\n \n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n \n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n \n return model", "def build_resnet_generator(self, model_shape, filters=32, k_size=3, last_act='tanh', summary=False, model_file=None, name='gan_g_'):\n if (model_file):\n \"\"\"\n Load pretreined model\n \"\"\"\n model = self.utils.build_pretrained_model(model_file)\n if (summary):\n model.summary()\n return model\n else:\n init = RandomNormal(stddev=0.02)\n n_rows = model_shape[0]\n n_cols = model_shape[1]\n in_c_dims = model_shape[2]\n out_c_dims = model_shape[3]\n \n n_rows_e1, n_rows_e2, n_rows_e4, n_rows_e8 = n_rows//1, n_rows//2, n_rows//4, n_rows//8\n rows_matching = np.equal([2*n_rows_e2, 2*n_rows_e4, 2*n_rows_e8], [n_rows_e1, n_rows_e2, n_rows_e4])\n index_rows = np.where(np.logical_not(rows_matching))[0]\n \n n_cols_e1, n_cols_e2, n_cols_e4, n_cols_e8 = n_cols//1, n_cols//2, n_cols//4, n_cols//8\n cols_matching = np.equal([2*n_cols_e2, 2*n_cols_e4, 2*n_cols_e8], [n_cols_e1, n_cols_e2, n_cols_e4])\n index_cols = np.where(np.logical_not(cols_matching))[0]\n \n input_shape = (n_rows, n_cols, in_c_dims)\n input_layer = Input(shape=input_shape, name=name+'_input')\n \n e1 = self.Conv2D_Block(input_layer, n_kernels=filters, k_size=7, strides=1, bn=False,name=name+'e1') # rows, cols\n e2 = self.Conv2D_Block(e1, 2*filters, k_size=k_size, bn_training=True, name=name+'e2') # rows/2, cols/2\n e3 = self.Conv2D_Block(e2, 4*filters, k_size=k_size, bn_training=True, name=name+'e3') # rows/4, cols/4\n e4 = self.Conv2D_Block(e3, 8*filters, k_size=k_size, bn=False, name=name+'e4') # rows/8, cols/8\n\n rb1 = self.residual_block(e4, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'1_')\n rb2 = 
self.residual_block(rb1, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'2_')\n rb3 = self.residual_block(rb2, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'3_')\n rb3 = Dropout(rate=0.5, name=name+'drop_1')(rb3, training=True)\n \n rb4 = self.residual_block(rb3, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'4_')\n rb4 = Dropout(rate=0.5, name=name+'drop_2')(rb4, training=True) \n \n rb5 = self.residual_block(rb4, n_kernels=8*filters, k_size=k_size, bn_training=True, name=name+'5_')\n rb5 = Dropout(rate=0.5, name=name+'drop_3')(rb5, training=True) \n \n d1 = self.Conv2DTranspose_Block(rb5, 4*filters, k_size=k_size, activation='linear', name=name+'d1') # rows/4, cols/4\n if index_rows==2 or index_cols==2:\n d1 = BilinearUpsampling(output_size=(n_rows//4, n_cols//4), name=name+'_bilinear')(d1)\n d1 = Concatenate(name=name+'conc_1')([d1, e3])\n d1 = Activation('relu', name=name+'_act_1')(d1)\n \n d2 = self.Conv2DTranspose_Block(d1, 2*filters, k_size=k_size, activation='linear', name=name+'d2') # rows/2, cols/2\n if index_rows==1 or index_cols==1:\n d2 = BilinearUpsampling(output_size=(n_rows//2, n_cols//2), name=name+'_bilinear')(d2)\n d2 = Concatenate(name=name+'conc_2')([d2, e2])\n d2 = Activation('relu', name=name+'_act_2')(d2)\n \n d3 = self.Conv2DTranspose_Block(d2, 1*filters, k_size=k_size, activation='linear', name=name+'d3') # rows, cols\n if index_rows==0 or index_cols==0:\n d3 = BilinearUpsampling(output_size=(n_rows, n_cols), name=name+'_bilinear')(d2)\n d3 = Concatenate(name=name+'conc_3')([d3, e1])\n d3 = Activation('relu', name=name+'act_3')(d3)\n\n output = Conv2DTranspose(out_c_dims, 7, strides=1, padding='same', kernel_initializer=init, name=name+'d_out')(d3) # rows, cols\n output = Activation(last_act, name=name+last_act)(output)\n\n model = Model(inputs=[input_layer], outputs=[output], name='Generator'+name[-3:])\n if (summary):\n model.summary()\n return model", "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model" ]
[ "0.6788361", "0.67803186", "0.674441", "0.6740062", "0.6740062", "0.6740062", "0.6740062", "0.67154825", "0.67154825", "0.6712815", "0.6572177", "0.6566482", "0.6549519", "0.6523207", "0.64100075", "0.6408448", "0.6368369", "0.6368369", "0.6367889", "0.63454634", "0.6303097", "0.6297696", "0.62958664", "0.62292665", "0.6204837", "0.62022847", "0.6143107", "0.6130562", "0.6110048", "0.60912937", "0.6053555", "0.60315", "0.60263795", "0.6010358", "0.60089344", "0.60005796", "0.5980604", "0.5972943", "0.59347427", "0.59230304", "0.59092504", "0.5892401", "0.5884614", "0.5876171", "0.58755714", "0.58575064", "0.58469355", "0.5844884", "0.58366853", "0.5813016", "0.5812342", "0.5796785", "0.5794243", "0.5775985", "0.57649076", "0.5759663", "0.57545793", "0.5741877", "0.57383335", "0.57358724", "0.5735239", "0.5725533", "0.5722035", "0.5720618", "0.57156456", "0.57142025", "0.5695327", "0.56898063", "0.5688144", "0.5688144", "0.5688144", "0.56857157", "0.5679666", "0.5675347", "0.56714237", "0.5666861", "0.5663463", "0.5658298", "0.565627", "0.5653515", "0.5652046", "0.5652046", "0.5652046", "0.5652046", "0.5652046", "0.565134", "0.5650711", "0.56503385", "0.56417555", "0.5637739", "0.5635669", "0.56354654", "0.56348747", "0.5621829", "0.56214505", "0.5606292", "0.5599814", "0.55965334", "0.5592859", "0.55919236" ]
0.66204983
10
Constructs an Octave ResNet101 model.
def pre_act_oct_resnet101(pretrained=False, **kwargs):
    model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet101(pretrained=False, **kwargs):\n model = ResNet('resnet101', Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnext101(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n\n return model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained and os.path.exists(model_urls['resnet101']):\n model.load_state_dict(torch.load(model_urls['resnet101']), strict=False)\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def bl_resnet101(pretrained=False, **kwargs):\n model = bL_ResNet([3, 7, 17, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)\n return model", "def resnet101(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 
1], shortcut_type, num_classes, in_channels)\n return model", "def resnet101(scale=1, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], scale=scale, **kwargs)\n return model", "def resnet101(num_classes, pretrained=False, **kwargs):\n model = RetinaNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)\n return model", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def oct_resnet101(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 23, 3], **kwargs)", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet101(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 4, 23, 3], shortcut_type, num_classes, in_channels)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnet101_of(withexpression, pretrained=False, nb_classes=1000, channel=20, **kwargs):\n\tmodel = ResNet_of(Bottleneck, [3, 4, 23, 3], nb_classes, withexpression, channel, **kwargs)\n\tif pretrained:\n\t\tpretrain_dict = model_zoo.load_url(model_urls['resnet101']) # modify pretrain code\n\t\tmodel_dict = model.state_dict()\n\t\tmodel_dict = weight_transform(model_dict, pretrain_dict, channel)\n\t\tmodel.load_state_dict(model_dict)\n\n\treturn model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def __init__(self, pretrained=True, freeze_weights=True):\n super(RaisinNet34, self).__init__()\n # Define the model's name for it's output files\n # Load a pre-trained ResNet-34 model and turn off autograd\n # so its weights won't change.\n architecture = resnet34(pretrained=pretrained)\n if freeze_weights:\n for layer in architecture.parameters():\n layer.requires_grad = False\n # Copy the convolutional layers of the model.\n self.conv1 = architecture.conv1\n self.bn1 = architecture.bn1\n self.relu = architecture.relu\n self.maxpool = architecture.maxpool\n self.layer1 = architecture.layer1\n self.layer2 = architecture.layer2\n self.layer3 = architecture.layer3\n self.layer4 = architecture.layer4\n # Copy the average pooling layer of the model.\n self.avgpool = architecture.avgpool\n # Redefine the classification block of ResNet-34.\n # Use LeakyReLU units instead of ReLU units.\n # Output layer has 2 nodes only for the 2 classes in the PCam dataset.\n in_ftrs = architecture.fc.in_features\n self.fc = nn.Linear(in_features=in_ftrs, out_features=2, bias=True)\n # Define a LogSoftmax layer for converting outputs to probabilities\n # Not needed in `forward()` because included in nn.CrossEntropyLoss\n self.log_softmax = nn.LogSoftmax(dim=1)", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def Non_Local_101(last_stride, pretrained=False, **kwargs):\n model = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 4, 23, 3], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def res_inabp_101(imagenet_name=False, **kwargs):\n if imagenet_name:\n imagenet_name = 'resnet101'\n else:\n imagenet_name = None\n model = res_INABP(Bottleneck, [3, 4, 23, 3], **kwargs)\n model.load_pretrained_weights(imagenet_name)\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet34(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif arch == 'wide_resnet50_2':\n model = 
torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def ResNeXt101(\n include_top=True,\n weights=\"imagenet\",\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n use_mixstyle=False,\n **kwargs\n):\n\n def stack_fn(x):\n x = stack1(x, 64, 3, stride1=1, groups=32, base_width=8, name=\"conv2\")\n if use_mixstyle:\n x = MixStyle(p=0.5, alpha=0.1, name=\"conv2_mixstyle\")(x)\n x = stack1(x, 128, 4, groups=32, base_width=8, name=\"conv3\")\n if use_mixstyle:\n x = MixStyle(p=0.5, alpha=0.1, name=\"conv3_mixstyle\")(x)\n x = stack1(x, 256, 23, groups=32, base_width=8, name=\"conv4\")\n if use_mixstyle:\n x = MixStyle(p=0.5, alpha=0.1, name=\"conv4_mixstyle\")(x)\n return stack1(x, 512, 3, groups=32, base_width=8, name=\"conv5\")\n\n return ResNet(\n stack_fn,\n False,\n \"resnext101\",\n include_top,\n weights,\n input_tensor,\n input_shape,\n pooling,\n False,\n None,\n classes,\n **kwargs\n )", "def _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet101_rgb(withexpression, pretrained=False, nb_classes=1000, **kwargs):\n\tmodel = ResNet_rgb(Bottleneck, [3, 4, 23, 3], withexpression, nb_classes, **kwargs)\n\tif pretrained:\n\t\tpretrain_dict = model_zoo.load_url(model_urls['resnet101']) # modify pretrain code\n\n\t\tmodel_dict = model.state_dict()\n\t\tpretrained_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}\n\t\tmodel_dict.update(pretrained_dict)\n\t\tmodel.load_state_dict(model_dict)\n\n\treturn model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n 
if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet110g8r(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, indices=\"random\", **kwargs)", "def ffc_resnet101(pretrained=False, **kwargs):\n model = FFCResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def pre_act_oct_resnet26(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def bl_resnet152(pretrained=False, **kwargs):\n model = bL_ResNet([4, 11, 29, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet18(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [2, 2, 2, 2], shortcut_type, num_classes, in_channels)\n return model", "def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet('resnet152', Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnext152(**kwargs):\n model = ResNeXt(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def sphere_resnet110(**kwargs):\n model = ResNet(110, **kwargs)\n return model", "def resnet110m(**kwargs):\r\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "def resnet18(pretrained=False, **kwargs):\n model 
= ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def model_fn(model_dir):\n \n model = resnet18Basic(num_classes=10)\n net.load_params('%s/model.params' % model_dir, ctx=mx.cpu())\n return net", "def resnet110(**kwargs):\r\n return ResNet(BasicBlock, 110, **kwargs)", "def resnet34(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n return model", "def get_model():\n # Load the pretrained model.\n model = torchvision.models.resnet34(pretrained=True)\n\n # Resize model for our task.\n model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n model.avgpool = torch.nn.AvgPool2d(2)\n model.fc = torch.nn.Linear(in_features=512, out_features=10, bias=True)\n\n return model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self, version='ResNet50', dilation=None, **kwargs):\n super(ResNet, self).__init__(**kwargs)\n params = {'ResNet50': [2, 3, 5, 2],\n 'ResNet101': [2, 3, 22, 2],\n 'ResNet152': [2, 7, 35, 2]}\n self.version = version\n assert version in params\n self.params = params[version]\n\n if dilation is None:\n self.dilation = [1, 1]\n else:\n self.dilation = dilation\n assert len(self.dilation) == 2", "def pre_act_oct_resnet152(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n 
resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def pre_act_oct_resnet200(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def pre_act_oct_resnet50(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n 
override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def load_resnet_image_encoder(model_stage=2):\n \n print('Load pretrained ResNet 101.')\n model = resnet101(pretrained=True)\n layers = [model.conv1, model.bn1, model.relu, model.maxpool]\n layers += [getattr(model, 'layer{}'.format(i+1)) for i in range(model_stage)]\n model = torch.nn.Sequential(*layers)\n if torch.cuda.is_available():\n model.cuda()\n\n for p in model.parameters():\n p.requires_grad = False\n return model.eval()", "def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)", "def __init__(self,\n image_channels,\n num_classes):\n super().__init__()\n\n self.model = torchvision.models.resnet18(pretrained=True)\n self.model.fully_connected = nn.Linear(224, 10)", "def resnext101(pretrained=False, progress=True, **kwargs):\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)", "def resnext50(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n # model_dict = model.state_dict()\n\n if pretrained:\n # pretrained_dict=model_zoo.load_url(model_urls['resnet34'],model_dir='/home/FENGsl/JBHI/Pretrain_model')\n # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n # model_dict.update(pretrained_dict)\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='/home/FENGsl/JBHI/Pretrain_model'))\n print('===> Pretrain Model Have Been Loaded, Please fasten your seat belt and get ready to take off!')\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def model_creator(config):\n return nn.Linear(1, 1)", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def raw_model():\n model = cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n 
model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model", "def resnet110g8(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, **kwargs)" ]
[ "0.6975478", "0.6963279", "0.6944051", "0.6944051", "0.6944051", "0.6944051", "0.69128805", "0.68887615", "0.6869963", "0.6868124", "0.68124294", "0.6709491", "0.6664259", "0.6648746", "0.66283625", "0.66283625", "0.65992206", "0.65794027", "0.6534455", "0.6504741", "0.64674777", "0.64346147", "0.6409692", "0.6354781", "0.6312421", "0.6267552", "0.62591344", "0.61776143", "0.61734843", "0.61692923", "0.6159398", "0.6144146", "0.6140972", "0.61295915", "0.612663", "0.6084366", "0.6067245", "0.6058819", "0.6058819", "0.6058819", "0.6058819", "0.6058819", "0.60569334", "0.60083485", "0.59974873", "0.5995822", "0.59831023", "0.5980861", "0.59682393", "0.59669185", "0.5962634", "0.59397805", "0.59382683", "0.59382683", "0.59382683", "0.5928619", "0.5928619", "0.5928619", "0.5928619", "0.5928272", "0.59229904", "0.5902152", "0.5882836", "0.5882176", "0.587117", "0.58707863", "0.5870635", "0.58700496", "0.58655334", "0.5864335", "0.58629894", "0.585687", "0.58526593", "0.5842876", "0.5835667", "0.58161503", "0.5815491", "0.5812326", "0.5810818", "0.5797983", "0.5797185", "0.57840586", "0.57819223", "0.57816494", "0.5770341", "0.5763776", "0.57596624", "0.5759135", "0.5758977", "0.57537067", "0.57494986", "0.5729797", "0.5728888", "0.5728885", "0.57264173", "0.57249796", "0.57249796", "0.57249796", "0.57169217", "0.5706946" ]
0.6689366
12
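A minimal usage sketch for the factory in the row above, assuming PyTorch and that PreActOctResNet forwards a num_classes keyword (both are assumptions, not shown in the dataset row):

import torch

# Hypothetical usage of pre_act_oct_resnet101 from the document field above.
# Assumes the factory is importable and accepts a num_classes keyword.
model = pre_act_oct_resnet101(pretrained=False, num_classes=1000)
model.eval()
dummy = torch.randn(1, 3, 224, 224)  # one RGB image at 224x224
with torch.no_grad():
    logits = model(dummy)  # expected shape: [1, num_classes]
print(logits.shape)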
Constructs an Octave ResNet152 model.
def pre_act_oct_resnet152(pretrained=False, **kwargs):
    model = PreActOctResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def resnet152(pretrained=False, **kwargs):\n model = ResNet('resnet152', Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def oct_resnet152(**kwargs):\n return _oct_resnet(Bottleneck, [3, 8, 36, 3], **kwargs)", "def resnet152(**kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnext152(**kwargs):\n model = ResNeXt(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def resnet152(scale=1, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], scale=scale, **kwargs)\n return model", "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def resnet152(num_classes, pretrained=False, **kwargs):\n model = RetinaNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model", "def resnet152(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model", "def resnet152(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model", "def bl_resnet152(pretrained=False, **kwargs):\n model = bL_ResNet([4, 11, 29, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 8, 36, 3], shortcut_type, num_classes, in_channels)\n return model", "def __init__(self, x, num_classes=15, is_training=False):\n\n super(resnet_v2_152, self).__init__()\n\n self.x = x\n self.num_classes = num_classes\n\n # populating the tensorflow graph\n with slim.arg_scope(arg_scopes_map['resnet_v2_152']()):\n net, end_points = networks_map['resnet_v2_152'](\n x, 
num_classes=num_classes,\n is_training=is_training, reuse=None)\n\n self.end_points = _get_updated_endpoints(end_points, 'resnet_v2_152/logits')\n self.variables_to_restore = slim.get_variables_to_restore(exclude=[])", "def res_inabp_152(imagenet_name=False, **kwargs):\n if imagenet_name:\n imagenet_name = 'resnet152'\n else:\n imagenet_name = None\n model = res_INABP(Bottleneck, [3, 8, 36, 3], **kwargs)\n model.load_pretrained_weights(imagenet_name)\n return model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def ffc_resnet152(pretrained=False, **kwargs):\n model = FFCResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def eca_resnet152(pretrained=False, k_size=(3, 3, 3, 3), **kwargs):\n model = EcaResnet(Bottleneck, [3, 8, 36, 3], k_size=k_size, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['eca_resnet152']), strict=False)\n return model", "def resnet152_earlyexit(**kwargs):\n model = ResNetEarlyExit(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def sg_resnet_152(x, opt):\n opt += tf.sg_opt(num_class=1000, conv_only=False, squeeze=True)\n\n # convolution layers ( residual net v2 arch )\n conv = (x\n .sg_conv(dim=64, size=7, stride=2)\n .sg_pool(size=3, stride=2, pad='SAME')\n .sg_resnet_layer(dim=64, num=3, stride=1)\n .sg_resnet_layer(dim=128, num=8, stride=2)\n .sg_resnet_layer(dim=256, num=36, stride=2)\n .sg_resnet_layer(dim=512, num=3, stride=2)\n .sg_bypass(act='relu', bn=True)\n .sg_pool(size=7, stride=1, avg=True)) # global average pool\n\n # fully convolution layers\n fc = (conv\n .sg_conv(dim=opt.num_class, size=1, act='linear', bn=False))\n\n if opt.conv_only:\n return conv\n else:\n if opt.squeeze:\n return fc.sg_squeeze(dim=(1, 2))\n else:\n return fc", "def __init__(self, version='ResNet50', dilation=None, **kwargs):\n super(ResNet, self).__init__(**kwargs)\n params = {'ResNet50': [2, 3, 5, 2],\n 'ResNet101': [2, 3, 22, 2],\n 'ResNet152': [2, 7, 35, 2]}\n self.version = version\n assert version in params\n self.params = params[version]\n\n if dilation is None:\n self.dilation = [1, 1]\n else:\n self.dilation = dilation\n assert len(self.dilation) == 2", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i 
in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def genresnet152(**kwargs):\n return EresNet(resnetblocks.EresNetBottleneck, [3, 8, 36, 3], **kwargs)", "def oct_resnet101(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 23, 3], **kwargs)", "def oct_resnet50(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 6, 3], **kwargs)", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def Non_Local_152(last_stride, pretrained=False, **kwargs):\n model = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet110g8r(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, indices=\"random\", **kwargs)", "def __init__(self, model, input_shape, batch_size, ae_flag, **kwargs):\n super(ReconstructionLayer, self).__init__(**kwargs)\n self.rec_model = model\n self.rec_obj = Reconstructor(model)\n self.version = 'v2' if isinstance(model, InvertorDefenseGAN) else 'v1'\n\n self.input_shape = input_shape\n self.batch_size = batch_size\n self.name = 'reconstruction'\n self.ae_flag = ae_flag", "def ResNet20(inputShape):\n inputs = Input(shape=inputShape)\n x = resLayer(inputs) # resLayer1\n\n # resBlocks\n for nStage in range(3):\n for nBlock in range(3):\n x = resBlock(x, nStage, nBlock)\n\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(10, activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Generate model\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = 
resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def resnet18(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [2, 2, 2, 2], shortcut_type, num_classes, in_channels)\n return model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNNFeatures, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n\n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def resnet110m(**kwargs):\r\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = 
ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet110g8(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, **kwargs)", "def resnet34(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet(BasicBlock, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def ResNet34(num_classes=10):\n return ResNet(BasicBlock, \n [3,4,6,3], \n num_classes=num_classes)", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def L14_Net112(mode=\"train\"):\n data = mx.symbol.Variable(name=\"data\")\n landmark_target = mx.symbol.Variable(name=\"landmark_target\")\n landmark_vis = mx.symbol.Variable(name=\"landmark_vis\")\n \n # data = 112X112\n # conv1 = 56X56\n conv1 = Conv(data, num_filter=res_base_dim, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=\"conv1\")\n conv2 = Residual(conv1, num_block=1, num_out= res_base_dim, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim, name=\"res2\")\n \n\t#conv23 = 28X28\n conv23 = DResidual(conv2, num_out=res_base_dim*2, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*2, name=\"dconv23\")\n conv3 = Residual(conv23, num_block=2, num_out=res_base_dim*2, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*2, name=\"res3\")\n \n\t#conv34 = 14X14\n conv34 = DResidual(conv3, num_out=res_base_dim*4, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*4, name=\"dconv34\")\n conv4 = Residual(conv34, num_block=3, num_out=res_base_dim*4, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*4, name=\"res4\")\n \n\t#conv45 = 7X7\n conv45 = DResidual(conv4, num_out=res_base_dim*8, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*8, name=\"dconv45\")\n conv5 = Residual(conv45, num_block=2, num_out=res_base_dim*8, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*8, name=\"res5\")\n \n\t# conv6 = 1x1\n conv6 = Conv(conv5, num_filter=res_base_dim*8, kernel=(7, 7), pad=(0, 0), stride=(1, 1), name=\"conv6\")\n fc1 = Conv(conv6, num_filter=res_base_dim*16, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc1\")\n fc2 = Conv(fc1, num_filter=res_base_dim*32, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc2\")\t\n conv6_3 = mx.symbol.FullyConnected(data=fc2, num_hidden=42, name=\"conv6_3\")\t\n bn6_3 = mx.sym.BatchNorm(data=conv6_3, name='bn6_3', fix_gamma=False,momentum=0.9)\n\t\n if mode == \"test\":\n landmark_pred = bn6_3\n group = mx.symbol.Group([landmark_pred])\n else:\n \n out = mx.symbol.Custom(landmark_vis = landmark_vis, 
landmark_pred=bn6_3, landmark_target=landmark_target, \n op_type='negativemining_hand21', name=\"negative_mining\")\n group = mx.symbol.Group([out])\n \n return group", "def pre_act_oct_resnet26(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,\n resnet_version=resnet_model.DEFAULT_VERSION,\n dtype=resnet_model.DEFAULT_DTYPE):\n if resnet_size % 6 != 2:\n raise ValueError('resnet_size must be 6n + 2:', resnet_size)\n\n num_blocks = (resnet_size - 2) // 6\n\n super(Model, self).__init__(\n resnet_size=resnet_size,\n bottleneck=False,\n num_classes=num_classes,\n num_filters=16,\n kernel_size=3,\n conv_stride=1,\n first_pool_size=None,\n first_pool_stride=None,\n block_sizes=[num_blocks] * 3,\n block_strides=[1, 2, 2],\n final_size=64,\n resnet_version=resnet_version,\n data_format=data_format,\n dtype=dtype\n )", "def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model", "def pre_act_oct_resnet200(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet34(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def _model_definition(self, net):\n \n # Input filtering and downsampling with max pooling\n print(net.shape) #channels must be specified first otherwise keras assumes channels last\n print('resnet17_scp')\n \n net = Conv2D( filters=128, kernel_size=5, activation=None, padding='same', \n data_format=\"channels_first\", input_shape=(1, 100, 100))(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels \n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n\n\n \n return net", "def resnet110(**kwargs):\r\n return ResNet(BasicBlock, 110, **kwargs)", "def disresnet152(**kwargs):\n return Discriminator(resnetblocks.DresNetBottleneck, [3, 8, 36, 3], **kwargs)", "def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with 
open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)", "def __init__(self, size: torch.Size, root: str = None, limit_var=np.infty, logger: Logger = None):\n assert len(size) == 4 and size[2] == size[3]\n assert size[1] in [1, 3]\n root = pt.join(root, 'imagenet22k') if not root.endswith('imagenet') else pt.join(root, '..', 'imagenet22k')\n root = pt.join(root, 'fall11_whole_extracted') # important to have a second layer, to speed up load meta file\n self.root = root\n self.logger = logger\n with logger.timeit('Loading ImageNet22k'):\n super().__init__(root=root, size=size, logger=logger)\n\n self.transform = transforms.Compose([\n transforms.Resize(size[2]),\n transforms.ToTensor()\n ])\n self.picks = None\n if limit_var is not None and limit_var < len(self):\n self.picks = np.random.choice(len(self.samples), size=limit_var, replace=False)\n if limit_var is not None and limit_var > len(self):\n self.logprint(\n 'OEImageNet22 shall be limited to {} samples, but ImageNet22k contains only {} samples, thus using all.'\n .format(limit_var, len(self)), fps=False\n )\n if len(self) < size[0]:\n raise NotImplementedError()", "def pre_act_oct_resnet50(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def ResNet18(input_shape = (28, 28, 1), classes = 24):\n \n # Define the input as a tensor with shape input_shape\n X = X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n #X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, [64, 64], stage=2, block='a')\n X = identity_block(X, [64, 64], stage=2, block='b')\n\n # Stage 3\n X = convolutional_block(X, [128, 128], stage=3, block='a')\n X = identity_block(X, [128, 128], stage=3, block='b')\n\n # Stage 4\n X = convolutional_block(X, [256, 256], stage=4, block='a')\n X = identity_block(X, [256, 256], stage=4, block='b')\n\n # Stage 5\n X = convolutional_block(X, [512, 512], stage=5, block='a')\n X = identity_block(X, [512, 512], stage=5, block='b')\n\n # AVGPOOL\n # X = AveragePooling2D(pool_size=(2,2), name='avg_pool')(X)\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet18')\n\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def pre_act_oct_resnet101(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def make_res_layer(self, **kwargs):\n return ViPNAS_ResLayer(**kwargs)", "def build_resnet(self):\r\n\r\n # INPUTS\r\n inputs_data = Input((self.data_rows, self.data_cols, 1),name='inputs_data')\r\n\r\n\r\n def 
residual_block(input, output_channels=64, kernel_size=(3, 3), stride=(1, 1)):\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(input)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(x)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Add()([x, input])\r\n\r\n residual_block.counter += 1\r\n return x\r\n\r\n residual_block.counter = 0\r\n\r\n conv1=Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu')(inputs_data)\r\n res_block1=residual_block(conv1,output_channels=64)\r\n res_block2 =residual_block(res_block1, output_channels=64)\r\n res_block3 =residual_block(res_block2, output_channels=64)\r\n conv2=Conv2D(1,(3,3),strides=(1,1),padding='same')(res_block3)\r\n outputs=Add()([conv2,inputs_data])\r\n\r\n\r\n model = Model(inputs=inputs_data, outputs=outputs)\r\n\r\n\r\n return model", "def dilated_resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def pyconvhgresnet152(pretrained=False, **kwargs):\n model = PyConvHGResNet(PyConvBlock, [3, 8, 36, 3], **kwargs)\n if pretrained:\n os.makedirs(default_cache_path, exist_ok=True)\n model.load_state_dict(\n torch.load(\n download_from_url(model_urls['pyconvhgresnet152'],\n root=default_cache_path)))\n return model", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet110g4(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=4, **kwargs)", "def resnet110g4r(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=4, indices=\"random\", **kwargs)", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.pooling = nn.MaxPool2d(2,stride = 2)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def build(self, input_image, num_class):\n x = build_resnet(101)\n # add classifier\n x = Conv2D(num_class, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)", "def __init__(self):\n self.model = Sequential()\n self.model.add(AveragePooling2D(pool_size=(4, 4), input_shape=(224, 224, 3)))\n self.model.add(Conv2D(16, (9, 9)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(16, (5, 5)))\n 
self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Flatten())\n self.model.add(Dropout(0.5))\n self.model.add(Dense(1, activation='sigmoid'))\n self.model.compile(loss=binary_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])", "def ResNet50_model(input_shape, pooling):\n from keras.applications.resnet import ResNet50\n return ResNet50(include_top=False, weights='imagenet', input_shape=input_shape, pooling=pooling)", "def resnet18_origin(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnext101(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model" ]
[ "0.71429336", "0.71426964", "0.70883363", "0.7048324", "0.70366234", "0.70190185", "0.6981999", "0.6981999", "0.6981999", "0.6981999", "0.6969816", "0.6943194", "0.6817958", "0.67309207", "0.6713831", "0.6711944", "0.6711944", "0.6689598", "0.6594432", "0.65649575", "0.6242708", "0.6179486", "0.6167743", "0.6154081", "0.61370635", "0.6110053", "0.59808666", "0.59747535", "0.5944446", "0.5897309", "0.58223933", "0.5820205", "0.58115333", "0.5799474", "0.57376105", "0.5729744", "0.5724398", "0.5707016", "0.5695803", "0.56556314", "0.5650045", "0.5628494", "0.5612747", "0.5611346", "0.5597012", "0.55956095", "0.5572138", "0.5561195", "0.55585974", "0.5540749", "0.5536298", "0.5536298", "0.5536298", "0.5536298", "0.5536298", "0.55362445", "0.55341345", "0.55086875", "0.5503644", "0.54795873", "0.54794675", "0.5473152", "0.5462912", "0.5462551", "0.5451858", "0.54305035", "0.54116464", "0.5400486", "0.5393166", "0.5389609", "0.53876674", "0.53831667", "0.5350598", "0.5350542", "0.53373784", "0.533578", "0.5320323", "0.53200257", "0.53200257", "0.53188616", "0.52836496", "0.5281931", "0.5270317", "0.52676946", "0.5260236", "0.5258697", "0.52574396", "0.525332", "0.52512044", "0.52512044", "0.52512044", "0.52346873", "0.5232938", "0.5230417", "0.5228914", "0.5228668", "0.52284366", "0.52284366", "0.52284366", "0.52121544" ]
0.6930058
12
Constructs an Octave ResNet200 model.
def pre_act_oct_resnet200(pretrained=False, **kwargs):
    model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def pre_act_oct_resnet50(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def pre_act_oct_resnet101(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def pre_act_oct_resnet152(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def pre_act_oct_resnet26(pretrained=False, **kwargs):\n model = 
PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def oct_resnet101(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 23, 3], **kwargs)", "def oct_resnet50(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 6, 3], **kwargs)", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def __init__(self, version='ResNet50', dilation=None, **kwargs):\n super(ResNet, self).__init__(**kwargs)\n params = {'ResNet50': [2, 3, 5, 2],\n 'ResNet101': [2, 3, 22, 2],\n 'ResNet152': [2, 7, 35, 2]}\n self.version = version\n assert version in params\n self.params = params[version]\n\n if dilation is None:\n self.dilation = [1, 1]\n else:\n self.dilation = dilation\n assert len(self.dilation) == 2", "def ResNet20(inputShape):\n inputs = Input(shape=inputShape)\n x = resLayer(inputs) # resLayer1\n\n # resBlocks\n for nStage in range(3):\n for nBlock in range(3):\n x = resBlock(x, nStage, nBlock)\n\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(10, activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Generate model\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n 
model.load_state_dict(state_dict)\n return model", "def resnet200(shortcut_type, num_classes, in_channels):\n model = ResNet(Bottleneck, [3, 24, 36, 3], shortcut_type, num_classes, in_channels)\n return model", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def resnext101(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnet110(**kwargs):\r\n return ResNet(BasicBlock, 110, **kwargs)", "def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def oct_resnet152(**kwargs):\n return _oct_resnet(Bottleneck, [3, 8, 36, 3], **kwargs)", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def resnext152(**kwargs):\n model = ResNeXt(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def ResNet34(num_classes=10):\n return ResNet(BasicBlock, \n [3,4,6,3], \n num_classes=num_classes)", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def resnext50(**kwargs):\n model = ResNeXt(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def dilated_resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def resnet152(**kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet152(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = 
torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif arch == 'wide_resnet50_2':\n model = torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n\n return model", "def resnet101(num_classes, pretrained=False, **kwargs):\n model = RetinaNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)\n return model", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def make_res_layer(self, **kwargs):\n return ViPNAS_ResLayer(**kwargs)", "def init_resnet(num_classes: int) -> nn.Module:\n model = models.resnet50(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_classes)\n\n return model", "def ffc_resnet200(pretrained=False, **kwargs):\n model = FFCResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet18']))\n return model", "def make_NN(n_hidden, n_epoch, labelsdict, lr, device, model_name, 
trainloader, validloader, train_data, pretrain, finetune_whole, custom_model):\n if custom_model == 2:\n # Use custom two-layer convolution model\n print(\"Using Two-Layer CNN\")\n model = TwoLayerConvNet()\n elif custom_model == 5:\n print(\"Using Five-Layer CNN\")\n # Use custom five-layer convolution model\n model = FiveLayerConvNet()\n else:\n # Import NN model (either pretrained or not)\n model = getattr(models, model_name)(pretrained=pretrain)\n \"\"\" ===================================================================================== \"\"\"\"\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER FREEZE THE PARAMETERS OR NOT (WILBERT ARISTO) \"\"\"\n # If we do not need to finetune whole model, freeze parameters that we don't need to re-train\n if not finetune_whole:\n for param in model.parameters():\n param.requires_grad = False\n \"\"\" ===================================================================================== \"\"\"\"\n\n n_out = len(labelsdict)\n\n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Make classifier\n n_in = next(model.fc.modules()).in_features\n model.fc = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.fc.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n else:\n # Make classifier\n n_in = next(model.classifier.modules()).in_features\n model.classifier = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.classifier.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n # Define criterion\n criterion = nn.NLLLoss() \n\n model.to(device)\n start = time.time()\n\n epochs = n_epoch\n steps = 0 \n running_loss = 0\n print_every = 40\n for e in range(epochs):\n model.train()\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Eval mode for predictions\n model.eval()\n\n # Turn off gradients for validation\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion, device)\n\n print(\"Epoch: {}/{} - \".format(e+1, epochs),\n \"Training Loss: {:.3f} - \".format(running_loss/print_every),\n \"Validation Loss: {:.3f} - \".format(test_loss/len(validloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure training is back on\n model.train()\n \n \"\"\" CHANGED LAST LAYER TO 
model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Add model info \n model.fc.n_in = n_in\n model.fc.n_hidden = n_hidden\n model.fc.n_out = n_out\n model.fc.labelsdict = labelsdict\n model.fc.lr = lr\n model.fc.optimizer_state_dict = optimizer.state_dict\n model.fc.model_name = model_name\n model.fc.class_to_idx = train_data.class_to_idx\n else:\n # Add model info \n model.classifier.n_in = n_in\n model.classifier.n_hidden = n_hidden\n model.classifier.n_out = n_out\n model.classifier.labelsdict = labelsdict\n model.classifier.lr = lr\n model.classifier.optimizer_state_dict = optimizer.state_dict\n model.classifier.model_name = model_name\n model.classifier.class_to_idx = train_data.class_to_idx\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n print('model:', model_name, '- hidden layers:', n_hidden, '- epochs:', n_epoch, '- lr:', lr)\n print(f\"Run time: {(time.time() - start)/60:.3f} min\")\n return model\n\n# Define function to save checkpoint\ndef save_checkpoint(model, path):\n checkpoint = {'c_input': model.classifier.n_in,\n 'c_hidden': model.classifier.n_hidden,\n 'c_out': model.classifier.n_out,\n 'labelsdict': model.classifier.labelsdict,\n 'c_lr': model.classifier.lr,\n 'state_dict': model.state_dict(),\n 'c_state_dict': model.classifier.state_dict(),\n 'opti_state_dict': model.classifier.optimizer_state_dict,\n 'model_name': model.classifier.model_name,\n 'class_to_idx': model.classifier.class_to_idx\n }\n torch.save(checkpoint, path)\n \n# Define function to load model\ndef load_model(path):\n cp = torch.load(path)\n \n # Import pre-trained NN model \n model = getattr(models, cp['model_name'])(pretrained=True)\n \n # Freeze parameters that we don't need to re-train \n for param in model.parameters():\n param.requires_grad = False\n \n # Make classifier\n model.classifier = NN_Classifier(input_size=cp['c_input'], output_size=cp['c_out'], \\\n hidden_layers=cp['c_hidden'])\n \n # Add model info \n model.classifier.n_in = cp['c_input']\n model.classifier.n_hidden = cp['c_hidden']\n model.classifier.n_out = cp['c_out']\n model.classifier.labelsdict = cp['labelsdict']\n model.classifier.lr = cp['c_lr']\n model.classifier.optimizer_state_dict = cp['opti_state_dict']\n model.classifier.model_name = cp['model_name']\n model.classifier.class_to_idx = cp['class_to_idx']\n model.load_state_dict(cp['state_dict'])\n \n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnext50(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnext50_32x4d']))\n return model", "def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)", "def resnet152(pretrained=False, 
**kwargs):\n model = ResNet('resnet152', Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "def resnet18(num_classes, pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1000)\n if pretrained:\n logger.info('Resnet18: Loading pretrained')\n _model_loaded = try_load_model('resnet18')\n model.load_state_dict(_model_loaded)\n if num_classes != 1000:\n model.reinit_fc(num_classes)\n\n layers = [model.fc, model.layer4, model.layer3]\n\n return model, layers", "def resnet110m(**kwargs):\r\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "def resnet110g8t(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, indices=\"trans\", **kwargs)", "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def ffc_resnet18(pretrained=False, **kwargs):\n model = FFCResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def sphere_resnet20(**kwargs):\n model = ResNet(20, **kwargs)\n return model", "def build_resnet(self):\r\n\r\n # INPUTS\r\n inputs_data = Input((self.data_rows, self.data_cols, 1),name='inputs_data')\r\n\r\n\r\n def residual_block(input, output_channels=64, kernel_size=(3, 3), stride=(1, 1)):\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(input)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Conv2D(output_channels, kernel_size, padding='same', strides=stride)(x)\r\n x = BatchNormalization()(x)\r\n x = Activation('relu')(x)\r\n\r\n x = Add()([x, input])\r\n\r\n residual_block.counter += 1\r\n return x\r\n\r\n residual_block.counter = 0\r\n\r\n conv1=Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu')(inputs_data)\r\n res_block1=residual_block(conv1,output_channels=64)\r\n res_block2 =residual_block(res_block1, output_channels=64)\r\n res_block3 =residual_block(res_block2, output_channels=64)\r\n conv2=Conv2D(1,(3,3),strides=(1,1),padding='same')(res_block3)\r\n outputs=Add()([conv2,inputs_data])\r\n\r\n\r\n model = Model(inputs=inputs_data, outputs=outputs)\r\n\r\n\r\n return model", "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def resnet18(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [2, 2, 2, 2], shortcut_type, num_classes, in_channels)\n return model", "def construct_resnet_nostage(chkpt_path):\n # type: (str) -> (resnet_nostage.ResnetNoStage, resnet_nostage.ResnetNoStageConfig)\n # The state dict of the network\n state_dict = torch.load(chkpt_path)\n n_keypoint = 
state_dict['head_net.features.9.weight'].shape[0] // 2\n assert n_keypoint * 2 == state_dict['head_net.features.9.weight'].shape[0]\n\n # Construct the network\n net_config = resnet_nostage.ResnetNoStageConfig()\n net_config.num_keypoints = n_keypoint\n net_config.image_channels = 4\n net_config.depth_per_keypoint = 2\n net_config.num_layers = 34\n network = resnet_nostage.ResnetNoStage(net_config)\n\n # Load the network\n network.load_state_dict(state_dict)\n network.cuda()\n network.eval()\n return network, net_config", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def sphere_resnet110(**kwargs):\n model = ResNet(110, **kwargs)\n return model", "def resnet110g8r(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, indices=\"random\", **kwargs)", "def resnet110g8(**kwargs):\r\n return ResNet(BasicBlock, 110, groups=8, **kwargs)", "def resnet152(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model", "def resnet152(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def resnet101(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model" ]
[ "0.6754732", "0.6618465", "0.65908283", "0.63108593", "0.63079596", "0.62873125", "0.62037766", "0.61919415", "0.61760974", "0.61187875", "0.6101522", "0.60922974", "0.6071278", "0.6071278", "0.6071278", "0.6071278", "0.6071278", "0.60647464", "0.6059056", "0.6056413", "0.60291654", "0.60282534", "0.6025971", "0.6019859", "0.60081387", "0.599906", "0.5998235", "0.5995848", "0.59931815", "0.5992309", "0.5933626", "0.5910385", "0.5876448", "0.5843026", "0.58258307", "0.5819682", "0.57926095", "0.5792251", "0.57770777", "0.5765682", "0.57505566", "0.57467663", "0.5733243", "0.57325214", "0.57324976", "0.57200056", "0.5715488", "0.57080984", "0.57080984", "0.57080984", "0.57053447", "0.57017946", "0.569621", "0.5682372", "0.5682372", "0.5682372", "0.5682372", "0.5681259", "0.56811", "0.56752557", "0.5674492", "0.567103", "0.5670809", "0.5665506", "0.566399", "0.56599563", "0.5658989", "0.56572473", "0.5652663", "0.5637883", "0.56279325", "0.56279325", "0.5622966", "0.56212735", "0.56178886", "0.5614355", "0.56086046", "0.56071085", "0.5606451", "0.56057006", "0.56051135", "0.5603968", "0.5603968", "0.5603968", "0.5600332", "0.5600054", "0.55991614", "0.55865306", "0.5573619", "0.5573407", "0.5573407", "0.5573407", "0.5573407", "0.5567747", "0.55564183", "0.5554041", "0.55437267", "0.55437267", "0.55436045", "0.5543218" ]
0.66531056
1
The standard size of a tile sprite in 2D screen space.
def tile_size_2d(self):
    return 32.0, 32.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def getSize(self):\n return self.screen.get_size()", "def getSize(self):\n return self.__width * self.__height;", "def tileWidth(self):\n return self._tileWidth", "def pix_size(self):\n return self._pix_size", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def tileHeight(self):\n return self._tileHeight", "def size(self) -> (float, float):\n\n return self.screen.get_surface().get_size()", "def _rect_size(self):\n bnd = self._bounds\n return (bnd[1][0] - bnd[0][0], bnd[1][1] - bnd[0][1])", "def get_tilesize(self, sampling):\n xsize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n ysize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n return xsize, ysize", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def getPixelSize(self):\n return (0.000013, 0.000013)", "def get_pixel_size(self):\n raise NotImplementedError", "def get_size(self):\n return self._surf.get_size()", "def getNumTiles(self):\n return self.w * self.h", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def get_tile_size(self, map_size = None, show_info = None):\n if not map_size: map_size = self.map_size\n w,h = self.img_size\n x_tiles,y_tiles = map_size\n\n tile_raw_w = w / x_tiles\n tile_raw_h = h / y_tiles\n\n if self.debug:\n print(f' ► Raw tile width: {tile_raw_w}\\n ► Raw tile height: {tile_raw_h}')\n\n tile_w = int(round(tile_raw_w))\n tile_h = int(round(tile_raw_h))\n\n if show_info:\n print(f' Image Size: {w} x {h} px\\n Tile Size: {tile_w} x {tile_h} px\\n Map Size: {x_tiles} x {y_tiles} tiles')\n\n error_w = tile_w - tile_raw_w\n error_h = tile_h - tile_raw_h\n print(f'\\n -=ERROR INFO=-\\n Tile Size Width Error: {round(error_w,4)} px \\n Tile Size Height Error: {round(error_h,4)} px \\n Total Width Rounding Error: {round(error_w * x_tiles,4)} px \\n Total Height Rounding Error: {round(error_h * y_tiles,4)} px\\n')\n\n return (tile_raw_w,tile_raw_h)", "def get_map_size(self, map_major_dim=None):\n w, h = self.img_size\n mmd = map_major_dim\n if w >= h:\n x_tiles = mmd\n y_tiles = round(h / w * mmd)\n else:\n x_tiles = round(w / h * mmd)\n y_tiles = mmd\n\n return (x_tiles, y_tiles)", "def getSize(self):\n return (int(self.getWidth()), int(self.getHeight()))", "def expected_height(self):\n\t\treturn self.expected_tile_height * TILE_SIZE", "def getNumTiles(self):\n return (self.width) * (self.height)", "def get_display_px(self):\n return self.image.size", "def _size_pixels(self, renderer):\n return renderer.points_to_pixels(self.size)", "def getSize(self):\n return GDimension(frameWidth, frameHeight)", "def get_combined_size(tiles):\n # TODO: Refactor calculating layout to avoid repetition.\n columns, rows = calc_columns_rows(len(tiles))\n tile_size = tiles[0].image.size\n return (tile_size[0] * columns, tile_size[1] * rows)", "def sprite_source_size(self):\n if self.trimmed:\n return {\n 'x': self.trim_offsets[0],\n 'y': self.trim_offsets[1],\n 'w': self.trim_offsets[2] - self.trim_offsets[0],\n 'h': 
self.trim_offsets[3] - self.trim_offsets[1],\n }\n else:\n return {\n 'x': 0,\n 'y': 0,\n 'w': self.width,\n 'h': self.height\n }", "def size(self):\n return (self.width)", "def getNumTiles(self):\n return self.height * self.width", "def world_size(self):\n return self._wsize", "def image_size(cls):\n return random.randint(250000, 80000000000)", "def storage_size( self ):\n if self.max_height+1 <= 8:\n return 1\n elif self.max_height+1 <= 16:\n \treturn 2\n else:\n return 3 # Max 24 pixels height", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def chunk(self):\n return int((self.center_x + config.SPRITE_PIXEL_SIZE / 2) // 320)", "def twidth(self) -> int:\n return self.isize[0].to_pixels(self.parent.width)", "def get_tile_size(num_pixels, tile_size=400):\n\n # How many times can we repeat a tile of the desired size.\n num_tiles = int(round(num_pixels / tile_size))\n\n # Ensure that there is at least 1 tile.\n num_tiles = max(1, num_tiles)\n\n # The actual tile-size.\n actual_tile_size = math.ceil(num_pixels / num_tiles)\n\n return actual_tile_size", "def calculate_size(self):\n top_left_y = 0\n top_left_x = 0\n\n bottom_right_y = 1\n bottom_right_x = 1\n\n # TODO: calculate the correct bounds of the threat zone.\n\n raise NotImplementedError\n\n # if there is a sight_range for this map_obstacle then increase the size of the zone.\n if self.sight_range > 0:\n top_left_y += self.sight_range\n top_left_x += self.sight_range\n bottom_right_y += self.sight_range\n bottom_right_x += self.sight_range\n\n top_left = (top_left_y, top_left_x)\n bottom_right = (bottom_right_y, bottom_right_x)\n\n height = bottom_right_y - top_left_y\n width = bottom_right_x - top_left_x\n\n self.top_left_y = top_left_y\n self.top_left_x = top_left_x\n self.bottom_right_y = bottom_right_y\n self.bottom_right_x = bottom_right_x\n self.height = height\n self.width = width\n\n return (top_left, bottom_right, height, width)", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def __len__(self) -> int:\n return self.width * self.height", "def height(self):\n return self.__size[1]", "def tsize(self) -> Tuple[int, int]:\n return (self.twidth, self.theight)", "def _get_image_size(self):\n return (3, 224, 224)", "def sn_size(self):\n return self._faux._sn_size", "def GetBestSize(self):\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())", "def get_screen_size(self):\n return self.__screen_size", "def Size(self) -> \"unsigned long long\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_Size(self)", "def width(self) -> int:\r\n return self.rect_uv.w", "def GetTextureDimensions(self):\n ...", "def Size(self) -> \"unsigned long long\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_Size(self)", "def get_dimension_height(self):\n pass", "def _step_size(self, renderer):\n return (self.symbol_width + self.spacing + self._padding) * self._size_pixels(renderer)", "def _step_size(self, renderer):\n return (self.symbol_width + self.spacing + self._padding) * self._size_pixels(renderer)", "def YSize(self):\n return self.dataset.RasterYSize if self.dataset else 
None", "def __len__(self):\n return self.width * self.height", "def size(self):\n\n return self.width", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def height(self) -> int:\r\n return self.rect_uv.h", "def size(self):\n bbox = self.bbox\n return bbox[1] - bbox[0]", "def get_state_size(self) -> Tuple[int, int]:\n return self.height, self.width", "def castSize(self, scale):\n return self.camera.sensorSize * scale", "def __getSideLength(self, width, height):\n\n # Get screen size from config file.\n with open(\"config.txt\") as f:\n config = json.loads(f.read())\n\n tileWidth = config[\"screenWidth\"]\n tileHeight = config[\"screenHeight\"]\n\n # Get max tile height and width.\n tileHeight = math.floor(tileHeight / (height+2))\n tileWidth = math.floor(tileWidth / (width+2))\n\n # Get the smallest of the two so the tile can be square.\n if tileHeight > tileWidth:\n sideLength = tileWidth\n else:\n sideLength = tileHeight\n\n return sideLength", "def getGridSize(self):\n # This is set by the mosaic module, but other modules need to\n # know the values to take the proper size grid.\n return self.grid_size", "def _step_size(self, renderer):\n return (self.symbol_width + self.spacing) * self._size_pixels(renderer)", "def get_image_size(self):", "def size(self) -> Tuple[int, int]:\n return (self.width, self.height)", "def pixelsize(self):\n if hasattr(self, \"_pixelsize\"):\n return self._pixelsize\n\n try:\n return self.header[\"PixSize\"] # [arcsec]\n except KeyError:\n try:\n return abs(self.header[\"CDELT1\"]) * 3600 # [deg] -> [arcsec]\n except KeyError:\n return None", "def y_size(self):\n pass", "def pixwidth(self):\n return self._labelWidth * self.transform.scale[0]", "def size_in(self):\n return self.dimensions", "def featured_lane_size(self):\n value = self.setting(self.FEATURED_LANE_SIZE).int_value\n if value is None:\n value = 15\n return value", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def h(self):\r\n return self.size.y", "def state(self):\n decimals = 2\n size_mb = round(self._size/1e6, decimals)\n return size_mb", "def get_size_inches(self):\n width, height = self.figure.get_size_inches()\n bbox = self.get_position()\n width = width * abs(bbox.width)\n height = height * abs(bbox.height)\n return width, height", "def grid_size(self):\n return self._grid_size", "def getSize(self):\n\n return self.size", "def size(cls):\n return (cls.num_properties()*2 + 2)", "def size(self) -> Point:\n\t\treturn self._size", "def pixheight(self):\n return self._labelHeight * self.y_sign * self.transform.scale[1]", "def width(self):\n return self.__size[0]", "def size(self):\n return self.width, self.height", "def descender_size( self ):\n if self.face.descender == 0:\n return 0\n\n _desc_pixels = self.max_height * ( abs(self.face.descender) / self.face.height )\n return round( _desc_pixels ) # over 2.4 -> 2 ; 2.5 -> 2 ; 2.51 -> 3", "def default_size(self):\n if self._default_size is None:\n try:\n im = self.images['MUSE_WHITE']\n except KeyError:\n raise ValueError('Size of the image is required')\n else:\n self._default_size = (im.shape[0] *\n im.wcs.get_step(unit=u.arcsec)[0])\n return self._default_size", "def size():\n return int(os.environ['WORLD_SIZE'])", "def width(self):\n return len(self.mine_map[0])", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def size(self) -> Tuple[int, int]:\n return self._width, self._height", "def get_obj_size(self, name):\n\t\t# get 
handle\n\t\t# size of red blood cell\n\t\twidth = 60.35\n\t\treturn width", "def get_work_size(self):\n # TODO: Extract this value from the source.\n return (4 << 20) * 16", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size" ]
[ "0.677379", "0.6732188", "0.6701107", "0.6691744", "0.6675488", "0.66745335", "0.666135", "0.6641239", "0.6588583", "0.6561213", "0.6488441", "0.64793986", "0.6465913", "0.64484566", "0.6422894", "0.6421343", "0.6418597", "0.6404673", "0.639991", "0.6364815", "0.63445616", "0.63170356", "0.63035", "0.62958", "0.62863356", "0.6284568", "0.6260523", "0.6237247", "0.62331027", "0.6224137", "0.62039375", "0.6199531", "0.6198759", "0.61821425", "0.6177568", "0.61693823", "0.6161447", "0.614596", "0.61342967", "0.61342967", "0.61342967", "0.61342967", "0.61342967", "0.61342967", "0.61342967", "0.61291003", "0.61259735", "0.6098212", "0.6092719", "0.60815215", "0.6076971", "0.6037889", "0.60157245", "0.6006317", "0.6005586", "0.60017204", "0.59931386", "0.59886646", "0.598521", "0.598521", "0.5984012", "0.5980486", "0.5975433", "0.59717315", "0.5967036", "0.5964831", "0.5953303", "0.59452975", "0.5944947", "0.59234315", "0.59224004", "0.59223145", "0.59190243", "0.59078157", "0.59074247", "0.5904806", "0.59010494", "0.5894964", "0.5893372", "0.58865887", "0.58860505", "0.5884855", "0.5880628", "0.58743596", "0.58686244", "0.5865101", "0.58647823", "0.5861996", "0.5860068", "0.5859655", "0.58596003", "0.58583003", "0.58559847", "0.5845249", "0.5845249", "0.5841956", "0.58369803", "0.5834018", "0.58322906", "0.58322906" ]
0.79170084
0
Sets the cell at the given position to the tile given via the tile index.
def set_cell(self, x, y, tile_index): data_index = x + y * self._size[0] # type: int # self._data[data_index] = tile_index # # if self._sprites[data_index]: # self._sprites[data_index].delete() # self._sprites[data_index] = None # Release resources if self._tiles[data_index]: self._tiles[data_index].delete() self._tiles[data_index] = None # Only create sprite when not zero if tile_index: tile_prototype = self._tile_set.get(tile_index, None) # type: Optional[Tile] if not tile_prototype: raise TileSetError("tile set does not contain tile for index %s" % tile_index) tile_w, tile_h = self._tile_size_2d i, j, _k = cart_to_iso(x, y, 0) ax, ay = tile_prototype.anchor tile_x, tile_y = i * tile_w - ax, j * tile_h - ay tile = deepcopy(tile_prototype) tile.sprite = pyglet.sprite.Sprite(tile.image, tile_x, tile_y) tile.aabb3d.pos = float(x), float(y), 0.0 tile.aabb2d.pos = tile_x, tile_y self._tiles[data_index] = tile # self._sprites[data_index] = pyglet.sprite.Sprite(tile.image, tile_x, tile_y) # Currently only supports a single level, so everything is on z-level 0 # self._aabb3d[data_index] = AABB3D(float(x), float(y), 0.0, tile.size[0], tile.size[1], tile.size[2]) # self._aabb2d[data_index] = AABB2D(tile_x, tile_y, tile_w, tile_h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\r\n self._cells[row][col] = value", "def changeTile (self, posY, posX, tile=\"t\"):\r\n self.grid[posY][posX] = tile", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\r\n self._board[row][col] = value", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def set_tile(self, row, column, tile_number):\n \n current_tile = self.get_tile(row, column)\n \n bits_to_shift = tile_offsets[row][column]\n new_mask = tile_number << bits_to_shift\n old_mask = current_tile << bits_to_shift\n self.tiles = self.tiles ^ old_mask\n self.tiles = self.tiles ^ new_mask", "def set_tile(self, row, col, value):\n # replace with your code\n pass", "def set_tile(self, row, col, value):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # Only set if the row and column are ok\n self._grid[row][col] = value", "def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell", "def setTile(self, cell, tile):\n assert isinstance(cell, tuple)\n cellx, celly = cell\n\n if cellx < 0 or cellx > self.map_array.shape[0]-1 or celly < 0 or celly > self.map_array.shape[1]-1:\n return\n\n if self.tile_dict.get((cellx, celly)):\n self.canvas.delete(self.tile_dict[(cellx, celly)])\n\n if tile:\n self.map_array[cellx, celly] = tile.tid\n if tile.tid == 0.0:\n return\n map_posx, map_posy = iso(cellx * self.cell_width, celly * self.cell_height)\n image = self.main.main_tilelist.images[tile.tid]\n self.tile_dict[(cellx, celly)] = self.canvas.create_image(map_posx, map_posy, image=image, anchor=tk.N)", "def set_tile(self, row, col, value):\r\n del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board", "def set_cell(self, pos, value):\n\t\tpos = Point(pos)\n\t\tif not self.valid(pos):\n\t\t\traise KeyError('Invalid cell position: {0}'.format(pos))\n\t\tself.data[pos.x + pos.y * self.dims.width] = value", "def set_tile(self, point, glyph=\".\"):\n self.matrix[point.y][point.x] = glyph", "def set_our_tile(self, x, y, value):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\tself.our_tiles[x][y] = value", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, cell, 
val):\n a = b = 0\n try:\n a, b = self.__ret_cell(cell)\n self._grid[a][b] = val\n except IndexError as e:\n self.perror(\"Error: '%s'.\" % e, cell, a, b, 5)\n self.perror(\"Error.\", cell, a, b, 5)\n sys.exit()", "def set_cell(self, index, column, value):\n try:\n idx = self.index_location(index)\n except (IndexError, ValueError):\n idx = self._add_row(index)\n\n try:\n col = self.column_location(column)\n except (IndexError, ValueError):\n col = self._add_column(column)\n\n self._data[idx][col] = value", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def place_tile(self, rack_ind, row, col):\n self.board.board[col][row].letter = self.rack[rack_ind]\n self.placed_tiles[self.selected_tile] = (self.rack[self.selected_tile], (col, row))\n # set the rack tile to an empty string\n self.rack[self.selected_tile] = ''", "def set_item(self, row, col, value):\n self.board[row][col] = value", "def set_node(self, index, node):\r\n self.loc.coord[index] = node", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def set_cell_by_index(self, column_index, cell):\n while len(self) <= column_index:\n self.append(None)\n self[column_index] = cell", "def set_cell_value(self, row, column, value):\n self.sudoku_matrix[row][column].set_cell_value(value)", "def add_tile(self, coordinate, tile):\n self._maze[coordinate] = tile", "def set(self,argument):\n if argument == \"X\" or \"O\":\n self.tile=argument", "def set_tile_marker(self, x, y, marker):\n self.__tile_grid[y][x].configure(image=self.__marker_images[marker])", "def test_set_cell(self):\n self.sudoku.set_cell((2, 2), 0)\n self.assertEqual(0, self.sudoku.get_cell((2, 2)))", "def set(self, coord, value):\n layer, row, column = tuple(coord)\n self.validPosition(layer, row, column)\n self._state['visible']['board'][layer][row][column] = value", "def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False", "def setData(self, index, value):\n \n self.state[index.row()][index.column()] = value\n return value", "def set_table_cell(self, table: Table, row: Index, column: Column, value: Any):\n self._requires_table(table)\n table.set_cell(row, column, value)", "def set_at(self,x,y,set=True):\n\t\tif ( not self._validate(x,y )):\n\t\t\treturn\n\n\t\t# set the bit in the grid\n\t\tif set:\n\t\t\tself.Grid[y] = self.Grid[y] | (1 << x)\n\t\telse:\n\t\t\tself.Grid[y] = self.Grid[y] & ~(1 << x)", "def set_move(self, position: Point, mark: Mark) -> None:\n\t\tif mark == Mark.X:\n\t\t\tself.tiles[position.x][position.y] = 1\n\t\telse:\n\t\t\tself.tiles[position.x][position.y] = -1", "def set_number(self, row, col, value):\r\n self._grid[row][col] = value", "def set_number(self, row, col, value):\r\n self._grid[row][col] = value", "def set_number(self, row, col, value):\r\n self._grid[row][col] = value", "def setItem(self, i, j, val):\n if i < 0:\n raise IndexError('Row index must be nonnegative.')\n if j < 0:\n raise IndexError('Column index must be nonnegative.')\n\n self.__m[i - 1][j - 1] = val", "def set_piece(self, square, piece):\n self.board[square.row][square.col] = piece", "def set_piece(self, square, 
piece):\n self.board[square.row][square.col] = piece", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def set_number(self, row, col, value):\n self._grid[row][col] = value", "def set_number(self, row, col, value):\n self._grid[row][col] = value", "def set_number(self, row, col, value):\n self._grid[row][col] = value", "def set_number(self, row, col, value):\n self._grid[row][col] = value", "def set_xy(self, x, y, val):\r\n\t\tself.grid[y, x] = val", "def set_data(self, data, *pos):\n r, c = pos\n self._grid[r][c] = data", "def position_to_tile(self, position):\r\n return position[1] + self.width * position[0]", "def place_at(self, row, col, piece):\n self.board[row + PADDING][col + PADDING] = piece", "def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val", "def setBlank(self, pos):\n self.tiles[-1] = pos", "def new_tile(self):\n rowm, colm = self.get_ava_index()\n value = 2 if random() <= 0.90 else 4\n self.set_tile(rowm, colm, value)\n print rowm,colm,value", "def __setitem__(self, index, value):\n if isinstance(index, tuple):\n list.__getitem__(self, index[0])[index[1]] = value\n elif isinstance(index, int):\n self.pop(index)\n self.insert(index, value)\n else:\n raise TypeError, \"Table indices must be int or tuple\"", "def set_player_tile(self, tile_coords):\n #self.tiles[ self.player_position[0] ][ self.player_position[1] ].has_player = False\n self.player_position = list(tile_coords)\n #self.tiles[ self.player_position[0] ][ self.player_position[1] ].has_player = True", "def set_cell(frame, data):\n\twith data.cell_:\n\t\tdata.cell_[:,0] = [L, 0., 0.]\n\t\tdata.cell_[:,1] = [0., L, 0.]\n\t\tdata.cell_[:,2] = [0., 0., L]\n\t\t#cell origin\n\t\tdata.cell_[:,3] = [0, 0 , 0]\n\t\t#set periodic boundary conditions\n\t\tdata.cell_.pbc = (True, True, True)", "def set_cells(self, val=None):\t\r\n self._cells = \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def new_tile(self):\n \n # get random corordinates for new tile\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n # keeps generating random tile corordinates for non-empty tile\n while self.get_tile(row,col) != 0:\n row = random.randint(0,self._grid_width)\n col = random.randint(0,self._grid_height)\n \n # get random index of new tile value\n freq = random.randint(0,9)\n if freq == 9:\n self.set_tile(row, col, 4)\n else:\n self.set_tile(row, col, 2)", "def update_board_with_move(self, cell, mark):\n row, col = cell\n self.storage[row][col] = mark", "def setItem(self, column_number, row_number, value):\n self.data[column_number, row_number] = value\n return", "def __setitem__(self, index, value):\n self.position[index] = value", "def setPiece(self, column, rank, piece):\n try:\n self.values[int(int(rank)-1)*8+self.getColIdx(column)] = piece\n except:\n print column, rank\n rospy.loginfo(\"setPiece: invalid row/column\")", "def setPiece(self, column, rank, piece):\n try:\n self.values[int(int(rank)-1)*8+self.getColIdx(column)] = piece\n except:\n print column, rank\n rospy.loginfo(\"setPiece: invalid row/column\")", "def set_square(self, col, row, value):\n 
row_index = row - 1\n col_index = ord(col.lower()) - 97 # ord('a') is 97\n self.state[row_index][col_index] = value", "def set_tile_color(self, x, y, color):\n self.__tile_grid[y][x].configure(bg=color)", "def change_color(board, tile, color):\n for el in tile:\n el_x = el[0]\n el_y = el[1]\n board[el_x,el_y] = color", "def mark_pos(self, position, marker):\n i, j = self.board[position]\n self.grid[i][j] = marker", "def setitem(self, i, j, value):\n # XXX: flint matrices do not support negative indices\n # XXX: They also raise ValueError instead of IndexError\n m, n = self.shape\n if i < 0:\n i += m\n if j < 0:\n j += n\n try:\n self.rep[i, j] = value\n except ValueError:\n raise IndexError(f\"Invalid indices ({i}, {j}) for Matrix of shape {self.shape}\")", "def set_column(grid, column_index, new):\n for i,row in enumerate(grid):\n row[column_index] = new[i]", "def switch(self, tile):\n self.tiles[self.tiles.index(tile)], self.opentile, self.prev = self.opentile, tile, self.opentile\n self.nb_move += 1", "def __setitem__(self, pos, val):\n self._coords[pos] = val", "def put(self, choice, token):\n x, y = choice\n self.grid[x][y] = token", "def setCell(self, row = None, column = None, value = None, *, cell = None):\n\n\t\t\t\tif (cell is None):\n\t\t\t\t\tcell = self.getCell(row = row, column = column)\n\n\t\t\t\tif (value is None):\n\t\t\t\t\tvalue = \"\"\n\n\t\t\t\t#Write Value\n\t\t\t\tfor _cell in self.ensure_container(cell):\n\t\t\t\t\t_cell.value = f\"{value}\" #Make sure input is a valid ascii", "def put_piece(self, x: int, y: int, piece: int):\n self.board_values[x, y] = piece\n self.tiles_taken[x, y] = True", "def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)", "def set_at_index(self, index: int, value: object) -> None:\n self.data[index] = value", "def cleanTileAtPosition(self, pos):\n self.tiles[pos] = 'clean'", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def setPosition(position):", "def new_tile(self):\n # Getting the list of positions of empty tiles\n indices_list = [(i, j) for i, l in enumerate(self._grid)\n for j in xrange(len(l)) if not l[j]]\n \n # Filling the the empty tile with a 2 or a 4\n if indices_list:\n self.set_tile(*choice(indices_list),\n value = 2 if random() <.9 else 4)", "def set_cell(state: State) -> State:\n assert state.index < state.array_len\n return state._replace(\n array=state.array[: state.index] + [state.acc] + state.array[state.index + 1 :]\n )", "def update_pit(self, value, pit_index, index):\n if index == 1:\n self.state[pit_index] = value\n else:\n self.state[pit_index + self.M + 1] = value", "def play_tile(self, value, location):\n if location in self.empty:\n x, y = self.locations[location]\n self.tiles[x][y] = value\n self.empty[location-1] = 0\n return 1\n return 0", "def new_tile(self):\n zero_list = []\n 
zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")", "def __setitem__(self, pos, is_on):\n row, column = pos\n self.bits[row][column] = is_on", "def _set_mine(self,index):\n game.get_cell(index).set_mine() #set current index as mine\n game.add_mine(index) #add index to mine_index\n\n # add its neighbor's neighbor_num \n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n game.get_cell(s[0]*self._col_num+s[1]).add_neighbor()", "def __setitem__(self, index, item):\n if item not in self.REPRESENTATION: raise Exception('Grids can only \\'X\\', \\'O\\' and \\'.\\'')\n self.data[index[0] - 1][index[1] - 1] = item" ]
[ "0.79476476", "0.793965", "0.792472", "0.7922317", "0.7811975", "0.77657914", "0.77657914", "0.77415025", "0.77413976", "0.77362347", "0.77177304", "0.7665559", "0.76413506", "0.76413506", "0.7600459", "0.7590378", "0.7555909", "0.7555414", "0.7547121", "0.74911577", "0.7387744", "0.71248376", "0.71229863", "0.7080512", "0.7063564", "0.7050899", "0.69008976", "0.6891472", "0.6891472", "0.67770505", "0.6759904", "0.6696362", "0.6681466", "0.64937234", "0.6432774", "0.6416625", "0.63309914", "0.6319038", "0.6279482", "0.62412494", "0.62225163", "0.6189513", "0.6064435", "0.6063667", "0.6057226", "0.59904444", "0.59324956", "0.59317607", "0.5923582", "0.5923582", "0.5923582", "0.5918015", "0.59067065", "0.59067065", "0.5902148", "0.5901744", "0.5901744", "0.5901744", "0.5901744", "0.5893066", "0.5890101", "0.5888155", "0.5887238", "0.58680904", "0.5856163", "0.58552736", "0.5812574", "0.58106244", "0.5804833", "0.57956403", "0.5792169", "0.5788715", "0.5775484", "0.5773423", "0.5717405", "0.5717405", "0.5708945", "0.570271", "0.56894237", "0.568826", "0.5679154", "0.5676474", "0.56757146", "0.5673081", "0.56720877", "0.5671885", "0.566994", "0.5668287", "0.5644277", "0.5642842", "0.5633637", "0.56290424", "0.56279534", "0.562478", "0.56176823", "0.56124157", "0.5600324", "0.5594166", "0.5593665", "0.5581403" ]
0.77405584
9
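The `set_cell` document in the row above flattens the grid with `x + y * width`, releases whatever tile previously occupied that slot, and only builds a new sprite for a non-zero tile index. As a point of comparison, here is a minimal, dependency-free sketch of the same row-major indexing idea; the `TileGrid` class, its bounds check, and the usage lines are illustrative assumptions, not code taken from this dataset.

```python
from typing import List


class TileGrid:
    """Minimal row-major tile grid; tile index 0 means 'empty'."""

    def __init__(self, width: int, height: int) -> None:
        self.width = width
        self.height = height
        self._data: List[int] = [0] * (width * height)

    def set_cell(self, x: int, y: int, tile_index: int) -> None:
        # Same flattening as the document above: x + y * width.
        if not (0 <= x < self.width and 0 <= y < self.height):
            raise IndexError(f"cell ({x}, {y}) outside {self.width}x{self.height} grid")
        self._data[x + y * self.width] = tile_index

    def get_cell(self, x: int, y: int) -> int:
        return self._data[x + y * self.width]


grid = TileGrid(4, 3)
grid.set_cell(2, 1, 7)
assert grid.get_cell(2, 1) == 7
```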
Adds an object, to be managed and drawn by the tile map.
def add_object(self, obj): self._objects.append(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def add_object(self, obj):\n\t\tself.objects.append(obj)", "def add(self, obj):\n if isinstance(obj, Drawable):\n self._drawables.add(obj)\n if isinstance(obj, Updateable):\n self._updateables.add(obj)\n if isinstance(obj, Collidable) and not isinstance(obj, Projectile):\n self._collidables.add(obj)\n if isinstance(obj, Collidable) and isinstance(obj, Projectile):\n self._projectiles.add(obj)\n if isinstance(obj, Textbox):\n self._textboxes.add(obj)\n # Always make sure the newest textbox is on top.\n obj.z = zlayer.TEXT + max(t.z for t in self._textboxes) + 1\n self.__len__.cache_clear()", "def addObject(self, name, object):\n self.map[name] = object", "def add_object(self, obj): # DEFINE OBJ!\n obj.spritesheet_width = self.spritesheet.size['width']\n obj.spritesheet_height = self.spritesheet.size['height']\n \n obj._layer_added(self)\n \n\n obj.buffer_index = len(self.objects)\n self.objects.append(obj)\n\n x = obj.x\n y = obj.y\n \n self.verts.extend(((x, y, 0.0), (x+obj.width, y, 0.0), (x+obj.width, y-obj.height, 0.0), (x, y-obj.height, 0.0)))\n self.texcoords.extend(obj.uv_texture)\n self.norms.extend(((0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1)))\n\n if pi3d.PLATFORM == pi3d.PLATFORM_PI:\n self.inds.append((self.a,self.b,self.c))\n self.inds.append((self.d,self.a,self.c))\n else:\n self.inds.extend((self.a,self.b,self.c))\n self.inds.extend((self.d,self.a,self.c))\n\n self.a += 4\n self.b += 4\n self.c += 4\n self.d += 4\n\n \n #~ return len(self.sprites)-1", "def add_object(self, screen, object, amount=1, pos=None):\n\n State.restore(screen)\n State.screen.add_object(object, amount, pos)\n State.save(screen)", "def add_object(self, obj_data, obj_name, obj_orientation, qpmi, entity):\n self.objects.append((obj_data, obj_name, obj_orientation, qpmi, entity))\n if len(self.objects) == 1:\n self.set_default_brush()", "def addObject(self):\n\t\tsel = mc.ls( sl = True, typ = 'transform' )\n\t\tif sel:\n\t\t\tself.objects_lw.addItems( sel )", "def add(self, obj):\n raise NotImplementedError", "def add_object(self, obj):\n if self.it_fits(obj):\n self.content.append(obj)\n return self\n else:\n raise Exception(f\"Object {obj.name} does not fit on the box\")", "def add(self, obj):\n self.objects.append(obj)\n if obj.gravity == 0:\n obj.gravity = self.gravity\n if obj.gravity_z == 0:\n obj.gravity_z = self.gravity_z", "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def addObject(self, item, row, column, gameGrid=None):\n if not gameGrid:\n gameGrid = self.gameGrid\n if row > self.rows-1 or row < 0 or column > self.columns-1 or column < 0:\n print \"addObject could not add %s: \\\n Location out of bounds\" % str(item)\n return None\n gameGrid.setItem(item, row, column)", "def add_object(self, object):\n object.save()", "def add_object(self, object_to_be_added):\n new_mapping = Map.add_object(self.id, object_to_be_added)\n if new_mapping:\n object_to_be_added.save()\n new_mapping.ref_id = object_to_be_added.id\n return True\n else:\n return False", "def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)", "def add_object(self, name, obj):\n if not isinstance(obj, SceneObject):\n raise ValueError('obj must be an object of type SceneObject')\n self._objects[name] = obj\n self.close_renderer()", "def add_ui_object(self, UI_object: object):\n self.__UI_objects.append(UI_object)", "def addobj(self, obj):\n 
self._objslock.acquire()\n if obj.objid in self._objs:\n self._objslock.release()\n raise KeyError(\"non-unique EMANE object id %s for %s\" % (obj.objid, obj))\n self._objs[obj.objid] = obj\n self._objslock.release()", "def add(obj):", "def add(self, obj: T) -> None:\n self._items.append(obj)\n self._size += 1", "def add(self, obj):\n self.session.add(obj)", "def add(self, object):\n self.lock.acquire()\n self.__Session.add(object)\n self.__Session.commit()\n self.lock.release()", "def add(self, obj, x, y, angle, mirror):\n # we make a tuple to put in the list\n self.children.append( (obj, x, y, angle, mirror) )", "def add_physics_object(self, physics_object):\n if hasattr(physics_object, 'material'):\n color = physics_object.material.color\n else:\n color = 'blue'\n x0 = physics_object.displacement.x - physics_object.side + self.origin_x\n x1 = physics_object.displacement.x + physics_object.side + self.origin_x\n y0 = physics_object.displacement.y - physics_object.side + self.origin_y\n y1 = physics_object.displacement.y + physics_object.side + self.origin_y\n # down the line, the physics object should draw itself\n physics_object.canvas_id = self.canvas.create_rectangle(x0, y0, x1, y1, fill=color)\n physics_object.physics_canvas = self\n self.physics_objects.append(physics_object)\n\n for plugin in self.new_physics_object_plugins:\n plugin(physics_object)\n\n self.move_physics_object(physics_object)", "def _add_object(self, object_dict):\n # Attempt to map the object first. This will raise an\n # ItemExistsError if a named object of the same type already\n # exists.\n self._add_object_to_map(self.append_key, object_dict)\n\n # Add the object to the end of the model.\n # TODO: which objects need added to the beginning?\n self.model_dict[self.append_key] = object_dict\n\n # Update append key.\n self._update_append_key()", "def add(self, obj: object) -> None:\n self._contains.append(obj)", "def append(self, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects", "def associateObject (self, obj):\n self.__associatedObjects.add(obj)", "def add(self, obj):\n try:\n EditMedia(self.dbstate, self.uistate, [], MediaObject())\n except WindowActiveError:\n pass", "def addObject(self, new_object_location):\n\n # store new object location\n self.objects[self.nextObjectID] = new_object_location\n\n # initialize frame_counts for when new object is undetected\n self.lost[self.nextObjectID] = 0\n\n self.nextObjectID += 1", "def add_to_world(self, thing):\n\t\tthing.set_world_info(self.current_id, self)\n\t\tself.gameObjects.append(thing)\n\t\tself.current_id += 1", "def add(self, obj):\n ID = id(obj)\n self.pDict[ID] = obj\n return ID", "def add_object(self, obj: str):\n if obj not in self._objects:\n self._objects.append(obj)\n else:\n raise IDAlreadyExists", "def __add__(self, obj):\n if isinstance(obj, vtk.vtkProp3D):\n self.AddPart(obj)\n\n self.actors.append(obj)\n\n if hasattr(obj, \"scalarbar\") and obj.scalarbar is not None:\n if self.scalarbar is None:\n self.scalarbar = obj.scalarbar\n return self\n\n def unpack_group(scalarbar):\n if isinstance(scalarbar, Group):\n return scalarbar.unpack()\n else:\n return scalarbar\n\n if isinstance(self.scalarbar, Group):\n self.scalarbar += unpack_group(obj.scalarbar)\n else:\n self.scalarbar = Group([unpack_group(self.scalarbar), unpack_group(obj.scalarbar)])\n self.pipeline = vedo.utils.OperationNode(\"add mesh\", parents=[self, obj], c=\"#f08080\")\n return self", "def 
add_object(self, object_type, data=None, read_from_netbox=False, source=None):\n\n # create new object\n new_object = object_type(data, read_from_netbox=read_from_netbox, inventory=self, source=source)\n\n # add to inventory\n self.base_structure[object_type.name].append(new_object)\n\n if read_from_netbox is False:\n log.info(f\"Created new {new_object.name} object: {new_object.get_display_name()}\")\n\n return new_object", "def register_object(self, obj):\n self.modules.append(obj)", "def add_object(world_id, object_to_be_added):\n try:\n new_mapping = Map(world_id,\n object_to_be_added.x,\n object_to_be_added.y,\n object_to_be_added.__class__.__name__)\n new_mapping.save()\n return new_mapping\n except (sqlalchemy.orm.exc.FlushError, sqlalchemy.exc.IntegrityError) as e:\n db.session.rollback()\n return None", "def add(self, obj):\n self.getSession().add(obj)\n self.commit() # paranoially\n return obj", "def add(self, mp):\n \n self.tile_contents.append(mp)\n if(self.tile_contents[-1].raised == False):\n self.paint_blocks += 1.00", "def Add(self, obj_type, name, node=None, obj=None):\n print \"Adding object %s, node: %s\" % (name, node)\n #check for duplicate object\n # also raise error if no such object type\n if self.ObjectExists(obj_type, name):\n raise DuplicateObjectError(name)\n \n #find out where we need to put it and stick it in there\n idx = bisect.bisect(self.objects[obj_type], name)\n if not node:\n node = game_objects.ObjectUtilities.ObjectNode(self, name, self.object_modules[obj_type])\n if obj:\n node.CopyObject(obj)\n self.objects[obj_type].insert(idx, node)\n \n #let our listeners know we added a new object and let them\n # know the parent in terms of alphabetical order\n if idx == 0:\n #if we're inserting at the start there is no preceding element\n self.sendODBEvent(ODBAdd(node, obj_type, None))\n else:\n self.sendODBEvent(ODBAdd(node, obj_type, self.objects[obj_type][idx-1].name))\n \n node.SetModified(True)\n self.MarkModified(node)", "def add_object(self, object):\n if isinstance(object, DSSDataset):\n data = {\"reference\": {\"projectKey\": object.project_key, \"type\": \"DATASET\", \"id\": object.dataset_name}}\n elif isinstance(object, DSSWikiArticle):\n data = {\"reference\": {\"projectKey\": object.project_key, \"type\": \"ARTICLE\", \"id\": object.article_id}}\n elif isinstance(object, DSSApp):\n data = {\"appId\": object.app_id}\n elif isinstance(object, DSSWorkspaceHtmlLinkObject):\n data = {\"htmlLink\": {\"name\": object.name, \"url\": object.url, \"description\": object.description}}\n elif isinstance(object, dict):\n data = object\n else:\n raise ValueError(\"Unsupported object type\")\n self.client._perform_json(\"POST\", \"/workspaces/%s/objects\" % self.workspace_key, body=data)", "def contribute_to_object(self, obj):\n pass", "def add_drawable(self, gameObject):\r\n if not self.sprite_group.has(gameObject):\r\n self.sprite_group.add(gameObject)", "def add_plant(self, desc, obj_list):\n self.plants.append((desc, obj_list))\n if len(self.plants) == 1:\n self.set_default_brush()", "def add(self, idx, obj):\n if idx in self._objects:\n warning('overriding object %s - use update() instead?' 
% idx)\n self._objects[idx] = obj\n self._last_insert_idx = idx", "def append(self, object):\r\n raise NotImplementedError()", "def _place_new_obj(self, (screen_width, screen_height)):\n old_tree = self.objects.get()\n new_x = (-old_tree.position[0]) + old_tree.max_width*2 + screen_width\n another_tree = Grass((new_x, screen_height), self.width, self.height)\n self.objects.put(another_tree)", "def insert_object(self, object: ObjectHandle):\n # Serialize the object descriptor and data part. Both items are stored\n # as separate objects.\n descriptor, data = self.factory.serialize(object)\n object_id = self.store.write_object(descriptor)\n data_id = self.store.write_object(data)\n # Add the object information to the index and write the modified index\n # to the data store.\n self.index[object.namespace][object.name] = StoredObject(\n object_id=object_id,\n data_id=data_id,\n name=object.name,\n descriptor=descriptor\n )\n self._write_index()\n # If the object refers to a default object that object is removed since\n # it has been overwritten by the new object.\n try:\n del self.defaults.get(object.namespace, {})[object.name]\n except KeyError:\n pass", "def add(self):\n pass", "def add(self, *args, **kwargs):\n obj = self._class(*args, **kwargs)\n self._items.append(obj)", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))", "def addidfobject(self, new_object):\n key = new_object.key.upper()\n self.idfobjects[key].append(new_object)\n self._reset_dependant_vars(\"idfobjects\")", "def add(self, octree_chunk: OctreeChunk, atlas_tile: AtlasTile) -> None:\n tile_index = atlas_tile.index\n\n self._tiles[tile_index] = TileData(octree_chunk, atlas_tile)\n self._chunks.add(octree_chunk)", "def register_game_object(self, game_object):\n game_object.game_engine = self\n self.game_objects.append(game_object)", "def addObjectMap(self,fromMod,toMod,objectMap):\n if self.objectMaps == None: self.loadObjectMaps()\n self.objectMaps[(fromMod,toMod)] = objectMap", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def new(self, obj):\n self.__session.add(obj)", "def add_context(self, objmap, ctx):\n zing_state = self._get_zing_tx_state()\n zing_state.datamaps_contexts[objmap] = ObjectMapContext(ctx)", "def new(self, obj):\n\n self.__session.add(obj)", "def new(self, obj):\n\n self.__session.add(obj)", "def append(self, obj):\r\n raise NotImplementedError", "def append( self, obj ):\n self[obj.getType()] = obj\n obj.setParent( self.parent )\n return obj", "def add(self, quadkey, asset):\n self.tiles[quadkey] = self.tiles.get(quadkey, set())\n self.tiles[quadkey].add(asset)", "def addAdditionalGraphicalObject(self, *args):\n return _libsbml.Layout_addAdditionalGraphicalObject(self, *args)", "def new_tile(self):\n # replace with your code\n pass", "def add_tile(self, coordinate, tile):\n self._maze[coordinate] = tile", "def add_object(self, row, obj_name):\n # not optimised: not expected to be a usual operation\n if self.table.shape[0] > 0:\n new_table = np.vstack((self.table, 
[row]))\n else:\n new_table = np.array([row])\n new_objects = self.objects + [obj_name]\n self.__init__(new_table, new_objects, self.attributes)", "def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()", "def _drawObject(self, object):\n if object.draw:\n Game.Screen.blit(self._getCurrentObjectFrame(object), (object.position.x, Game.ScreenHeight - (object.position.y + object.objectType.height)))", "def add_object(_object):\n print('add_object: ' + str(_object))\n try_insert_or_update(\n models.objects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n project_id=_object['project_id'], filename=_object['filename'])])", "def addAttachedObject(self, attachedObject):\n\t\tself.attachedObjects.append(attachedObject)", "def _add_object(self, name, model, *args, **kwargs):\n logger.debug('Adding object with name \"{}\" to model.'.format(name))\n obj = model(weakref.proxy(self), name, *args, **kwargs) # Add hidden hard reference\n self._objects.append(obj)\n return self.get_object(obj.name)", "def add_object(self, content, object_id = None):\n if object_id is None:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s\" % self.url_index_name, self.client.timeout, content)\n else:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % object_id).encode('utf8'), safe='')), self.client.timeout, content)", "def add(self, component) -> None:\n pass", "def add(self, key, obj):\n with self._lock:\n slot = self._dict.get(key, None)\n if slot is None:\n slot = [obj, 0]\n else:\n slot[1] += 1\n self._dict[key] = slot", "def addPiece(self, piece):\r\n \r\n self.pieces[(piece.x, piece.y)] = piece", "def addPoint(self, *args, **kwargs):\n ...", "def add(self, obj):\n self._pkcache[obj.pk] = obj\n for ctype in obj._content_types:\n self._typecache[ctype][obj.pk] = True", "def new(self, obj):\n if obj:\n self.__session.add(obj)", "def new(self, obj):\n if obj:\n self.__session.add(obj)", "def add_node(self, obj, typ_sofi, layer):\n\n n = Node(obj)\n n.layer = layer\n\n self.nodes.add(n)", "def add_detected_object(self, detected_object: DetectedObject):\n self.detected_objects.append(detected_object)", "def attach(self, obj):\n self.Object = obj.Object", "def insert(self, index: int, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.insert(index, panel(obj))\n self.objects = new_objects", "def append(self, object):\n self.data['object'].append(object)\n self.data['id'].append(self.start_id)\n for col in self.cols:\n if col != 'object' and col != 'id':\n self.data[col].append(None)\n self.start_id += 1\n return self", "def add(self, obj: object) -> None:\n with self._cache_lock:\n self.cache.append(obj)\n if self.cache_limit is not None and len(self.cache) > self.cache_limit:\n with self.transaction():\n # Starting a transaction will flush the cache\n pass", "def new(self, obj):\n new_key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.__objects[new_key] = obj", "def put(self, object_to_put, pos_x, pos_y):\n self.validate_position(pos_x, pos_y)\n old_object, self.map[pos_x][pos_y] = self.map[pos_x][pos_y], object_to_put\n return old_object", "def add_to_template(cls, template, name, obj):\n if isinstance(obj, troposphere.AWSObject):\n template.add_resource(obj)", "def register_object(self, obj, isdelete = False, listonly = False, postupdate=False, **kwargs):\n #print \"REGISTER\", repr(obj), 
repr(getattr(obj, '_instance_key', None)), str(isdelete), str(listonly)\n \n # things can get really confusing if theres duplicate instances floating around,\n # so make sure everything is OK\n self.uow._validate_obj(obj)\n \n mapper = object_mapper(obj)\n self.mappers.add(mapper)\n task = self.get_task_by_mapper(mapper)\n\n if postupdate:\n mod = task.append_postupdate(obj)\n if mod: self._mark_modified()\n return\n \n # for a cyclical task, things need to be sorted out already,\n # so this object should have already been added to the appropriate sub-task\n # can put an assertion here to make sure....\n if task.circular:\n return\n \n mod = task.append(obj, listonly, isdelete=isdelete, **kwargs)\n if mod: self._mark_modified()", "def _add_stix_object(self, stix_object: _Observable):\n if stix_object.id in self._all_objects:\n if len(stix_object.serialize()) > len(self._all_objects[stix_object.id].serialize()):\n self._all_objects[stix_object.id] = stix_object\n else:\n self._all_objects[stix_object.id] = stix_object", "def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)", "def __init__(self):\n self.full_map = [] # map with all the items in place.\n self.list_objects = []\n self.objects_numbers = 3\n self.x_axis = None\n self.y_axis = None\n self.user = User(self)\n self.load_data_map()\n self.create_object()\n self.graphics = Graphics(self)" ]
[ "0.77441376", "0.7656286", "0.75963026", "0.7574792", "0.75481164", "0.7518914", "0.74521136", "0.737135", "0.7309146", "0.7289169", "0.71311355", "0.70795727", "0.69718575", "0.6938054", "0.68440473", "0.68283266", "0.681699", "0.68109393", "0.67909193", "0.6732807", "0.670669", "0.6674076", "0.66022396", "0.659429", "0.65858537", "0.65739167", "0.6563956", "0.65006536", "0.64487547", "0.6415947", "0.6411454", "0.6361399", "0.63482904", "0.6340743", "0.63208985", "0.631717", "0.62983024", "0.6270037", "0.62649727", "0.62522537", "0.62339956", "0.6217483", "0.62053883", "0.6200416", "0.6188772", "0.6188012", "0.6176182", "0.6170754", "0.6088183", "0.60828984", "0.60795665", "0.6076012", "0.6075223", "0.6070501", "0.6044854", "0.6040353", "0.6038982", "0.6038982", "0.6038982", "0.6038982", "0.6038982", "0.6038982", "0.6038982", "0.6038982", "0.60280997", "0.59965265", "0.59965265", "0.59856826", "0.59768456", "0.5970344", "0.59661883", "0.595318", "0.5948292", "0.59460014", "0.59438175", "0.59420097", "0.5940897", "0.59362096", "0.5932312", "0.592784", "0.59199727", "0.59189326", "0.5909979", "0.59097683", "0.59066004", "0.59057426", "0.59057426", "0.58847547", "0.58799404", "0.58752674", "0.58722496", "0.58634365", "0.5862151", "0.5859382", "0.58496535", "0.5842278", "0.58325076", "0.5829116", "0.5820794", "0.5819819" ]
0.7219038
10
Sorts the drawables in an order that they can be rendered using the painter's algorithm (back to front).
def sort(self, key_func): pass
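The document for this row is only a stub (`pass`), while the query describes a painter's-algorithm ordering: drawables sorted so the farthest are rendered first and nearer ones are painted over them. A hedged sketch of that idea follows; the `Drawable` dataclass and its `depth` field are assumptions made for the example, not taken from the dataset.

```python
from dataclasses import dataclass
from typing import Callable, List


@dataclass
class Drawable:
    name: str
    depth: float  # larger depth = farther from the camera


def painter_sort(drawables: List[Drawable],
                 key_func: Callable[[Drawable], float] = lambda d: d.depth) -> List[Drawable]:
    # Painter's algorithm: render back to front, so sort by descending depth.
    return sorted(drawables, key=key_func, reverse=True)


scene = [Drawable("tree", 2.0), Drawable("mountain", 9.0), Drawable("player", 0.5)]
for d in painter_sort(scene):
    print("draw", d.name)  # mountain, then tree, then player
```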
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, **kwargs):\n for o in sorted(self._drawables, key=default_itemgetter(\"z\", default=0)):\n o.draw(**kwargs)", "def start_sorting(self):\n if self.sorting:\n return None\n self.sorting = True\n\n passes = 0\n while self.sorting:\n swap_done = False\n for i in range(len(self.sort_list)-passes-1):\n if not self.sorting:\n break\n if self.sort_list[i][0] > self.sort_list[i+1][0]:\n self.sort_list[i], self.sort_list[i+1] = self.sort_list[i+1], self.sort_list[i]\n self.window.coords(self.sort_list[i][1], 4*i+50, 20, 4*i+50, self.sort_list[i][0])\n self.window.coords(self.sort_list[i+1][1], 4*(i+1)+50, 20, 4*(i+1)+50, self.sort_list[i+1][0])\n self.window.itemconfig(self.sort_list[i][1], fill='red')\n self.window.itemconfig(self.sort_list[i+1][1], fill='red')\n swap_done = True\n self.window.update()\n self.window.itemconfig(self.sort_list[i][1], fill='black')\n self.window.itemconfig(self.sort_list[i+1][1], fill='black')\n self.window.update()\n passes += 1\n\n if not swap_done:\n self.sorting = False\n for line in self.sort_list:\n self.window.itemconfig(line[1], fill='green')\n else:\n self.window.itemconfig(self.sort_list[i+1][1], fill='blue')\n self.window.update()", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def array_sort():\n to_concat = []\n for centroid_rgb, cluster in itertools.izip(centroids_rgb, self.clusters):\n # no need to revisit ratio\n new_idxed_arr = tf.concat(1,[tf.slice(cluster, [0,0], [-1,2]),\n tf.tile(tf.expand_dims(\n tf.constant(centroid_rgb), 0),\n multiples=[len(cluster.eval()), 1])])\n to_concat.append(new_idxed_arr)\n\n concated = tf.concat(0, to_concat)\n sorted_arr = np.array(sorted(concated.eval().tolist()), dtype=np.uint8)[:, 2:]\n\n new_img = Image.fromarray(sorted_arr.reshape([self.m, self.n, self.chann]))\n if save:\n new_img.save(outfile, format=format_)\n os.popen(\"open '{}'\".format(outfile))\n else:\n new_img.show()", "def sort_sprites(sprites, sort_type):\n sort_func = globals()['sort_' + sort_type]\n return sorted(sprites, key=sort_func, reverse=sort_type != 'name')", "def bubble_sort(game: \"Game\") -> None:\n for m in range(1, len(ARRAY)):\n for i in range(len(ARRAY) - m):\n # Pick 2 elements and compare them\n # if first element is higher, swap them, else keep them\n if ARRAY[i] > ARRAY[i + 1]:\n # These lines are being swapped\n STATES[i] = Colors.RED\n STATES[i + 1] = Colors.RED\n\n ARRAY[i], ARRAY[i + 1] = ARRAY[i + 1], ARRAY[i]\n else:\n # These lines won't be swapped\n STATES[i] = Colors.BLUE\n STATES[i + 1] = Colors.BLUE\n\n # Update game window\n game.update_screen()\n\n # Reset the line colors\n STATES[i] = Colors.WHITE\n STATES[i + 1] = Colors.WHITE\n # This line is already correctly sorted\n STATES[len(ARRAY) - m] = Colors.GREEN", "def populate_buttons(self):\n\n # Figure out which index we'll sort on\n if self.sort_group.checkedButton() == self.button_mtime:\n to_sort = self.sort_mtime_idx\n reverse = True\n else:\n to_sort = self.sort_alpha_idx\n reverse = False\n\n # Now add things. 
This'll automatically shuffle stuff around without\n # us having to worry about removing things first.\n for row, (_, _, button) in enumerate(\n sorted(self.buttons, reverse=reverse, key=lambda i: i[to_sort])\n ):\n self.grid.addWidget(button, row, 0)", "def sortColors(self, nums):\n prev = None\n for i in range(len(nums)):\n curr = nums[i]\n if prev is None:\n prev = curr\n else:\n x = i\n while curr < prev:\n nums[x] = prev\n nums[x - 1] = curr\n x -= 1\n if x <= 0:\n break\n curr = nums[x]\n prev = nums[x - 1] \n \n prev = nums[i]", "def paintTags(self):\n imagesTagOrder = [\"gender\", \"skin\", \"head\", \"body\", \"mask\", \"hair\", \"shirt\", \"trousers\", \"skirt\", \"shoes\"]\n pos = 0\n for img in imagesTagOrder:\n self.imagesTag[img].topleft = 296, pos * 76\n self.imagesTag[img].connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.paintCustomizeZone, img)\n self.window.add_child(self.imagesTag[img])\n pos += 1", "def sortChoices(self):\n self.formatList.sort()", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def sortColors(self, nums: List[int]) -> None:\n self.quickSort(nums, 0, len(nums)-1)", "def sort(self):\n self.deckcards.sort()", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def sortColors(self, nums: 'List[int]') -> None:\n for i in range(len(nums)):\n for j in range(len(nums)-1-i):\n if nums[j] > nums[j+1]:\n temp = nums[j]\n nums[j] = nums[j+1]\n nums[j+1] = temp", "def get_sorted_pixels(self):\n # (but make sure you have lots of memory before you do that on huge images).\n # [::-1] reverses order\n 
return sorted(self.get_colors(), key=lambda x: x[0])[::-1]", "def selection_sort(master, canvas, user_list):\n\n for i in range(len(user_list)):\n\n low = find_min(i, user_list)\n\n canvas.delete(user_list[i].object)\n canvas.delete(user_list[low].object)\n\n user_list[i], user_list[low] = user_list[low], user_list[i]\n\n user_list[i].iteration = i\n user_list[low].iteration = low\n\n user_list[i].object = canvas.create_rectangle(\n space + wi * user_list[i].iteration,\n le - user_list[i].value,\n space + wi * (user_list[i].iteration + 1),\n le, fill=\"blue\")\n user_list[low].object = canvas.create_rectangle(\n space + wi * user_list[low].iteration,\n le - user_list[low].value,\n space + wi * (user_list[low].iteration + 1),\n le, fill=\"blue\")\n\n time.sleep(stopper)\n\n master.update()", "def sort(self,color_list):\r\n sorted_list= deque()\r\n if len(color_list)==0:\r\n print(\"Invalid input, expecting non-empty color list\")\r\n return\r\n \r\n for x in color_list:\r\n if x not in Cards.shades:\r\n print(\"Invalid Input, invalid color given as input\")\r\n return \r\n \r\n for x in color_list:\r\n sort1=deque()\r\n for y in self.deck_of_cards:\r\n if x == y[0]:\r\n sort1.append(y)\r\n sort1=sorted(sort1,key=lambda x:x[1])\r\n sorted_list.extend(sort1)\r\n self.deck_of_cards = sorted_list\r\n return self.deck_of_cards", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort(self):\n # sort the contents of the container alphabetically\n # this is done automatically whenever an item is added/removed from the Container\n self.items.sort(key=lambda item: item.name)", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j] < self.genepool[0][j-1]:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j] < self.genepool[1][j-1]:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break", "def sortColors(self, nums: List[int]):\n nums = []\n nus0 = nums.count(0)\n nus1 = nums.count(1)\n nus2 = nums.count(2)\n for i in range(nus0):\n nums.insert(i,0)\n for j in range(nus1):\n nums.insert(nus0+j,1)\n for z in range(nus2):\n nums.insert(nus0+nus1+z,2)", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)", "def sortColors(self, nums: List[int]) -> None:\n\n def quick_sort(ary, gte_idx, lte_idx) -> None:\n\n # 재귀 탈출조건\n if lte_idx - gte_idx < 1:\n return\n\n # 임의의 위치를 pivot으로 지정 (여기선 가운데 값)\n init_pivot_idx = (gte_idx + lte_idx) // 2\n\n # print(\"test:\", [str(p) if gte_idx <= p_idx <= lte_idx else \"_\" for p_idx, p in enumerate(ary)])\n # pivot 값을 맨 앞의 값과 swap한다 (순차적으로 읽으면서 pivot과 비교할거니까)\n ary[gte_idx], ary[init_pivot_idx] = ary[init_pivot_idx], ary[gte_idx]\n\n # gte_idx+1부터 차례로 읽으면서 pivot보다 작은 값이 있으면\n # 읽은 범위 내의 pivot보다 큰 값 중 맨 앞의 것과 swap해준다\n bigger_idx = gte_idx + 1\n for i in range(gte_idx + 1, lte_idx + 1):\n # 맨 앞의 pivot과 비교\n if ary[i] < ary[gte_idx]:\n ary[i], ary[bigger_idx] = ary[bigger_idx], ary[i]\n bigger_idx += 1\n # 
print(\"test:\", [str(p) if gte_idx <= p_idx <= lte_idx else \"_\" for p_idx, p in enumerate(ary)])\n\n # 맨 앞에 있던 pivot을 pivot보다 큰 범위의 바로 앞의 값과 swap\n ary[gte_idx], ary[bigger_idx - 1] = ary[bigger_idx - 1], ary[gte_idx]\n # print(\"test:\", [str(p) if gte_idx <= p_idx <= lte_idx else \"_\" for p_idx, p in enumerate(ary)])\n\n # recursion으로 좌우 분할정복\n pivot_idx = bigger_idx - 1\n quick_sort(ary, gte_idx, pivot_idx - 1)\n quick_sort(ary, pivot_idx + 1, lte_idx)\n\n quick_sort(nums, 0, len(nums) - 1)", "def sortColors2(self, nums: List[int]) -> None:\n # Accepted\n # 87/87 cases passed (32 ms)\n # Your runtime beats 86.65 % of python3 submissions\n # Your memory usage beats 46.36 % of python3 submissions (13.1 MB)\n self.sortArray(nums)", "def sortAssemsByRing(self):\n sortKey = lambda a: a.spatialLocator.getRingPos()\n self._children = sorted(self._children, key=sortKey)", "def heap_sort(data_list, draw_data, time_value):\n\n # heapifies the list\n for i in range((len(data_list) // 2) - 1, -1, -1):\n heapify(data_list, len(data_list), i, draw_data, time_value)\n\n # draw the heapified list as blue before starting the popping from the heap\n draw_data(data_list, [\"blue\" for i in range(len(data_list))])\n time.sleep(time_value)\n\n for i in range(len(data_list) - 1, 0, -1):\n _swap(data_list, i, 0)\n\n # generate the color list to be visualized\n color_list = [\"red\" for x in range(len(data_list))]\n\n # color the two elements being swapped green\n for x in range(len(color_list)):\n if (x == i) or (x == 0):\n color_list[x] = \"green\"\n\n # visualize the swap and wait the specified amount of time\n draw_data(data_list, color_list)\n time.sleep(time_value)\n\n # heapify the remaining portion of the list\n heapify(data_list, i, 0, draw_data, time_value)\n\n # color the whole list as green after the sort\n draw_data(data_list, [\"green\" for i in range(len(data_list))])", "def sort_fabrics():\n max_shelf_qty = Decimal('240')\n shelves = Shelf.objects.all().order_by('tower', 'name')\n current_shelf_index = 0\n shelf = shelves[current_shelf_index]\n cell_style = \"\"\"\n border-bottom:1px solid #595959;\n border-right:1px solid #595959;\n padding:1em 0.5em;\n text-align:center;\n font-size:1;\n font-family:Tahoma;\n max-height:5em;\n \"\"\"\n header_cell_style = \"\"\"\n border-right:1px solid #595959;\n border-bottom:1px solid #595959;\n border-top:1px solid #595959;\n padding:1em;\n \"\"\"\n item_cell_style = \"\"\"\n padding:0.75em 0.25em;\n \"\"\"\n \n def exceeds_shelf_capacity(shelf, fabric):\n \"\"\"\n Tests whether adding this fabric to shelf will exceed the shelf's maximum \n capacity. 
Returns a boolean based on the result\n \"\"\"\n shelf_total = Decimal(shelf.fabrics.all().aggregate(Sum('quantity_th'))['quantity_th__sum'] or 0)\n return True if (shelf_total) + fabric.quantity > max_shelf_qty else False\n \n # Reset the shelving arrangements\n Fabric.objects.all().update(shelf=None)\n \n # Loops through the fabrics, organized by patterns so that \n # similar fabrics by patterns are close to each other\n for fabric in Fabric.objects.filter(item__acknowledgement__time_created__gte=date(2014, 1, 1)).distinct().order_by('pattern', 'color'):\n # Only find a shelf if there is fabric to store\n if fabric.quantity > Decimal('0'):\n if not exceeds_shelf_capacity(shelf, fabric):\n fabric.shelf = shelf\n \n else:\n # Loops through all the previous shelves to look for space\n for past_shelf in shelves[0: current_shelf_index]:\n if not exceeds_shelf_capacity(past_shelf, fabric): \n fabric.shelf = past_shelf\n \n try:\n if fabric.shelf is None: \n current_shelf_index += 1\n \n try:\n shelf = shelves[current_shelf_index]\n except (KeyError, IndexError):\n pass#raise ValueError(\"You've run out of space to store fabrics!\")\n \n fabric.shelf = shelf\n \n except Exception:\n current_shelf_index += 1\n \n try:\n shelf = shelves[current_shelf_index]\n except (KeyError, IndexError):\n pass#raise ValueError(\"You've run out of space to store fabrics!\")\n \n fabric.shelf = shelf\n \n fabric.save()\n\n \n \n #return self.message\n return render_to_string('fabric_email.html', {'towers': Tower.objects.all().order_by('id'),\n 'header_style': header_cell_style,\n 'cell_style': cell_style,\n 'item_cell_style': item_cell_style})", "def GetSortImages(self):\n\n return self.sort_down, self.sort_up", "def sortColors(self, nums: List[int]) -> None:\n tp=[1 for i in nums]\n start=0\n end=len(tp)-1\n for i in nums:\n if i==0:\n tp[start]=i\n start+=1\n elif i==2:\n tp[end]=i\n end-=1\n for index,i in enumerate(tp):\n nums[index]=i", "def sortColors(self, nums: List[int]) -> None:\n n = len(nums)\n if n < 2:\n return\n for i in range(n):\n flag = False\n for j in range(n - 1 - i):\n if nums[j] > nums[j+1]:\n tmp = nums[j]\n nums[j] = nums[j+1]\n nums[j+1] = tmp\n flag = True\n if not flag:\n break", "def sortColors(self, nums: List[int]) -> None:\n p0_end, p1_end = 0, 0\n for i, n in enumerate(nums):\n if n != 2:\n if p1_end < i:\n nums[i], nums[p1_end] = nums[p1_end], nums[i]\n p1_end += 1\n if n == 0:\n i = p1_end - 1\n if p0_end < p1_end:\n nums[i], nums[p0_end] = nums[p0_end], nums[i]\n p0_end += 1", "def sort_my_hands(self):\n self.hands_list.sort(reverse=True)", "def sort(self):\n\t\twith self.AutoSplitlines():\n\t\t\tself.lines = sorted(self.lines)", "def sortColors(nums: [int]) -> None:\n # 简单版快排,使用了额外的空间,不符合题意,做升级版\n # if not nums or len(nums) < 2: return nums\n # pivot_index = 0\n # pivot = nums[pivot_index]\n # left = [i for i in nums[pivot_index+1:] if i <= pivot]\n # right = [i for i in nums[pivot_index+1:] if i > pivot]\n # return sortColors(left) + [pivot] + sortColors(right)\n\n\n # 升级版,原地快排\n l, cur, r = 0, 0, len(nums) - 1\n while l <= r:\n if nums[l] == 0: # 当左指针为0,把先前的1换掉\n nums[l], nums[cur] = nums[cur], nums[l]\n cur += 1\n l += 1\n elif nums[l] == 2: # 当左指针为2时,放到最右边去,然后右指针往左走\n nums[l], nums[r] = nums[r], nums[l]\n r -= 1\n else:\n l += 1", "def sort(self, p_int, order=None):\n try:\n self.layoutAboutToBeChanged.emit()\n\n source_array = self.get_source_array()\n\n if len(source_array):\n source_array = sorted(source_array,\n key=lambda x: (self.get_item(x, p_int) is None,\n 
str(type(self.get_item(x, p_int))),\n self.get_item(x, p_int)))\n if order == Qt.DescendingOrder:\n source_array = source_array[::-1]\n\n self.set_source_array(source_array)\n\n self.init_cache()\n self.layoutChanged.emit()\n except Exception as e:\n logging.error(str(e))", "def __get_sorted_file_list(self):\n d = self.__view.CurrentImgDir\n list = os.listdir(d)\n if self.__view.SortType == constant.THUMB_SORT_FILENAME:\n # Sort by Name\n list.sort()\n if self.__view.SortType == 2:\n # Sort by Size\n list.sort(lambda a, b: int(os.stat(os.path.join(d,a))[stat.ST_SIZE] - os.stat(os.path.join(d,b))[stat.ST_SIZE])) \n return list", "def main():\n\n #setup for the display window\n dimensions = (1000, 700)\n window = pygame.display.set_mode(dimensions)\n pygame.display.set_caption(\"Bubble Sort\")\n\n #create buttons\n finish_button = Button(window, (0, 255, 0), 0, 700, 250, -100, \"Finish\")\n forward_button = Button(window, (255, 0, 0), 250, 700, 250, -100, \"Forwards\")\n switch_algorithm_button = Button(window, (200, 200, 200), 500, 700, 250, -100, \"Switch Algorithm\")\n\n num = [7, 2, 9, 17, 12, 1, 3, 5]\n\n #Create a list of bubbles based on a list of ints\n num_list = []\n for i in num:\n num_list.append(Value(i))\n width = dimensions[0]/len(num_list)\n iteration = 0\n algorithms = [\"bubble\", \"selection\"]\n current = algorithms[0]\n\n #Game/Display loop\n #Will set the background to white\n running = True\n while running:\n #Reset the background to white\n window.fill((255, 255, 255))\n finish_button.draw()\n forward_button.draw()\n switch_algorithm_button.draw()\n pos = pygame.mouse.get_pos()\n\n #event handling\n #handling for exiting the program\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n #Handling for changing the color of buttons\n if event.type == pygame.MOUSEMOTION:\n if finish_button.above(pos):\n finish_button.color = (0, 255, 100)\n else:\n finish_button.color = (0, 255, 0)\n if forward_button.above(pos):\n forward_button.color = (255, 0, 100)\n else:\n forward_button.color = (255, 0, 0)\n if switch_algorithm_button.above(pos):\n switch_algorithm_button.color = (255, 255, 255)\n else:\n switch_algorithm_button.color = (200, 200, 200)\n\n #handling for iterating through the sort\n if event.type == pygame.MOUSEBUTTONDOWN:\n if forward_button.above(pos):\n if current == \"bubble\":\n for j in range(0, len(num_list)-iteration-1):\n temp = num_list[j].value\n if num_list[j].value > num_list[j+1].value:\n num_list[j].value = num_list[j+1].value\n num_list[j+1].value = temp\n iteration += 1\n\n if current == \"selection\" and iteration < len(num_list):\n maximum = num_list[0].value\n index = 0\n for j in range(1, len(num_list)-iteration):\n if num_list[j].value > maximum:\n maximum = num_list[j].value\n index = j\n num_list[index].value = num_list[len(num_list)-iteration-1].value\n num_list[len(num_list)-iteration-1].value = maximum\n iteration += 1\n\n\n if finish_button.above(pos):\n if current == \"bubble\":\n for i in range(len(num_list)):\n for j in range(0, len(num_list)-iteration-1):\n temp = num_list[j].value\n if num_list[j].value > num_list[j+1].value:\n num_list[j].value = num_list[j+1].value\n num_list[j+1].value = temp\n\n if current == \"selection\":\n for i in range(len(num_list)):\n maximum = num_list[0].value\n index = 0\n for j in range(1, len(num_list)-i):\n if num_list[j].value > maximum:\n maximum = num_list[j].value\n index = j\n num_list[index].value = num_list[len(num_list)-i-1].value\n 
num_list[len(num_list)-i-1].value = maximum\n\n if switch_algorithm_button.above(pos):\n iteration = 0\n for i, entry in enumerate(num_list):\n entry.value = num[i]\n current = algorithms[1]\n print(current)\n temp = algorithms[0]\n del algorithms[0]\n algorithms.append(temp)\n\n\n #draw each member of the list\n for i, entry in enumerate(num_list):\n entry.draw(window, i*width, width)\n\n #update the display\n pygame.display.flip()", "def get_drawables(self):\n to_draw = []\n for k,v in self._to_draw.items():\n if isinstance(v,Iterable):\n for i in v:\n to_draw.append(i)\n else:\n to_draw.append(v)\n return to_draw", "def sortColors(self, nums: List[int]) -> None:\n # initialize several pointers\n l = 0\n r = len(nums) - 1\n cur = 0\n \n # use two pointers on the two ends\n while (cur <= r):\n # if number is 0, swap with l (to stay on the left)\n if nums[cur] == 0:\n nums[l], nums[cur] = nums[cur], nums[l]\n l += 1\n cur += 1\n # if number is 2, swap with r (to stay on the right)\n elif nums[cur] == 2:\n nums[r], nums[cur] = nums[cur], nums[r]\n r -= 1\n else: \n cur += 1", "def sortColors(self, nums: List[int]) -> None:\n #Swapping balls\n j = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n nums[i],nums[j] = nums[j],nums[i]\n j+=1\n k = len(nums)-1\n for i in range(len(nums)-1,j-1,-1):\n if nums[i] == 2:\n nums[i],nums[k] = nums[k],nums[i]\n k-=1\n # dic = collections.defaultdict(int)\n # for i in nums:\n # dic[i]+=1\n # p = 0\n # i = 0\n # while i < len(nums):\n # if dic[p]>0:\n # nums[i] = p\n # i+=1\n # dic[p]-=1\n # else:\n # p+=1\n # print(dic)", "def sortColors(self, nums: List[int]) -> None:\r\n l = len(nums)\r\n index = 0\r\n for i in range(1, l):\r\n if nums[i] < nums[index]:\r\n index = i\r\n nums[0], nums[index] = nums[index], nums[0]\r\n k = j = 1\r\n while j < l:\r\n if nums[j] == 2:\r\n nums.pop(j)\r\n nums.append(2)\r\n l -= 1\r\n elif nums[j] == 0:\r\n nums[k], nums[j] = nums[j], nums[k]\r\n k += 1\r\n j += 1\r\n else:\r\n j += 1\r\n\r\n print(nums)", "def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()", "def sort_in_jobs(self, in_jobs):\n if len(in_jobs) is 0:\n return in_jobs\n jobs_ordered_yx = sorted(\n in_jobs,\n key=lambda job: (job['y'] + job['height'], job['x'] + job['width']),\n reverse=True)\n\n return jobs_ordered_yx", "def sort_area(sprite):\n return sprite.area", "def sortColors(self, nums):\n st = 0\n end = len(nums)-1\n for i in range(len(nums)):\n if nums[i] == 2:\n nums[i], nums[end] = nums[end], nums[i]\n end -= 1\n if nums[i] == 0:\n nums[i], nums[st] = nums[st], nums[i]\n st += 1\n if i >= end:\n break\n return nums", "def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()", "def get_drawables(self):\n w,h = self.image.get_size()\n return [DrawableSurface(self.image, \n pygame.Rect(self.pos_x, self.pos_y, w, h))]", "def resort(self):\n self.items.sort(key=lambda node: node.path_weight, reverse=True)", "def sort_colors(self, nums: List[int]) -> None:\n if not nums:\n return\n r, w, b = 0, 0, len(nums)\n while w < b:\n if nums[w] == 0:\n nums[r], nums[w] = nums[w], nums[r]\n r += 1\n w += 1\n elif nums[w] == 2:\n b -= 1\n nums[b], nums[w] = nums[w], nums[b]\n else:\n w += 1", "def sortColors(self, nums):\n \n firstPtr = currPtr = 0\n \n secondPtr = len(nums) - 1\n \n while currPtr <= secondPtr:\n if nums[currPtr] == 0:\n nums[firstPtr], nums[currPtr] = nums[currPtr], nums[firstPtr]\n firstPtr 
+= 1\n currPtr += 1\n elif nums[currPtr] == 2:\n nums[secondPtr], nums[currPtr] = nums[currPtr], nums[secondPtr]\n secondPtr -= 1\n else:\n currPtr += 1\n\n return nums", "def sort_objects_from_viewworld(self, viewworld):\n opaque_objects = []\n transparent_objects = []\n centers = []\n for guid in self.objects:\n obj = self.objects[guid]\n if isinstance(obj, BufferObject):\n if obj.opacity * self.opacity < 1 and obj.bounding_box_center is not None:\n transparent_objects.append(obj)\n centers.append(transform_points_numpy([obj.bounding_box_center], obj.matrix)[0])\n else:\n opaque_objects.append(obj)\n if transparent_objects:\n centers = transform_points_numpy(centers, viewworld)\n transparent_objects = sorted(zip(transparent_objects, centers), key=lambda pair: pair[1][2])\n transparent_objects, _ = zip(*transparent_objects)\n return opaque_objects + list(transparent_objects)", "def sort(self):\r\n return self.sort_targets([self])", "def sortColors(self, nums: List[int]) -> None:\n cur, length = 0, len(nums)\n for i in range(length):\n if nums[i] == 0:\n nums[i], nums[cur] = nums[cur], nums[i]\n cur += 1\n for i in range(cur, length):\n if nums[i] == 1:\n nums[i], nums[cur] = nums[cur], nums[i]\n cur += 1", "def gnome_sort(items):\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i-1]:\n items[i], items[i-1] = items[i-1], items[i]\n i -= 1\n else:\n i += 1\n return items", "def sort_colors(nums: List[int]) -> None:\n if nums is None or len(nums) <2:\n return nums\n low, mid, high = 0, 0, len(nums)-1\n while mid <= high:\n #loop over and swap elements depending on the value is 0 or 2\n if nums[mid]==0:\n nums[low], nums[mid] = nums[mid], nums[low]\n low += 1\n mid += 1\n elif nums[mid] == 1:\n # just move the mid pointer as we want the number 1 to be in the middle\n mid += 1\n elif nums[mid] == 2:\n # swap the mid and highh\n nums[mid], nums[high] = nums[high], nums[mid]\n high -= 1", "def sortColors(self, nums: List[int]) -> None:\n #fucking boring problem\n n = len(nums)\n i, j, k = 0, n - 1, n - 1\n while i <= j:\n if nums[i] == 1:\n nums[i], nums[j] = nums[j], nums[i]\n j -= 1\n elif nums[i] == 2:\n nums[i], nums[k] = nums[k], nums[i]\n k -= 1\n j = min(j, k)\n else:\n i += 1", "def __editSortSelectedLines(self):\n editor = self.activeWindow()\n if editor:\n editor.sortLines()", "def sortColors(self, nums: List[int]) -> None:\n # 将0不断交换到左边,将2不断交换到右边\n left,right=0,len(nums)-1\n index=0\n while index<=right:\n if nums[index]==0:\n nums[index],nums[left]=nums[left],nums[index]\n left+=1\n if nums[index]==2:\n nums[index],nums[right]=nums[right],nums[index]\n right-=1\n if nums[index]!=1: #因为index是往右走的,上面那个如果出现换完以后nums[index]=0的话,无所谓,index继续走就好了,因为0就是要在index左边的\n index-=1 # 但是如果换完是2的话,还需要继续判断这个值。所以让index原地不动\n index+=1", "def sortColors(self, nums: List[int]) -> None:\n \"\"\"执行用时:\n48 ms\n, 在所有 Python3 提交中击败了\n15.26%\n的用户\n内存消耗:\n14.6 MB\n, 在所有 Python3 提交中击败了\n93.13%\n的用\"\"\"\n # 快排\n def quick_sort(l, r):\n # 加速,以0开头的,l+=1\n while l < r and nums[l] == 0:\n l += 1\n # 以2结尾的r-=1\n while l < r and nums[r] == 2:\n r -= 1\n # 以2开头的,对换,r-=1\n while l < r and nums[l] == 2:\n nums[l], nums[r] = nums[r], nums[l]\n r -= 1\n while l < r and nums[r] == 0:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n if l >= r:\n return\n base = nums[r]\n min_i = l\n for i in range(l, r):\n if nums[i] < base:\n nums[min_i], nums[i] = nums[i], nums[min_i]\n min_i += 1\n nums[min_i], nums[r] = nums[r], nums[min_i]\n quick_sort(l, min_i-1)\n quick_sort(min_i+1, r)\n quick_sort(0, len(nums)-1)", 
"def sort_eyes(self):\n x1 = self.eyes[0][0]\n x2 = self.eyes[1][0]\n\n if x1 > x2:\n self.eyes.reverse()", "def sortColors(self, nums: List[int]) -> None:\n if nums:\n low, high = 0, len(nums) - 1\n i = 0\n\n while i <= high:\n if nums[i] == 0:\n nums[i], nums[low] = nums[low], nums[i]\n i += 1\n low += 1\n elif nums[i] == 1:\n i += 1\n else: # nums[i] == 2\n nums[i], nums[high] = nums[high], nums[i]\n high -= 1", "def sort(self):\n self.chain_list.sort()\n for chain in self.chain_list:\n chain.sort()", "def _sort(self):\n self.population.sort()\n self.population.reverse()", "def order_vertices(self):\r\n \r\n ordered = False\r\n while ordered == False:\r\n for i in range(len(self.vertices)):\r\n ordered = True\r\n for parent in self.vertices[i].parents:\r\n if parent>i:\r\n ordered = False\r\n self.swap_vertices(i, parent)", "def sortColors(self, nums) -> None:\n my_list = [0, 0, 0]\n for digit in nums:\n my_list[digit] += 1\n k = 0 # k指向第一个不为0的数\n while k < 3 and my_list[k] == 0:\n k += 1\n for i in range(len(nums)):\n nums[i] = k\n my_list[k] -= 1\n while k < 3 and my_list[k] == 0:\n k += 1", "def sort(self):\n self.fragment_list.sort()", "def sortColors(self, nums: List[int]) -> None:\n ones = []\n if len(nums)==0: return\n lo, hi = 0, len(nums) - 1\n zero, one, two = 0, 0,0\n while lo <= hi:\n if nums[lo] == 0:\n zero +=1\n elif nums[lo] == 1:\n one +=1\n else:\n two +=1\n lo +=1\n\n i = 0\n while i < zero:\n nums[i] = 0\n i +=1\n i = 0\n while i < one:\n nums[i+zero] = 1\n i +=1\n i = 0\n while i < two:\n nums[i+zero+one] = 2\n i +=1", "def sortByColor(xs):\n xs = [x for x in xs if len(x.reshape(-1)) > 0]\n return list(sorted(xs, key=lambda x: x.max()))", "def sortColors(self, nums: List[int]) -> None:\n u = v = 0\n for i in range(len(nums)):\n temp = nums[i]\n nums[i] = 2\n if temp == 1:\n nums[v] = 1\n v += 1\n if temp == 0:\n nums[u] = 0\n if v != u:\n nums[v] = 1\n u += 1\n v += 1\n print(nums)", "def sortColors(self, nums: List[int]) -> None:\n left = -1\n right = len(nums)\n index = 0\n # since it is only 0,1,2, make 0 to the very left side, 2 to the very right side\n # will solve the problem\n while index < right:\n if nums[index] == 0:\n left += 1\n temp = nums[left]\n nums[left] = 0\n nums[index] = temp\n if left == index: index += 1\n elif nums[index] == 2:\n right -= 1\n temp = nums[right]\n nums[right] = 2\n nums[index] = temp\n else:\n index += 1", "def sortColors(self, nums: List[int]) -> None:\n c0, c1, c2 = 0, 0, 0\n\n n = len(nums)\n\n for i in range(n):\n if nums[i] == 0:\n c0 += 1\n elif nums[i] == 1:\n c1 += 1\n else:\n c2 += 1\n \n nums[0:c0] = [0] * c0\n nums[c0:c1+c0] = [1] * c1\n nums[c0+c1:] = [2] * c2", "def decomposition_order(self):\n if not self.is_atomistic():\n self.make_atomistic()\n objects = set(self.above(self.bottom))\n predecessors_exist = set()\n take_after = set()\n arrow_head = set()\n arrows = {}\n order = []\n is_built = objects.copy()\n for element in objects:\n for successor in self.above(element):\n if self.other_under(successor, element) in objects:\n predecessors_exist.add(successor)\n while len(predecessors_exist) > 0:\n chosen_candidate = random.sample(predecessors_exist - take_after, 1)[0]\n predecessors_exist.remove(chosen_candidate)\n is_built.add(chosen_candidate)\n order.append(chosen_candidate)\n arrows[chosen_candidate] = []\n for predecessor in self.under(chosen_candidate):\n if len(self.above(predecessor)) == 2:\n other_succ = self.other_above(predecessor, chosen_candidate)\n if other_succ not in is_built:\n 
arrow_head.add(chosen_candidate)\n arrows[chosen_candidate].append(predecessor)\n else:\n arrows[other_succ].remove(predecessor)\n if len(arrows[other_succ]) == 0:\n arrow_head.remove(other_succ)\n for successor in self.above(other_succ):\n if self.other_under(successor, other_succ) not in arrow_head:\n take_after.discard(successor)\n for successor in self.above(chosen_candidate):\n if self.other_under(successor, chosen_candidate) in is_built:\n predecessors_exist.add(successor)\n for other_succ_pred in self.under(successor):\n if other_succ_pred in arrow_head:\n take_after.add(successor)\n return order", "def bubbleSort(list):", "def sorted(self): \n pass", "def reset_window(self):\n self.sorting = False\n self.sort_list = []\n self.window.delete('all')\n for i in range(100):\n random_height = randint(40,280)\n line_id = self.window.create_line(4*i+50, 20, 4*i+50, random_height)\n self.sort_list.append([random_height, line_id])\n self.window.update()", "def sort(self, col, order):\n self.layoutAboutToBeChanged.emit()\n self.mylist = sorted(self.mylist,\n key=operator.itemgetter(col))\n if order == Qt.DescendingOrder:\n self.mylist.reverse()\n self.layoutChanged.emit()", "def sortColors(self, nums: List[int]) -> None:\n\n red = 0\n white = 1\n blue = 2\n\n (redp, whitep, bluep) = (0, 0, len(nums) - 1)\n\n while whitep <= bluep:\n\n if nums[whitep] == white:\n whitep += 1\n\n elif nums[whitep] == blue:\n nums[whitep], nums[bluep] = nums[bluep], nums[whitep]\n bluep -= 1\n\n else:\n nums[whitep], nums[redp] = nums[redp], nums[whitep]\n redp += 1\n whitep += 1", "def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == QtCore.Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))", "def test_sorting():\n circles = [Circle(i) for i in range(10, 1, -1)] \n sorted_circles = sorted(circles, key=Circle.sort_key)\n assert circles != sorted_circles", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order", "def sortColors(self, nums: List[int]) -> None:\n\n right = 0\n left = len(nums) - 1\n i = 0\n while i <= left:\n if nums[i] == 0:\n temp = nums[i]\n nums[i] = nums[right]\n nums[right] = temp\n right += 1\n i += 1\n elif nums[i] == 2:\n temp = nums[i]\n nums[i] = nums[left]\n nums[left] = temp\n left -= 1\n else:\n i += 1", "def sort_by_rgb(colors_tuple):\n sorted_tuple = sorted(colors_tuple, key=lambda x:x[1])\n return sorted_tuple", "def sort(self):\n # Base Case\n # If the robot has reached the end of the list and his light is off (no swaps have occurred),\n if self.can_move_right() == False and self.light_is_on() == False:\n return\n\n # Grab the first card\n self.swap_item()\n\n # While the robot is still able to move right,\n while self.can_move_right():\n\n # Move right\n self.move_right()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is greater than what he is holding (-1), swap items\n if self.compare_item() == -1:\n # 
Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once the robot can no longer move right, he is at the end of the list and holding the largest value\n # Swap items\n self.swap_item()\n\n # Now the robot needs to traverse back to index 0, grabbing the smallest value as he goes\n # Follow the same logic as when he moved right with the largest value\n\n # If he hits a empty slot in the list, everything in front of it has been sorted\n # He doesn't need to sort anymore, he is holding the smallest value left to be sorted. \n # Put it in the blank spot and turn to move back in the other direction\n\n while self.compare_item() is not None:\n\n # Move left\n self.move_left()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is less than what he is holding (1), swap items\n if self.compare_item() == 1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once self.compare_item() is None, that means he is in front of a blank space\n # - everything to the left of the blank space has already been sorted\n # Deposit what he is holding\n self.swap_item()\n\n # Reset the light to the off position\n self.set_light_off()\n\n # Move one spot over to the right\n self.move_right()\n\n # Re-run the process all over again\n self.sort()", "def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))", "def selectFigures(self):\n # Shuffle the dictionary\n dict_items = list(self.pathCrr.items())\n random.shuffle(dict_items)\n self.pathCrr = {}\n self.pathCrr = {key:value for (key,value) in dict_items}\n\n self.nextBarFigure()\n self.nextLineFigure()", "def sortEdges( self, listEdges ):\r\n changed = True\r\n while changed:\r\n changed = False\r\n for i in range( len(listEdges)-1 ):\r\n if listEdges[ i ].cost > listEdges[ i+1 ].cost:\r\n changed = True\r\n aux = listEdges[ i ]\r\n listEdges[ i ] = listEdges[ i+1 ]\r\n listEdges[ i+1 ] = aux", "def View_Preorder( self ):\r\n cb.order = 0\r\n self.system.Draw( )", "def sortColors(self, nums: List[int]) -> None:\n last = len(nums)-1\n middle = 0\n first = 0\n while middle <= last:\n if nums[middle] == 2:\n nums[middle], nums[last] = nums[last], nums[middle]\n last -= 1\n elif nums[middle] == 0:\n nums[first] = 0\n if middle > first:\n nums[middle] = 1\n first += 1\n middle += 1\n else:\n middle += 1", "def sort_weight(self):\n self._elements = list(\n _[-1] for _ in sorted((e.weight, e.value, e) for e in self)\n )", "def sortColors(self, nums) -> None:\n if nums is None or len(nums) == 0:\n return None\n # len_n = len(nums)\n # res = [1 for i in range(len_n)]\n # right = len_n - 1\n # left = 0\n # for n in nums:\n # if n == 2:\n # res[right] = 2\n # right -= 1\n # if n == 0:\n # res[left] = 0\n # left += 1\n # return res\n\n index = 0\n count = 0\n len_n = len(nums)\n while count < len_n:\n if nums[index] == 2:\n nums.pop(index)\n nums.append(2)\n elif nums[index] == 0:\n nums.pop(index)\n nums.insert(0, 0)\n index += 1\n else:\n index += 1\n count += 1\n return nums", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def 
draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]" ]
[ "0.6645382", "0.6610428", "0.6208446", "0.6029311", "0.60101074", "0.60021496", "0.59987986", "0.5927565", "0.5859521", "0.58288765", "0.5824865", "0.5824865", "0.58011025", "0.57603604", "0.57544345", "0.5688162", "0.5662971", "0.5642846", "0.5616782", "0.55934376", "0.55926085", "0.55910313", "0.55880934", "0.5565566", "0.5539935", "0.5531791", "0.55188656", "0.5513584", "0.5510521", "0.55035174", "0.54983276", "0.5495553", "0.54932225", "0.5485079", "0.5480426", "0.546151", "0.5423955", "0.54028654", "0.5397365", "0.5387487", "0.5366523", "0.53593934", "0.53484863", "0.5346526", "0.53370607", "0.5327913", "0.5323629", "0.53210664", "0.53209454", "0.53167576", "0.5314149", "0.5311526", "0.5301376", "0.52772146", "0.5272899", "0.5260798", "0.5259317", "0.52588737", "0.52518404", "0.52375686", "0.5234304", "0.5233577", "0.52220845", "0.521646", "0.5213788", "0.52115643", "0.51942074", "0.51873064", "0.51846784", "0.51766586", "0.5170421", "0.5169004", "0.51631117", "0.5160998", "0.51549435", "0.5147071", "0.5139956", "0.5128741", "0.51231825", "0.5122973", "0.5112429", "0.5112131", "0.5107255", "0.5103023", "0.5102419", "0.50981534", "0.50977147", "0.5096224", "0.5089084", "0.50870854", "0.5086444", "0.50794876", "0.506447", "0.50541", "0.50458574", "0.50458574", "0.50458574", "0.50458574", "0.50458574", "0.50458574", "0.50458574" ]
0.0
-1
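Many of the negative passages for this row are variants of the Dutch national flag partition behind the sortColors problem. As an illustration only, a minimal, self-contained sketch of that technique in plain Python (the function name sort_colors and the sample input are assumptions for demonstration, not taken from the dataset):

from typing import List

def sort_colors(nums: List[int]) -> None:
    # Dutch national flag partition: sort a list of 0s, 1s and 2s in place.
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        if nums[mid] == 0:
            nums[low], nums[mid] = nums[mid], nums[low]  # move 0 into the left block
            low += 1
            mid += 1
        elif nums[mid] == 2:
            nums[mid], nums[high] = nums[high], nums[mid]  # move 2 into the right block
            high -= 1  # do not advance mid: the swapped-in value is still unexamined
        else:
            mid += 1  # 1s stay in the middle

if __name__ == "__main__":
    data = [2, 0, 2, 1, 1, 0]
    sort_colors(data)
    print(data)  # [0, 0, 1, 1, 2, 2]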
Save the response string.
def writeresponse(self, rspstr): self.response += rspstr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()", "def save_response(self, request, response):\n response_dict = self.process_response(request.path, response)\n try:\n self.ser.info(pickle.dumps(response_dict))\n self.ser.info(RESPONSE_UNIQUE_STRING)\n except (TypeError, pickle.PicklingError):\n #Can't pickle wsgi.error objects\n pass", "def save_response(self):\r\n self.q(css='input.save-button').first.click()\r\n EmptyPromise(\r\n lambda: 'save' in self.alert_message.lower(),\r\n \"Status message saved\"\r\n ).fulfill()", "def writeResponse(response):", "def save_response(self):\n self.indexes['resp'] = attribute_index('resp', self)\n # Checking if the attribute \"resp\" is not empty:\n if not type(self.resp['coords']) == np.ndarray:\n print(\"Response is empty. Please run a simulation.\")\n # Checking if the target response has already been registered:\n elif self.indexes['resp'] == None:\n # Registering the synapse if necessary:\n self.indexes['syn'] = register_instance('syn', self)\n # Registering the response and setting its new index:\n self.indexes['resp'] = register_instance('resp', self)\n create_directory('resp', self)\n # Exporting the contents of the attribute \"resp\" to csv files:\n path_dir = path_directory('resp', self)\n coords_ref = create_coords_ref(self.resp['coords'])\n pd.DataFrame(self.resp['coords']).to_csv(os.path.join(path_dir, 'coords.csv'))\n for i in range(len(coords_ref)) :\n self.resp['glus'][i].to_csv(os.path.join(path_dir, 'resglu{}.csv'.format(coords_ref[i])), header=True)\n self.resp['AMPAtot'].to_csv(os.path.join(path_dir, 'resAMPAtot.csv'), header=True)\n self.resp['V'].to_csv(os.path.join(path_dir, 'resV.csv'), header=True)\n print(\"Saved: response at index {} for synapse {}.\".format(self.indexes['resp'], self.indexes['syn']))\n else:\n print(\"Response already registered at index {} for synapse {}.\".format(self.indexes['resp'], self.indexes['syn']))", "def save_response(response, file_name, path='~/tmp/fcb-analyzer'):\n \n path = ensure_path(path)\n f = open(path + '/' + file_name, 'w')\n f.write(response.text)", "async def save_response(self, key: str, response: ClientResponse):\n if not self.is_cacheable(response):\n return\n logger.info(f'Saving response for key: {key}')\n\n expires = self.get_expiration_date(response)\n cached_response = await CachedResponse.from_client_response(response, expires)\n await self.responses.write(key, cached_response)\n\n # Alias any redirect requests to the same cache key\n for r in response.history:\n await self.redirects.write(self.create_key(r.method, r.url), key)", "def save(self, filepath: Union[str, pathlib.Path]) -> None:\n if isinstance(filepath, str):\n filepath = pathlib.Path(filepath)\n with filepath.open(mode='wb') as file:\n file.write(self.response.content)", "def save_response(self, key, response):\n self.responses[key] = response, datetime.now(timezone.utc)", "def write(self, str):\r\n self._setHeaders()\r\n self._response.getWriter().write(str)", "def update_response(self, response):\r\n self.stri.update_response(response)", "def update_response(self, response):\r\n self.stri.update_response(response)", "def write_response(self):\n response = {\n \"data\": self.data,\n \"errors\": self.errors,\n }\n self.write(json.dumps(response))", "def __save_response(self, method, extras, data):\n\n import os, re\n to = \"/tmp/lex/\"\n if not os.path.exists(to):\n os.mkdir(to)\n\n removeables = 
re.compile('[/&?:]')\n filename = method + '-' + '_'.join(\"%s=%s\" % kv for kv in extras.iteritems())\n filename = os.path.join(to, removeables.sub('_', filename))\n with open(filename, 'w') as f:\n f.write(data)", "def update_response(self, response):\n try:\n self.set_workspace()\n with open(self.RESPONSE_FILE, 'wb') as fobj:\n fobj.write(self.encoder.serialize(response)[0])\n path, url = self.publish(self.RESPONSE_FILE)\n except Exception as error:\n self.logger.warning(\"Failed to update the WPS execute response! %s\", error)\n raise\n self.logger.debug(\"Response updated.\")\n return path, url", "def save(self):\n return self.write()", "def store_response(self, new_response):\n self.responses.append(new_response)", "def save(self, output, data):\n return", "def save(self, output, data):\n pass", "def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)", "def do_SEND_SIMPLE_RESPONSE(self, response: str):\n self.wfile.write(response.encode('utf-8'))", "def store_response(self, resource):\n\n \"\"\"Get the content from the POST request.\"\"\"\n content_length = int(self.headers.getheader('Content-Length'))\n body = self.rfile.read(content_length)\n response = json.loads(body)\n\n \"\"\"Add the content to the configured resource queue\"\"\"\n if resource not in self.responses_qeues:\n self.responses_qeues[resource] = []\n self.responses_qeues[resource].append(response)\n else:\n self.responses_qeues[resource].append(response)\n\n \"\"\"Add the content to the dictionary of responses.\"\"\"\n #self.responses_dict.update(response)\n\n \"\"\"Send the response to the request.\"\"\"\n self.send_response(204)\n self.end_headers()", "def output(self, response: str):\n\n # Try to output through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n IO.stdout(response)", "def create_response_info(self, response):\n output_path = os.path.join(self.output_folder, self.file_name)\n output_path += \".response.txt\"\n with open(output_path, 'w') as file:\n file.write(json.dumps(response))", "def saveCookie(self, resp):\n #save Cookie\n if resp.has_key('set-cookie'):\n self.updateHeaders('Cookie', resp['set-cookie'])\n print '--', 'Save cookie : ', resp['set-cookie']", "def save(self, output, data):", "def save_to_file(self, string):\n with open(self.output_path, \"w\") as text_file:\n text_file.write(string)\n print \"Saved to file \" + self.output_path", "def save_file(self, response):\r\n # Extract filename from response url\r\n filename = re.search('[^/]+(?=/$|$)', response.url).group(0)\r\n\r\n # Prepend download folder name to the filename\r\n filename = self.config[\"folder\"] + filename\r\n os.makedirs(os.path.dirname(filename), exist_ok=True)\r\n\r\n # Write contents to file\r\n with open(filename, 'wb') as f:\r\n f.write(response.content)\r\n\r\n # Print message displaying the absolute filepath for convenience\r\n print(\"Downloaded file to \" + os.path.abspath(filename))", "def store_response(resp, response_dict):\n if response_dict is not None:\n response_dict['status'] = resp.status\n response_dict['reason'] = resp.reason\n response_dict['headers'] = resp_header_dict(resp)", "def save(self):\n with open(self.file_path, 'w', encoding=Config.ENCODING) as file:\n json.dump(self.data, file, indent=2, ensure_ascii=False)", "def save_response_to_file(self, response, format=None, annotation=''):\n \n if format is None:\n logging.error(\"Specify a 
format\")\n return None\n\n # Build filename, choosing extension carefully\n url = response.url\n _name, _ext = os.path.splitext(url.split('/')[-1])\n name = remove_reserved_chars(_name)\n if format in ['html', 'pdf']:\n # HTML files might originally have no extension;\n # PDF files may have a non-PDF extension but PDFMiner requires them to have a .pdf extension\n ext = f'.{format}'\n if _ext != '':\n logging.warning(f\"Overwriting file extension from url ({_ext}) with expected extension ({ext}) for {url}\")\n else:\n if _ext == '':\n # Look up extension from dictionary. Note that Google Sheets are assumed to be exported as CSV files.\n ext = todf.get_ext(format)\n logging.warning(\"No extension in original url for {format} data: using expected extension {ext}\")\n else:\n ext = _ext.split('?')[0] # Remove query portion of URL, if any \n file_name = f\"{self.state_abbrev}{annotation}{name}{ext}\"\n\n # Save HTML and CSV as text, other formats as binary\n file_path = os.path.join(TMPDIR, file_name)\n if ext == '.html' or ext == '.csv':\n try:\n with open(file_path, 'w') as f:\n f.write(response.text)\n except UnicodeEncodeError:\n with open(file_path, \"w\", encoding=\"utf-8\") as f:\n f.write(response.text)\n except AttributeError as e:\n logging.error(f\"{e}. Check if the format of the content at this URL is html as expected; if not, update the code to specify the correct format (e.g., pdf).\")\n else:\n with open(file_path, 'wb') as f:\n f.write(response.body) \n\n return file_path", "def save(self) -> str:\n return self._bettor.save()", "def save(self, path):\n f = open(path, 'w')\n f.write(self.content().encode('utf-8'))\n f.close()", "async def save(self, request, response) -> None:\n value = self.cipher.encrypt(request.session.dumps().encode())\n cookie = f'{self.cookie_name}={value.decode()}; SameSite=Lax'\n response.headers['Set-Cookie'] = cookie", "def save(self, url, output):\n\n shutil.copy2(self.get(url), output)", "def save(self):\n # EXERCISE:\n # - save self.access_token, self.user_id, self.save_message to access token file AccessData.ACCESS_TOKEN_FILE\n # @see http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python\n# TODO ==> INSERT CODE HERE <==\n\n logger.debug('saved access token in file %s' % (AccessData.ACCESS_TOKEN_FILE))", "def save(self, response):\n url = response.url\n if self.item_url(url):\n table_name = self._get_table_name(url)\n if table_name:\n data = response.json()\n self.backend.save(table_name, data)", "def save_request(self, request):\n request_dict = self.process_request(request)\n self.ser.info(pickle.dumps(request_dict))\n self.ser.info(REQUEST_UNIQUE_STRING)", "def respond(self, response):\n self.response = response", "def save_result(self):\n self.print_to_console()", "def do_POST(self, request, response):\n # Store data\n self.data = to_str(request.read_data())\n\n # Respond\n if self.error:\n response.send_content(404, 'Not active', 'text/plain')\n\n else:\n response.send_content(200, 'OK', 'text/plain')", "def save(self):\n # Ensure store path exists\n store_path = self.manager.store_path\n if not os.path.exists(store_path):\n os.makedirs(store_path)\n \n # Get filepath\n filename = self._filename\n \n # Write into file\n raw = self.to_json()\n self.service.log.store('Saving %s' % filename)\n f = open(filename, 'w')\n f.write(raw)\n f.close()", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def serialize_response(self, response):\n raise 
NotImplementedError()", "def save(self, data):\n self.write(data)", "def save_data(self):\n # Command to get the download data\n pass", "def save_httpd_session(self):\n # EXPLANATION:\n # The Dropbox redirect flow generates a token during the start() method,\n # which you must supply when calling the finish() method to prevent CSRF attacks\n # @see https://www.dropbox.com/developers/core/docs/python#DropboxOAuth2Flow\n # EXERCISE:\n # - save self.httpd_session to session data file self.HTTPD_SESSION_FILE\n # @see http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python\n# TODO ==> INSERT CODE HERE <==\n\n logger.debug('saved HTTPD session data \"{httpd_session}\" in file \"{session_file}\"'.format(\n httpd_session=str(self.httpd_session), session_file=self.HTTPD_SESSION_FILE))", "def return_response_string(self):\n response = \"{} {}\\r\\n\".format(self.protocol, self.code)\n str_headers = \"\"\n if self.headers:\n for k, v in self.headers.items():\n str_headers += \"{}: {}\\r\\n\".format(k, v)\n\n encoded_response = \"{}{}\\r\\n\".format(response, str_headers)\n encoded_response = encoded_response.encode(\"utf-8\")\n if self.body:\n if type(self.body) is not bytes:\n self.body = self.body.encode(\"utf-8\")\n encoded_response = encoded_response + self.body\n return encoded_response", "def save(self):\n # TODO: save the file", "def make_response(self, s):\n if sys.version_info[0] < 3:\n return s\n return s.encode('utf-8') # nocoverage - unreachable under py2", "def fetch_response(self):\n if self.stored_answer:\n return \"Thanks for your answer. Your answer has been saved. \"\\\n \"I will get back to you when the destined asker, rates your response. \"\\\n \"Keep your fingers crossed. Hopefully the asker will give you good ratings, \"\\\n \"and your karma points will boost up.\"\\\n \"Meanwhile, you can ask another question, or post answer for requested question.\"\n else:\n self.stored_answer = True\n return \"Sorry, you did not enter the Answer in the required format. \"\\\n \"Eg - \\\"[Answer][qid:<placeholder for question_number>] <Placeholder for Answer>\\\". Try again\"", "def set_cached_response(self) -> None:\n if self.get_caching_duration() > 0: # if caching is enabled for this request\n json_response = self._request_result.json()\n with open(self.cache_file_name, 'w') as json_file:\n json.dump(json_response, json_file, indent=4)", "def save_response_as_mp3(self, mp3_name: str = \"output.mp3\"):\n # The response's audio_content is binary.\n with open(mp3_name, 'wb') as out:\n # Write the response to the output file.\n out.write(self.response.audio_content)\n print('Audio content written to file \"output.mp3\"')", "def _success(self, result_ser, request):\n result = json.dumps(result_ser)\n request.write(result)\n request.finish()", "def _save_SERP(\n self, response: Union[SplashJsonResponse, ScrapyHttpResponse, ScrapyTextResponse]\n ) -> None:\n\n scraped_page = ScrapedPage(\n timestamp=self.timestamp,\n source=self.source,\n merchant=self.merchant,\n country=self.country,\n url=response.url,\n html=response.body.decode(\"utf-8\"),\n page_type=PageType.SERP.value,\n category=response.meta.get(\"category\"),\n gender=response.meta.get(\"gender\"),\n consumer_lifestage=response.meta.get(\"consumer_lifestage\"),\n meta_information=response.meta.get(\"meta_data\"),\n )\n\n self.message_queue.add_scraping(table_name=self.table_name, scraped_page=scraped_page)", "def handle_response(self, response):\n self.__log(f'Received response from server. 
The code is: \"{response}\"')\n if not response.status_code == 200:\n self.handle_api_error(response)\n self.to_output_file(response.text)", "def save(self):\n f = open(self.file.name, 'w')\n json.dump(self.data,\n f,\n indent=4)\n f.close()\n return True", "def get_final_response(self,request,response):\n return response", "def save_session():\n\n filename = request.json.get(\"path\")\n finished = request.json.get(\"finished\")\n config = request.json.get(\"config\")\n\n success = engine.io.save(filename, state.proc, state.corpus, state.test_corpus, state.classifier, state.last_result, finished, config)\n\n if success:\n return jsonify({\"saved\":True})\n else:\n return 'Could not save session file.', 428", "def get_response(self):\n result = self.get_response_impl()\n if self.log_dest is not None:\n is_error, response = result\n if is_error:\n response = \"? \" + response\n else:\n response = \"= \" + response\n self._log(\"<< \", response.rstrip())\n return result", "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def reponse(self, data):\n response = self.response\n response.headers['Content-Type'] = 'application/json'\n json.dump(data, response.out)\n return response", "def record(self, response):\n self.get_recorder().record(self.request, response)", "def save(self):\n return None", "def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content", "def save():", "def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)", "def send_response():\n\n log_info(e)\n\n response = urlopen(Request(e['ResponseURL'], json_dumps(e), {'content-type': ''}, method='PUT'))\n\n if response.status != 200:\n raise Exception(response)", "def save_match_result(self):\n self.save_yn = \"Y\"\n scorer = self.leScore.text()\n assistant = self.leAssist.text()\n\n if scorer is None or scorer == \"\":\n scorer = \"-\"\n\n if assistant is None or assistant == \"\":\n assistant = \"-\"\n\n self.scorer_name = scorer\n self.assistant_name = assistant\n self.close()", "def store_string(self, string: str) -> None:", "def save(self):\n if self._cache is not None:\n with open(self.cache_path, 'w') as cache_file:\n json.dump(self._cache, cache_file)", "def setResponse(self, response):\n self.response = response\n self.field.setText(response.file_name)", "def save(self, file: io.BufferedWriter):\n if self.downloaded:\n json.dump(self.data, file, indent=2)", "def save():\n pass", "def save_result(self):\n self.logger.info(f'Saving results to {self.db_loc}s24_{self.year}.json')\n open(f'{self.db_loc}s24_{self.year}.json', 'w').write(json.dumps(self.db, indent=4, ensure_ascii=False))", "def save_(self):\n if not self._edited:\n return\n data = {'history': self.dump()}\n with open(os.path.join(os.path.dirname(self.arch_handler.dicomdir_path), self.SAVE_NAME), \"w\") as outfile:\n json.dump(data, outfile)\n self._edited = False", "def save_data(self):\n with open(self.storage_path, 'w') as cache_file:\n json.dump(self.data, cache_file)", "def _save_file(json_response, path, filename):\n if path is not None:\n if path[-1] != \"/\":\n path = path+\"/\"\n filepath = os.path.join(path, filename)\n if not os.path.exists(path):\n os.makedirs(path)\n\n with open(filepath+'.json', 'w') as output_file:\n output_file.write(json_response.text)", "def save(self):\n if 
PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def send_final_response(self, response):\n if response.get_meaning() == ResponseMeaning.Informational:\n raise TurmdrehkranException(\"Final Response can not be informational.\")\n self.client_sock.sendall(str(response))", "def save_end(self):\n data = self.savedata\n self.savedata = None\n return data", "def write(self):\n \n self.session.cookies['with_max_age'] = 'expires in {} minutes'.format(self.settings.sessionlifetime)\n sys.stdout.flush()\n sys.stdout.write(self.session.cookies.output(self.session.cookies))\n sys.stdout.write('\\n')\n sys.stdout.write(self.settings.contenttype)\n sys.stdout.write('\\n')\n sys.stdout.buffer.write(self.content.encode('utf-8'))\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def save(self):\n # TODO (Pierre): code", "def __save_txt(self, output_path: str, content: List[str]) -> None:\n file = open(output_path, \"w\")\n file.write(str(json.dumps(content)) + \"\\n\")\n file.close()", "def write(self, s):\n pass", "def __send_response(self, response):\n if isinstance(response, str):\n response = response.encode()\n logger.debug(' >>> %s', binascii.b2a_qp(chr(response[0]).encode()))\n self.push(struct.pack('!I', len(response)))\n self.push(response)", "def save(self, path: str):\n with open(path, 'w', encoding='utf-8') as f:\n f.write(self.to_json())", "def post(self):\n return write_msg(request.json)", "def save(self, path: str):\n\n\t\tinfo_dict = {\n\t\t\t\"n_gram_size\": self.n_gram_size,\n\t\t\t\"caseless\": self.caseless,\n\t\t\t\"ignore_punctuation\": self.ignore_punctuation,\n\t\t\t\"add_pos_tags\": self.add_pos_tags,\n\t\t\t\"uses_lemma\": self.uses_lemma,\n\t\t\t\"uses_sentences\": self.uses_sentences\n\t\t}\n\n\t\twith open(path, \"wt\", encoding=\"utf8\") as f:\n\t\t\tjson.dump(info_dict, f)", "async def save(self, res: dict):\n self.___cache_data(res['games'])", "def save_answer(self, data, system):\r\n # Once we close the problem, we should not allow students\r\n # to save answers\r\n error_message = \"\"\r\n closed, msg = self.check_if_closed()\r\n if closed:\r\n return msg\r\n\r\n if self.child_state != self.INITIAL:\r\n return self.out_of_sync_error(data)\r\n\r\n message = \"Successfully saved your submission.\"\r\n\r\n # add new history element with answer and empty score and hint.\r\n success, error_message, data = self.append_file_link_to_student_answer(data)\r\n if not success:\r\n message = error_message\r\n else:\r\n data['student_answer'] = OpenEndedModule.sanitize_html(data['student_answer'])\r\n success, error_message = self.send_to_grader(data['student_answer'], system)\r\n if not success:\r\n message = error_message\r\n # Store the answer instead\r\n self.store_answer(data, system)\r\n else:\r\n self.new_history_entry(data['student_answer'])\r\n self.change_state(self.ASSESSING)\r\n\r\n return {\r\n 'success': success,\r\n 'error': message,\r\n 'student_response': data['student_answer'].replace(\"\\n\", \"<br/>\")\r\n }", "def response(self):\n try:\n (code, message) = self.route_request()\n except HTTPError as e:\n logger.exception(e.message)\n logger.error(e.message)\n code = e.code\n message = e.message\n except UserError as e:\n msg = str(e)\n logger.exception(msg)\n logger.error(msg)\n code = 500\n message = {'error': msg}\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Internal error\")\n # This is an unknown 
error. Just inform there is an internal error.\n code = 500\n message = {'error': \"Internal error.\"}\n\n try:\n # Try to send the response\n self.send_response(int(code))\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.wfile.write(json.dumps(message, cls=JSONEncoder)\n .encode('utf-8'))\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Could not send response\")", "def save_data(self):\n pass", "def write_response_to_lib_folder(self, label: Optional[str], response: Response) -> None:\n cleaned_label = label.replace(\"/\", \"|\") if label else \"response\"\n file_name = cleaned_label + \" \" + str(datetime.now())\n file_ending = \".json\"\n if not os.path.exists(RECORD_PATH):\n os.mkdir(RECORD_PATH)\n proposed_file_name = os.path.join(RECORD_PATH, file_name + file_ending)\n # Cover files with the same name case\n while os.path.exists(proposed_file_name):\n length_of_file_type = len(file_ending)\n proposed_file_name = proposed_file_name[:-length_of_file_type] + \" (1)\" + file_ending\n with open(proposed_file_name, 'w') as f:\n f.write(response.text)\n if 'X-Trace-Id' in response.headers:\n log.info(cleaned_label + ' | X-Trace-Id: ' + response.headers['X-Trace-Id'])", "def save_answer(self, data, system):\r\n # Check to see if this problem is closed\r\n closed, msg = self.check_if_closed()\r\n if closed:\r\n return msg\r\n\r\n if self.child_state != self.INITIAL:\r\n return self.out_of_sync_error(data)\r\n\r\n error_message = \"\"\r\n # add new history element with answer and empty score and hint.\r\n success, error_message, data = self.append_file_link_to_student_answer(data)\r\n if success:\r\n data['student_answer'] = SelfAssessmentModule.sanitize_html(data['student_answer'])\r\n self.new_history_entry(data['student_answer'])\r\n self.change_state(self.ASSESSING)\r\n\r\n return {\r\n 'success': success,\r\n 'rubric_html': self.get_rubric_html(system),\r\n 'error': error_message,\r\n 'student_response': data['student_answer'].replace(\"\\n\",\"<br/>\")\r\n }", "def handle_request(self, given_request: Request):\n with open(request.output, mode=\"w\", encoding='utf-8') as file:\n file.write(request.result)\n return True", "def save(self, data, outpath):\n with open(path, \"wt\") as open_file:\n json.dump(data, open_file, indent=4)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def set_response(self, response_str):\r\n input_css = \"textarea.short-form-response\"\r\n self.q(css=input_css).fill(response_str)" ]
[ "0.7586363", "0.7441666", "0.67181605", "0.6693726", "0.66190016", "0.6497438", "0.64742", "0.6389973", "0.6360549", "0.62210476", "0.6212932", "0.6212932", "0.6147369", "0.6139059", "0.6138615", "0.6062147", "0.60561544", "0.6024466", "0.602372", "0.59702677", "0.5941074", "0.5938543", "0.59190494", "0.589119", "0.58899385", "0.5876065", "0.58750594", "0.5824529", "0.58165944", "0.57126415", "0.57111275", "0.57027775", "0.5699528", "0.56954235", "0.5693419", "0.5691978", "0.56911784", "0.5684392", "0.5655147", "0.5650484", "0.5632005", "0.5616834", "0.56119865", "0.5596938", "0.55843866", "0.5583053", "0.5579822", "0.55780154", "0.5567608", "0.55478585", "0.5539735", "0.55196285", "0.5484795", "0.54841155", "0.54799914", "0.5474951", "0.54627764", "0.54382306", "0.5430894", "0.5422468", "0.541924", "0.54066336", "0.5400365", "0.53934205", "0.53918487", "0.5384463", "0.5379251", "0.5377288", "0.53772545", "0.5372471", "0.53695357", "0.5367158", "0.5366864", "0.53623706", "0.5361674", "0.5332709", "0.5332374", "0.5325753", "0.53233147", "0.5321723", "0.5315423", "0.5312913", "0.53084326", "0.530406", "0.53038496", "0.5286331", "0.52858573", "0.5268503", "0.5266153", "0.52653396", "0.5250559", "0.5242761", "0.5238926", "0.5236493", "0.5232411", "0.52318364", "0.52288634", "0.52177197", "0.52177197", "0.52151144" ]
0.6328722
9
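The document for the row above is a one-line accumulator method. As an illustration only, a hypothetical minimal harness showing how such a writeresponse helper might be exercised; the ResponseBuffer class name and the sample fragments are assumptions, not part of the dataset:

class ResponseBuffer:
    # Hypothetical container that accumulates response text, mirroring the row's document.
    def __init__(self):
        self.response = ""

    def writeresponse(self, rspstr):
        # Append the new fragment to the accumulated response string.
        self.response += rspstr

if __name__ == "__main__":
    buf = ResponseBuffer()
    buf.writeresponse("HTTP/1.1 200 OK\r\n")
    buf.writeresponse("Content-Type: text/plain\r\n\r\n")
    buf.writeresponse("hello")
    print(buf.response)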
Using the Command_Handler from the command module to handle a command.
def usingHandler(self, cmd): self.command_handler.handle_command(cmd) while msg_queue.empty() is False: self.writeresponse(msg_queue.get())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _command(self, *cmd, handler=None):", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def handler(self, command, args=[]):\n ###\n # command parsing and handling logic to be implemented by child\n ###\n if not command and not hasattr(self, 'handle_'):\n return f'Service {str(self.__class__.__name__)}: {self.__doc__ or \"\"}'\n methodname = 'handle_{}'.format(command or '')\n logger.info('method name: {}'.format(methodname))\n logger.info('args: {}'.format(args))\n method = self.__getattribute__(methodname)\n return method(args)", "def handle_command(self, command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(\"HI\")\n\n # Finds and executes the given command, filling in response\n handler = self.dispatch_config.get_handler_by_command(command.split(None, 1)[0])\n if handler is None:\n print(\"unrecognized command detected: \" + command.split(None, 1)[0])\n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response\n )\n else:\n print(\"using: \" + handler[\"fullpath\"] + \" to handle the request\")\n if handler[\"class\"] in self.handlers:\n self.handlers[handler[\"class\"]].handle_command(command, channel)\n else:\n cls = locate(handler[\"fullpath\"])\n print(cls)\n self.handlers[handler[\"class\"]] = cls(self.slack_client, self.config)\n self.handlers[handler[\"class\"]].handle_command(command, channel)", "def command():\n pass", "def on_command(server, user, command, args):", "def do_command(self, args):\n pass", "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def handle_command(self, command, players, user, channel):\r\n response = self.help()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n elif command[0] == self.NEW_GAME_COMMAND:\r\n return self.new_game(players, channel)\r\n \r\n elif command[0] == self.TARGET_COMMAND:\r\n return self.target(user)\r\n \r\n elif command[0] == self.SURVIVORS_COMMAND:\r\n 
return self.survivors()\r\n \r\n elif command[0] == self.EXPIRE_COMMAND:\r\n return self.expire(channel)\r\n \r\n elif command[0] == self.REMOVE_COMMAND:\r\n return self.remove(command, channel)\r\n \r\n elif command[0] == self.KILL_COMMAND:\r\n (success, response) = self.kill(user, command)\r\n if success and self.game.get_active_channel() != \"\" and channel != self.game.get_active_channel():\r\n post_to_channel(self.game.get_active_channel(), response)\r\n return \"\"\r\n \r\n elif command[0] == self.LOAD_LAST_GAME_COMMAND:\r\n return self.load_last_game(channel)\r\n \r\n return response", "def processCommand(self, command, args):\n\n commandMap = { \n \"new\" : self.createNewList,\n \"view\" : self.trelloView,\n \"add\" : self.trelloAddCard, \n \"remove\" : self.trelloDeleteCard, \n }\n\n if command not in commandMap: return \">> Command not found\" \n \n return commandMap[command](args)", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "async def _run_command(self, command, *args, **kwargs):\n pass", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name", "def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)", "def handle(self, *args, **options):\n if not self.server:\n print 'Error : %s' % self.init_error\n return\n\n handler_choice = {\n 'proxy': self.proxy_handle,\n 'server': self.server_handle,\n }\n\n sub_command = options['sub_command']\n handler_choice.get(sub_command)(options)", "def handle_command(channel, command):\n print(\"Channel = \", channel)\n print(\"Command = \", command)\n \n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(EXAMPLE_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command == \"help\":\n response = \"Sure...write some more code then I can do that!\"\n #help command lists all possible commands\n # if command == \"help\":\n # \tresponse = \"\"\n #report command \n elif command == \"report\":\n response = \"Here I will report on stuff...\"\n else:\n response = \"Try typing help to see valid commands\"\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "async def handle_command(server, process, command):\n process.stdout.write(f'Hello {server.username}\\r\\n')\n if server.listeners:\n forwarding(server, process)\n return\n\n if command is None:\n if config.ENABLE_SHELL:\n await shell(server, process)\n\n else:\n process.stderr.write('This server does not support'\n ' interactive sessions.\\r\\n')\n logging.warning('Interactive shell disabled')\n process.exit(1)\n\n elif command not in supported_commands:\n process.stderr.write('Unsupported command\\n')\n process.exit(1)\n\n else:\n eval(f'{command}(server, process)')\n process.exit(0)", "def handle_command(self, command, channel, user):\r\n response = \"Hello. Type \\\"@hexbot help\\\" for more information\"\r\n command = command.split()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n if command[0] == self.HELP_COMMAND:\r\n response = self.help()\r\n elif command[0] == self.DEBUG_COMMAND:\r\n response = self.debug(command, channel);\r\n elif command[0] == self.ASSASSIN_COMMAND:\r\n command.pop(0)\r\n response = self.assassin(command, channel, user);\r\n \r\n return response", "def _addCommand(self, command):\n self.updater.dispatcher.add_handler(command)", "def process_command(self, command):\r\n\r\n tokenized_command = command.split() # Splitting the command and the arguments into separate list elements\r\n\r\n # In order to save a lot of code writing, we are making the command appear the same as the ones from single\r\n # iteration modes. 
This way, the same method that handles the commands in single iteration mode is now able\r\n # to process commands from the looped run mode as well.\r\n sys_argv_emulation = tokenized_command.copy()\r\n sys_argv_emulation.insert(0, \"filler argument\")\r\n\r\n if tokenized_command[0] == \"add_song\":\r\n add_media(tokenized_command[1], 0)\r\n\r\n elif tokenized_command[0] == \"delete_song\":\r\n remove_media(tokenized_command[1])\r\n\r\n elif tokenized_command[0] == \"list_media\":\r\n self.display_media_cli()\r\n\r\n elif tokenized_command[0] == \"media_folder\":\r\n self.configure_media_folder(sys_argv_emulation)\r\n\r\n elif tokenized_command[0] == \"modify_data\":\r\n self.configure_media(tokenized_command[1])\r\n\r\n elif tokenized_command[0] == \"create_save_list\":\r\n self.generate_savelist_cli(sys_argv_emulation)\r\n\r\n elif tokenized_command[0] == \"search\":\r\n self.search_cli(sys_argv_emulation)\r\n\r\n elif tokenized_command[0] == \"play\":\r\n play_media(tokenized_command[1], 1)\r\n\r\n elif tokenized_command[0] == \"load_gui\":\r\n self.run_mode = 0\r\n load_gui()\r\n\r\n elif tokenized_command[0] == \"help\":\r\n self.display_help_cli()\r\n\r\n elif tokenized_command[0] == \"quit\":\r\n sys.exit()\r\n\r\n else:\r\n print(\"\\nUnrecognized command \\\"\" + tokenized_command[0] + \"\\\".\\n\"\r\n \"Use command \\\"Help\\\" for a list of available commands.\")", "def command(self):\n raise NotImplementedError", "def handle_command(self, command, channel, user, msg_type):\n # Default response is help text for the user\n default_response = \"Does not compute. Try `<@{}> help` for command information.\".format(\n self.id)\n\n response = None\n attachment = None\n\n output(f\"Command: '{command}' - User: {user} - Channel: {channel}\")\n\n if self.db_conn:\n # TODO: create a document generator\n doc = {\n 'date': datetime.datetime.utcnow(),\n 'command': command,\n 'user': user,\n 'channel': channel\n }\n\n result = self.db_conn.insert_document(\n doc,\n db=self.db_conn.CONFIG['db'],\n collection=self.db_conn.CONFIG['collections']['cmds']\n )\n\n # TODO: Fix logging output for DB stuff\n output(\n f\"[{self.db_conn.db}: {self.db_conn.collection}] - Inserted: {result.inserted_id}\")\n\n if msg_type == \"message\":\n response, attachment = self.execute_command(\n command, cmds.COMMANDS.items(), user)\n else:\n response, channel = self.execute_command(\n command, cmds.COMMANDS_HIDDEN.items(), user)\n\n # TODO: Make a better name for out\n out = Response(channel, response or default_response, attachment)\n\n # Log response\n if self.db_conn:\n response_type = \"attachment\" if out.attachment else \"response\"\n update = {'$set': {\n 'response': {\n 'date': datetime.datetime.now(),\n 'type': response_type,\n 'message': out.attachment or out.message or default_response,\n 'channel': out.channel\n }\n }}\n\n result = self.db_conn.update_document_by_oid(\n result.inserted_id,\n update,\n db=self.db_conn.CONFIG['db'],\n collection=self.db_conn.CONFIG['collections']['cmds']\n )\n\n output(\n f\"[{self.db_conn.db}: {self.db_conn.collection}] - Updated: {result.raw_result}\")\n\n return out", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. 
Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def process_cmd(self, cmd):\n\n resp = self.COMMANDS[cmd.cmd](cmd)\n\n logger.debug(\"Resp: %s\" % resp)\n # send to resp_queue\n # if type == G.CTRL_TYPE:\n #\n # response = json.dumps((corr_id, routing_key, resp))\n # logger.debug(\"Sending response: %s\" % response)\n # self.out_queue.put(response)\n\n response = cmd.make_response(resp)\n logger.debug(\"Sending response: %s\" % response)\n self.out_queue.put(str(response))", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def handle_user_command(self, command):\n\n out, err = pyautogit.commands.handle_custom_command(command)\n self.show_command_result(out, err, command_name=command)\n self.refresh_status()", "def get_command_handler(self, command):\n try:\n command_handler = getattr(self, \"command_{}\".format(command))\n except AttributeError:\n raise AttributeError(\"Unknown command: '{}'\".format(command))\n\n return command_handler", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.startswith(EXAMPLE_COMMAND):\n #response = \"Sure...write some more code then I can do that!\"\n command1, command2, food, floor = command.split()\n \n find_value(food+floor, call_response)\n response = food + \" is\" + find_value + \" there\"\n \n #response = food + \" is available on floor \" + floor\n \n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def handle_command(command, channel):\r\n # Default response is help text for the user\r\n default_response = \"Not sure what you mean\"\r\n default_food_response = \"I didn't quite catch that, but I see that you mentioned something about food. 
If you want me to order some food, try: @Starter Bot Order <<food>>\"\r\n\r\n # Finds and executes the given command, filling in response\r\n # This is where you start to implement more commands!\r\n response = None\r\n\r\n verb_list=['order','place','make']\r\n food_list = [line.rstrip('\\n') for line in open('food.txt')]\r\n\r\n print(\"Made the lists\")\r\n\r\n predictor = Predictor.from_path(\"srl-model-2018.05.25.tar.gz\")\r\n result=predictor.predict(command)\r\n print(result)\r\n\r\n for dictionary in result['verbs']:\r\n verb = dictionary['verb']\r\n if verb in verb_list:\r\n if verb=='order':\r\n try:\r\n response = dictionary['description']\r\n response=response.split('ARG1: ')[1].replace(']','')\r\n except:\r\n print(\"We did an oopsie here\")\r\n\r\n print(\"Went through the dictionaries\")\r\n\r\n if response == None:\r\n for word in command:\r\n if word in food_list:\r\n response=default_food_response\r\n break\r\n\r\n # Sends the response back to the channel\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=response or default_response\r\n )", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(HELP_COMMAND)\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.startswith(HELP_COMMAND):\n response = \"\"\"choc - displays chocolate emoji\n \\nping - ping kitkat bot\n \\nbots - displays all the slack bots\n \\nexit - exits\"\"\"\n elif command.startswith(CHOC_COMMAND):\n response = \":chocolate_bar:\"\n elif command.startswith(BOTS_COMMAND):\n response = ''\n request = slack_client.api_call(\"users.list\")\n if request['ok']:\n for item in request['members']:\n if item['is_bot']:\n response = response + item['name'] + \"\\n\"\n elif command.startswith(EXIT_COMMAND):\n # slack_client.api_call(\"channels.leave\")\n\n response = \"Bye Bye\"\n global exit_flag\n exit_flag = True\n elif command.startswith(PING_COMMAND):\n response = \"Uptime {}\".format(time.time() - start_time)\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def execute_command(self):\n raise Exception(\"Not implemented\")", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def handle_command(command, channel):\n\n # Default response is help text for the user\n default_response = \"Hmm, I don't understand.\"\n\n ABOUT_COMMAND = 'about'\n HELP_COMMAND = 'help'\n\n # Finds and executes the given command, filling in response\n response = None\n\n # This is where you start to implement more commands!\n if command.startswith(ABOUT_COMMAND):\n response = about_course(command)\n elif command.startswith(HELP_COMMAND):\n response = help_text(command)\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def 
handle_command(command, channel):\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\n\n response = None\n\n if command.startswith(EXAMPLE_COMMAND):\n response = \"Sure...write some code then I can do that!\"\n elif command.startswith(\"date\"):\n response = currentDate()\n elif command.startswith(\"time\"):\n response = currentTime()\n elif command.startswith(\"your my best friend\") or command.startswith(\"you are my best friend\"):\n response = \"Thanks so much, buddy!!! \\n Your the best!!\"\n elif command.startswith(\"hello\") or command.startswith(\"hi\") or command.startswith(\"hey\"):\n response = \"Hello, My name is BackTalker\"\n elif command.startswith(\"thanks\") or command.startswith(\"thank you\"):\n response = \"Your Welcome\"\n elif command.startswith(\"math\"):\n problem = command[4:]\n response = \"The answer for {} is {}\".format(problem, str(eval(problem)))\n elif command.startswith(\"say something\"):\n response = compliments() \n elif command.startswith(\"weather\"):\n response = currentWeather()\n elif command.startswith(\"cmpt371\"):\n word = command[8:]\n response = cmpt371(word)\n\n\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def ConsoleRun(self, command, sender):\n pass", "def _handle(self):\r\n while True:\r\n cmd = self.inq.get()\r\n args = cmd.strip('\\r\\n').split(' ')\r\n if cmd.find('quit') == 0:\r\n self.outq.put('bye-bye\\r\\n')\r\n break\r\n elif args[0] in self.cmds:\r\n try:\r\n answer = self.cmds[args[0]](args)\r\n self.outq.put(\"%s done: %s\\r\\n\" % (args[0], answer))\r\n except Exception as msg:\r\n self.outq.put(\"%s error: %s\\r\\n\" % (args[0], msg))\r\n else:\r\n self.outq.put(\"error: unknown command %s\\r\\n\" % (args[0]))", "def _pass_to_handler(self, origin, token, args):\r\n try:\r\n self._handlers[token].handle(origin, args)\r\n except KeyError:\r\n raise ParseError(\"Unknown command\", None)", "def handle_command(command, channel, kma_crawler):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.startswith('help'):\n response = \"'weather 지역 (날짜 YYYYmmddHHMM)'형식으로 입력해주세요\"\n\n elif command.startswith('weather'):\n command_split = command.split(' ')\n\n if len(command_split) == 1:\n respones = \"'weather 지역 (날짜 YYYYmmddHHMM)'형식으로 입력해주세요\"\n elif len(command_split) == 2:\n response = kma_crawler.run(str(command_split[1]))\n elif len(command_split) > 2:\n response = kma_crawler.run(str(command_split[1]), command_split[2])\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def _handle_command(self, command: Command) -> None:\n if isinstance(command.result, LoadLabwareResult):\n # If the labware load refers to an offset, that offset must actually exist.\n if command.result.offsetId is not None:\n assert command.result.offsetId in self._state.labware_offsets_by_id\n\n definition_uri = uri_from_details(\n namespace=command.result.definition.namespace,\n load_name=command.result.definition.parameters.loadName,\n version=command.result.definition.version,\n )\n\n self._state.definitions_by_uri[definition_uri] = command.result.definition\n\n self._state.labware_by_id[\n command.result.labwareId\n ] = LoadedLabware.construct(\n id=command.result.labwareId,\n location=command.params.location,\n loadName=command.result.definition.parameters.loadName,\n definitionUri=definition_uri,\n offsetId=command.result.offsetId,\n displayName=command.params.displayName,\n )\n\n elif isinstance(command.result, MoveLabwareResult):\n labware_id = command.params.labwareId\n new_location = command.params.newLocation\n new_offset_id = command.result.offsetId\n\n self._state.labware_by_id[labware_id].offsetId = new_offset_id\n self._state.labware_by_id[labware_id].location = new_location", "def execute_command(self, command):\n raise NotImplementedError", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(QUESTION_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.endswith(\"?\") or command.endswith(\"!\") or command.endswith(\".\"):\n command = command[:-1]\n # print(f\"command is {command}\")\n parsed_command = command.lower().split(\" \")\n print(f\"parsed_command: {parsed_command}\")\n # Extract the question number\n\n question_number = parsed_command[-1]\n\n print(f\"The question number is {question_number}\")\n if \"quiz\" or \"ask\" in parsed_command:\n # Call function to return question from a database\n q_or_a = \"q\"\n if \"answer\" in parsed_command:\n # print(\"answer\")\n q_or_a = \"a\"\n\n response = return_response(question_number, q_or_a)\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def handle_commands(args_in):\n args = [arg.lower() for arg in args_in]\n\n # available commands\n # TODO consider consolidating with help()\n commands = [\"update\", \"list\", \"show\", \"rename\", \"delete\", \"stats\", \"plot\"]\n\n if len(args) == 0:\n help()\n\n command = args[0]\n if command in [\"help\", \"usage\"]:\n if len(args) > 2:\n error(\"Too many args provided for 'help'\")\n elif len(args) == 2:\n help(args[1])\n else:\n help()\n if command not in commands:\n error(\"Invalid command: '{}'\".format(args[0]))\n\n # Map to command's handler function\n # Remaining args are passed regardless, dealt with in handler\n handlers = {\n \"update\" : update_tracker,\n \"list\" : display_list,\n \"show\" : show_tracker,\n \"rename\" : rename_tracker,\n \"delete\" : delete_tracker,\n \"stats\" : display_stats,\n \"plot\" : display_plot\n }\n handlers[command](args[1:])", "def process_command(self, cmd, config):\n return None, None", "def mainCommand(self, args):\r\n command = args.pop(0).lower() # calls exception if no arguments present\r\n if command in vars(CommandManager):\r\n vars(CommandManager)[command](self, *args) # calls exception if wrong amount of arguments\r", "def process_command(self, command, discord_id):\n\n try:\n character = self.known_characters[discord_id]\n except KeyError:\n print(\"Process_command got message from unregistered player, this should not happen\")\n return\n\n character.clear_log()\n self.current_character = character # this is for directing log messages to the appropriate log\n # it is reset at the start of every turn obviously\n\n splitted = command.split(\" \", maxsplit=1) # just take off the first verb for use as command\n if len(splitted) == 1:\n cmd = splitted[0]\n words = \"\"\n else:\n cmd, words = splitted\n if cmd not in self.command_dict.keys():\n character.log(\"Unrecognised command: {}\", cmd)\n return character.print_log() # return early because couldn't do anything\n else:\n executable_command = self.command_dict[cmd]\n # the name of the command as it appears in the object's __dict__\n\n if executable_command == \"on_status\":\n # special command with no target object, just prints player stats and return early\n character.report_status()\n return character.print_log()\n\n resolution_order = [character.equipped, character.items, character.visible_things] # reset everytim\n if executable_command == \"on_take\":\n resolution_order.reverse() # player wants to take visible things, not equipped things.\n\n args = []\n target = None\n\n for ls in resolution_order:\n # the order of these lists is important: items 
equipped or held by the player\n # must take precedence, otherwise if a player tries to unequip a worn item in a\n # room that contains an item with the same name, the command dispatcher might pick up\n # the room's version of the item first and fail to unequip it. These cases should be rare.\n for k in ls:\n # first check for exact words\n if k.__doc__ in words:\n if target is None:\n target = k # target first, then args, to cope with \"use x on y\"\n else:\n args.append(k)\n\n if len(args) == 0 and len(words) > 0:\n for ls in resolution_order:\n # then check for partially-typed words if nothing was found\n for k in ls:\n if words in k.__doc__:\n if target is None:\n target = k\n else:\n args.append(k)\n\n if executable_command == \"on_go\":\n for direction in [\"north\", \"south\", \"east\", \"west\"]:\n # all directions are permitted because if it's not valid it will be caught by\n # the room's on_go function\n if direction in words:\n args.append(direction)\n target = character.location\n\n if target is None:\n\n if len(words) > 0:\n character.log(\"Unrecognised target: {}.\", words)\n return character.print_log()\n\n if executable_command == \"on_attack\":\n # player might have mistyped a name or just attack with no monster, consistently pick the\n # first monster for them to attack, if present. If not, pass it on to self.location\n # which will of course fail\n if character.check_if_monsters():\n target = character.monsters_in_play[0]\n\n else:\n # either the player typed (\"look\"), which is just to look at the room,\n # or they typed any other no-argument command which is handled by\n # the MyItem class e.g. status, quit\n target = character.location\n\n try:\n to_run = target.__getattribute__(executable_command)\n # look up the command in target's dictionary\n\n except AttributeError:\n character.log(\"Can't {} this.\", cmd)\n return character.print_log()\n\n # THE IMPORTANT PART #\n to_run(*args) # evaluate the command we looked up, passing the arguments the player typed\n\n if not (executable_command in [\"on_go\", \"on_look\", \"on_attack\"]):\n # monsters only attack if the player is still, otherwise they'd attack every time the\n # player ran and running would be pointless\n # not really fair to have the look command trigger attacks either, but anything else\n # is fair game e.g. 
interacting with objects\n for mon in character.monsters_in_play:\n mon.attack_player()\n\n if not executable_command == \"on_look\":\n # only process heartbeats if the player command actually did something\n for item in self.registered_countdowns:\n item.heartbeat()\n\n return character.print_log()", "def __addCommandHandler(self, command, type = 'channel', requiresdb = False):\n try:\n # ensure we are dealing with booleans\n if not requiresdb:\n requiresdb = False\n else:\n requiresdb = True\n\n # add the handler\n # check for existing command type\n if self.__commandHandlerTypeExists(type):\n cmdExec = self.__getFullCommandName(command, type)\n\n # if database required but no database available raise exception\n if requiresdb and not self.__databaseAvailable:\n raise ConfigurationException(CONFIG_DATABASE_NOT_AVAILABLE % cmdExec)\n\n # add handler only if the correct method exists\n if self.__commandExists(command, type):\n cmdHandler = {'func': getattr(self, cmdExec),\n 'db': requiresdb}\n self.__commandHandlers[type][command] = cmdHandler\n else:\n raise ConfigurationException(CONFIG_COMMAND_EXEC_NOT_FOUND % cmdExec)\n else:\n raise ConfigurationException(CONFIG_COMMAND_TYPE_NOT_FOUND % type)\n\n except ConfigurationException, (e):\n print 'Configuration failed: ',\n print 'Could not add the command handler for %s: ' % command\n print e.parameter", "def dispatch_command(self, args):\n\t\targuments = {k: v for k, v in vars(args).items() if v is not None}\n\t\tfor c in self.COMMANDS.keys():\n\t\t\tcmd = arguments.get(c, False)\n\t\t\tidx = c\n\t\t\tif cmd:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn None\n\n\t\tif cmd not in self.COMMANDS[idx]:\n\t\t\traise CommandNotFoundError(\"{cmd} not registered\".format(cmd=cmd))\n\n\t\treturn getattr(self, self.COMMANDS[idx][cmd])(arguments)", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def handle_command(command, metadata):\n # Default response is help text for the user\n default_response = \"Not sure what you mean, <@{}>. 
Try *{}*.\".format(metadata[\"user\"], HELP_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n\n # Check command for FAQ keywords\n if is_question(command.lower()):\n debug_print(\"Found a question\")\n for keyword in PARSED_FAQ.keys():\n if keyword in command.lower():\n response = PARSED_FAQ[keyword]\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=metadata[\"channel\"],\n text=response or default_response,\n thread_ts=metadata[\"ts\"]\n )", "def cmd(self):", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "def do_command(self, command, c, e):\n # get command type\n cmdtype = self.__resolveCommandType(command, e)\n\n # ensure the cmd is valid\n if self.__commandExists(command, cmdtype):\n try:\n # only if command is registered\n if self.__commandHandlers[cmdtype].has_key(command):\n # check for recovered db\n if EVENT_MCX_DATABASE_RECOVERED.isSet():\n self.__databaseAvailable = True\n\n # if database required but not available\n if self.__commandHandlers[cmdtype][command]['db'] == True and not self.__databaseAvailable:\n # tell the user\n self.__privMsg(c, e, DATABASE_SERVER_NOT_AVAILABLE)\n # otherwise execute command\n else:\n self.__commandHandlers[cmdtype][command]['func'](c, e)\n # command not registered, tell the user\n else:\n self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))\n # database was set, but is not available anymore\n except NoDatabaseException, (error):\n self.__databaseAvailable = False\n self.__privMsg(c, e, DATABASE_CONNECTION_INTERRUPTED)\n # fire event\n if not EVENT_MCX_DATABASE_LOST.isSet():\n EVENT_MCX_DATABASE_LOST.set()\n # command does not exist\n else:\n self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))", "def command(self, cmd):\n self.lmp.command(cmd)", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. 
{e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "def accept_command():\n # TODO", "def main():\n args = arg_parser().parse_args()\n logger = _configure_logger(args.verbose, args.debug)\n\n if args.command == \"check\":\n handler = HandleCheck(arg_input=args.INPUT, logger=logger)\n\n if args.command == \"config\":\n handler = HandleConfig(arg_name=args.name, arg_raw=args.raw, logger=logger)\n\n if args.command == \"create\":\n handler = HandleCreate(\n arg_input=args.input,\n arg_labels=args.labels,\n arg_name=args.name,\n arg_size=args.size,\n arg_pixel_size=args.pixel_size,\n arg_testsplit=args.testsplit,\n arg_validsplit=args.validsplit,\n arg_minspots=args.minspots,\n logger=logger,\n )\n\n if args.command == \"download\":\n handler = HandleDownload(\n arg_input=args.input, arg_list=args.list, arg_all=args.all, logger=logger\n )\n\n if args.command == \"predict\":\n handler = HandlePredict(\n arg_model=args.model,\n arg_input=args.input,\n arg_output=args.output,\n arg_radius=args.radius,\n arg_shape=args.shape,\n arg_probability=args.probability,\n arg_pixel_size=args.pixel_size,\n logger=logger,\n )\n\n if args.command == \"train\":\n handler = HandleTrain(arg_config=args.config, arg_gpu=args.gpu, logger=logger)\n\n if args.command == \"visualize\":\n handler = HandleVisualize(\n arg_dataset=args.dataset,\n arg_subset=args.subset,\n arg_index=args.index,\n arg_image=args.image,\n arg_prediction=args.prediction,\n logger=logger,\n )\n\n try:\n handler()\n except UnboundLocalError:\n logger.warning(f\"args.command defined as {args.command}. 
no handler defined\")", "def handle_command(log, writer, data):\n\n response = 'BAD: Invalid Command'\n commandList = data.split()\n\n try:\n if commandList[0] == 'expose':\n if len(commandList) == 3:\n if commandList[1] == 'light' or commandList[1] == 'dark' or commandList[1] == 'flat':\n expType = commandList[1]\n expTime = commandList[2]\n try:\n float(expTime)\n if float(expTime) > 0: \n expTime = float(expTime)\n fileName = exposure(expType, expTime)\n response = 'OK\\n'+'FILENAME = '+fileName\n else:\n response = 'BAD: Invalid Exposure Time'\n except ValueError:\n response = 'BAD: Invalid Exposure Time'\n elif len(commandList) == 2:\n if commandList[1] == 'bias':\n expType = commandList[1]\n try: \n fileName = exposure(expType, 0.0)\n response = 'OK\\n'+'FILENAME: '+fileName\n except ValueError:\n response = 'BAD: Invalid Exposure Time'\n elif commandList[0] == 'set':\n if len(commandList) >= 1:\n response = setParams(commandList[1:])\n except IndexError:\n response = 'BAD: Invalid Command'\n \n # tell the client the result of their command & log it\n #log.info('RESPONSE = '+response)\n #writer.write((response+'\\n---------------------------------------------------\\n').encode('utf-8'))\n writer.write((response+'\\nDONE\\n').encode('utf-8'))", "def execute_command(command):\r\n if 0 == len(command):\r\n return\r\n\r\n if command[0] in verbs[\"move\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"go where?\")\r\n else:\r\n execute_go(command[1])\r\n\r\n elif command[0] in verbs[\"take\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Take what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_take(item_id)\r\n\r\n elif command[0] in verbs[\"drop\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Drop what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_drop(item_id)\r\n\r\n elif command[0] in verbs[\"use\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"use what?\")\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n execute_use(item_id)\r\n\r\n elif command[0] in verbs[\"look\"]:\r\n if len(command) == 1:\r\n print_room(current_room)\r\n elif command[1] in nouns[\"inventory\"]:\r\n print_inventory_items(inventory)\r\n elif command[1] in nouns[\"self\"]:\r\n print_condition()\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if item_id in inventory.keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif item_id in current_room[\"items\"].keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif entity_id in current_room[\"entities\"].keys():\r\n wrap_print(entities[entity_id][\"description\"])\r\n else:\r\n wrap_print(\"You can not view that.\")\r\n\r\n elif command[0] in verbs[\"attack\"]:\r\n if len(command) > 2:\r\n item_id = get_multi_word_string(command, items)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if len(command) <= 1:\r\n wrap_print(\"attack what?\")\r\n elif entity_id not in current_room[\"entities\"].keys():\r\n 
wrap_print(\"You cannot attack that.\")\r\n elif len(command) <= 2:\r\n wrap_print(\"What with?\")\r\n elif item_id not in inventory.keys():\r\n wrap_print(\"You do not have a that item.\")\r\n elif items[item_id][\"damage\"] == False:\r\n wrap_print(\"You cannot attack using that item.\")\r\n else:\r\n execute_attack(entity_id, item_id)\r\n\r\n elif command[0] == \"help\":\r\n print(\"To move in a given direction type: go <DIRECTION>\")\r\n print(\"To pick up an item type: take <ITEM>\")\r\n print(\"To drop an item type: drop <ITEM>\")\r\n print(\"To use an item type: use <ITEM>\")\r\n print(\"To look at something of interest type: view <ITEM>\")\r\n print(\"to attack a character type: attack <CHARACTER> with <item>\")\r\n print(\"to : attack <CHARACTER> with <item>\")\r\n print(\"To quit the game type: quit\\n\")\r\n wrap_print(\"\"\"Verb variations are supported, so 'run south', or 'inspect item' are valid inputs.\"\"\")\r\n wrap_print(\"\"\"Items and characters with multiple words in their name are also supported like regular items.\"\"\")\r\n\r\n elif command[0] == \"quit\":\r\n if len(command) == 1:\r\n wrap_print(\"goodbye!\")\r\n global playing\r\n playing = False\r\n\r\n else:\r\n wrap_print(\"That makes no sense.\")", "def __command_handler(self,\n command: int) -> Callable[[Dict[str, int]], None]:\n\n return {\n 0x00: self.__update_health,\n 0x0A: self.__update_warning,\n 0x0C: self.__update_firmware_state,\n 0x05: self.__update_modules,\n 0x07: self.__update_topology,\n 0x1F: self.__update_property,\n }.get(command, lambda _: None)", "def handle_command(command, channel_id):\n current_channel = None\n for channel in channels:\n if channel.get_channel_id == channel_id:\n current_channel = channel\n break\n\n # Default response is help text for the user\n default_response = \"Please use `help` to find ways to interact with me\"\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command.startswith(\"Hi\"):\n response = \"Hi, sup?\"\n \n elif command.startswith(\"who created you?\"):\n response = \"I was created by Neha and Bari from Pathashala-63, cool people right?\"\n \n elif command.startswith(\"help\"):\n response = \"Here are the commands you can use to interact with me:\\n\" + \"`info` - about me\\n\" + \"`pair info` - information about usage of pairBot for pairing\\n\"\n\n elif command.startswith(\"info\"):\n response = \"Hello, My name is PairingBot\"\n\n elif command.startswith(\"pair info\"):\n response = \"`pair name1, name2, name3, name4….` - sends pair combinations everyday at 9:00 AM\\n\" + \"`pair skip` - skips next day\\n\" + \"`pair display`- displays pairs for current day\\n\"\n\n elif command.startswith(\"start\"):\n add_channel(channel_id)\n handle_given_names(command[6:], current_channel)\n response = \"Successfully generated all the combinations, please use `pairs` command for more information\"\n\n elif command.startswith(\"pair skip\"):\n current_channel.skip()\n response = \"Skipped next notification\"\n\n elif command.startswith(\"pair display\"):\n #display current pair by getting from the database\n response = \"This is not yet implimented\"\n \n if current_channel == None:\n channel_to_send = channel_id\n else:\n channel_to_send = current_channel.get_channel_id()\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel_to_send,\n text=response or default_response\n )", "def work(self):\n\n cmd = 
self.options.command\n cmdargs = self.options.args\n\n # find function\n fname = \"cmd_\" + cmd.replace('-', '_')\n if not hasattr(self, fname):\n self.log.error('bad subcommand, see --help for usage')\n sys.exit(1)\n fn = getattr(self, fname)\n\n b = inspect.signature(fn).bind(*cmdargs)\n\n fn(*b.args, **b.kwargs)", "def handle_command(command, channel):\n # Default response is help text for the user\n default_response = \"Uknown command. Try *{}* for list of commands and usage.\".format(\"help\")\n\n # Finds and executes the given command, filling in response\n response = None\n \n # get the location, locations and locaiton ids\n l = Locations()\n provinces = l.get_provinces()\n province_ids = l.get_province_ids() \n \n # locations\n if (command == \"locations\"):\n # print a message of the available locations\n provinces_string = \"\\n\"\n for province_no in provinces:\n provinces_string += str(province_no) + \": \" + provinces[province_no] + \"\\n\"\n response = \"Choose a locaton number from\" + provinces_string\n\n # help\n elif (command == \"help\"):\n # print out all the usable commands\n response = \"Here are list of commands to get you started.\" + (\n \"\\n\\n*{}* _product name_ *{}* _location number_\".format(\n \"peek\", \"in\")) + (\n \"\\nEXAMPLE: To get the lowest gtx 1070 in Ontario\") + (\n \"\\n peek gtx 1070 in 0\") + (\n \"\\n\\n*{}* _product name_ *{}* _location number_ *{}* _database name_\".format(\n \"peek\", \"in\", \"saveas\")) + (\n \"\\n EXAMPLE: Find cheapest scarlett 2i2 in BC put it in 'mytable.db'\") + (\n \"\\n peek scarlett 2i2 in 2 saveas mytable\") + (\n \"\\n\\n*{}*\".format(\"locations\")) + (\n \"\\n Lists all the location numbers\")\n \n # peek <product name> in <location number>\"\n # startswith peek\n # has \" in \"\n # product name = command[index(peek): index(in)]\n # location = command[index(in):]\n # remove beggining and end spaces from both \n elif (command.startswith(\"peek \") and \" in \" in command):\n peek_right_index = command.find(\"peek \") + len(\"peek \")\n in_left_index = command.find(\" in \")\n in_right_index = command.find(\" in \") + len(\" in \")\n \n # product is between peek and in\n product = command[peek_right_index:in_left_index]\n\n # check if a valid location filter is used\n after_in = command[in_right_index:]\n \n #print (after_in)\n # if a valid location filter is used\n if (after_in.strip().isdigit()):\n province_num = int(after_in.strip())\n website = url(product, province_num, provinces, province_ids)\n (product_name, product_price, link, date) = cheapest(website, product)\n \n response = \"The cheapest \" + product + \" in \" + provinces[province_num] + (\n \" is the \\n\" + \"<{}|{}>\".format(link, product_name) + \"\\n\") + (\n \"costing $\" + str(product_price) + \", posted \" + date)\n \n # check if the after_in is of the form <number> ... 
saveas <text>\n elif (\" saveas \" in after_in):\n \n saveas_left_index = after_in.find(\" saveas \")\n saveas_right_index = after_in.find(\" saveas \") + len(\" saveas \")\n \n before_saveas = after_in[:saveas_left_index]\n # check if valid location is used\n if (after_in[:saveas_left_index].strip().isdigit()):\n \n # get the province num\n province_num = int(after_in[:saveas_left_index].strip())\n website = url(product, province_num, provinces, province_ids)\n (product_name, product_price, link, date) = cheapest(website, product) \n \n table_name = after_in[saveas_right_index:]\n db(product_name, product_price,\n provinces[province_num], link, date, table_name)\n response = \"Added cheapest \" + product + \" in \" + (\n provinces[province_num]) + (\n \" \\n\" + \"<{}|{}>\".format(link, product_name) + \"\\n\") + (\n \"costing $\" + str(product_price) + \", posted \" + date + (\n \"\\nto the database named \" + (\n \"*{}*\".format(table_name) + \"!\")))\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def _receive_command(self, command):\n if command.startswith('RET '):\n print(command[4:]) # Return value\n elif command.startswith('ERROR '):\n logger.error('JS - ' + command[6:].strip())\n elif command.startswith('WARN '):\n logger.warn('JS - ' + command[5:].strip())\n elif command.startswith('PRINT '):\n print(command[5:].strip())\n elif command.startswith('INFO '):\n logger.info('JS - ' + command[5:].strip())\n elif command.startswith('SET_PROP '):\n # todo: seems weird to deal with here. implement by registring some handler?\n # Should be better when we implement a more formal protocol\n _, id, name, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._set_prop_from_js(name, txt)\n elif command.startswith('SET_EVENT_TYPES '):\n _, id, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._set_event_types_js(txt)\n elif command.startswith('EVENT '):\n _, id, name, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._emit_from_js(name, txt)\n else:\n logger.warn('Unknown command received from JS:\\n%s' % command)", "def _handle_bot_command(self, bot_command: BotCommand) -> str:\n try:\n player = self.ping_pong_service.get_player(bot_command.sender_id)\n except pingpong_service.PlayerDoesNotExist:\n self.ping_pong_service.add_new_player(bot_command.sender_id)\n return responses.new_player()\n\n if bot_command.command_type is None:\n return responses.unknown_command()\n elif bot_command.command_type == CommandType.HELP:\n return responses.help()\n elif bot_command.command_type == CommandType.NAME:\n if bot_command.command_value:\n success = self.ping_pong_service.update_display_name(player, bot_command.command_value.lower())\n if success:\n return responses.name_updated(bot_command.command_value.lower())\n else:\n return responses.name_taken()\n else:\n return responses.name(player.name)\n elif bot_command.command_type == CommandType.MATCH:\n return self._handle_match_command(bot_command.command_value)\n elif bot_command.command_type == CommandType.STATS:\n name = bot_command.command_value\n if name:\n try:\n rating, wins, losses, ratio = self.ping_pong_service.get_player_stats(name)\n return responses.player_stats(name, rating, ratio, wins, losses)\n except pingpong_service.PlayerDoesNotExist:\n return responses.player_does_not_exist()\n else:\n return 
responses.stats(\n self.ping_pong_service.get_total_matches(), self.ping_pong_service.get_leaderboard()\n )\n elif bot_command.command_type == CommandType.UNDO:\n return responses.unknown_command()\n # w_name, w_rating, l_name, l_rating = pingpong_service.undo_last_match()\n # return responses.match_undone(w_name, w_rating, l_name, l_rating)\n return responses.unknown_command()", "def command_handling(args, log=COMMAND_LOG):\n # Create the Command object\n command = Command(args, None)\n\n # Resume calls are not logged\n if not command.resume:\n u.sys_log_message(command.command.replace('\\\\', '\\\\\\\\'), log_file=log)\n\n return command", "def send_command_line(self, command):\n raise NotImplementedError", "def commands():", "def do_command(self, args = ()):\n if len(args) == 0:\n self.do_overview()\n elif len(args) != 1:\n raise ValueError('Wrong number of arguments.')\n elif args[0] in self.base.commands.keys():\n self.do_command_help(args[0])\n else:\n raise ValueError('No such command.')", "def handle_command(self, command, user, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean\"\n\n # Finds and executes the given command, filling in response\n response = None\n print('From Usr= '+str(user))\n if len(self.game.players)>=1 : \n print('Expected Usr= ' + str(self.game.players[self.cur_player].user_id))\n\n # STATE INIT\n if self.game_state == GAME_STATE_INIT :\n\n # Join game\n if command.startswith(\"join\"):\n res = self.game.add_player(user)\n user_name = self.get_users_displayname(user)\n if res == 1 :\n response = \"Welcome Aboard \" + str(user_name) + \"! :partyparrot:\"\n response_type = \"public\"\n else : \n response = \"You are already playing...\"\n response_type = \"public\"\n\n # Leave Game\n if command.startswith(\"leave\"):\n self.game.leave_game(user)\n response = \"User has left the game\"\n\n # Start Round\n if command.startswith(\"start\"):\n if len(self.game.players) > 1 :\n self.game.start_game()\n response = \"Let's begin\"\n response_type = \"public\"\n self.game_state = GAME_STATE_SET_BLINDS\n # Notify each player of their hand\n for player in self.game.players: \n private_response = \"Your hand: \"\n private_response += player.hand.print_hand()\n self.slack_client.api_call(\n \"chat.postEphemeral\",\n channel=channel,\n text=private_response,\n user=player.user_id \n )\n self.slack_client.api_call(\n \"channels.setTopic\",\n channel=channel,\n topic=\"A game is in progress! :congaparrot::congaparrot::congaparrot::congaparrot:\"\n ) \n else : \n response = \"Not enough players have joined yet.\"\n \n # State Betting\n if self.game_state == GAME_STATE_BETTING :\n responce_type = \"public\"\n # Check if user can actually play...\n if self.game.players[self.cur_player].active and \\\n not self.game.players[self.cur_player].all_in and \\\n self.game.players[self.cur_player].user_id == user:\n # Raising\n valid_command = False\n if command.startswith(\"raise \") :\n raise_str = command[6:].strip()\n if raise_str.isdigit() : \n res = self.game.raise_bet(self.game.players[self.cur_player].user_id,int(raise_str))\n if res == 2 :\n response = \"Player is all in!\"\n valid_command = True\n elif res == 1 :\n response = \"Current bet is set to \" + str(self.game.max_bet)\n valid_command = True\n else : \n response = \"... 
You can't raise '\" + raise_str +\"'\"\n \n # Calling\n if command.startswith(\"call\"):\n res = self.game.call(self.game.players[self.cur_player].user_id)\n response = \"Player calls.\"\n valid_command = True\n # All In\n if command.startswith(\"all\"):\n self.game.go_all_in(self.game.players[self.cur_player].user_id)\n response = \"Player is all in!\"\n valid_command = True\n # Fold\n if command.startswith(\"fold\"):\n self.game.fold(self.game.players[self.cur_player].user_id)\n response = \"Player folds\"\n valid_command = True\n # Check\n if command.startswith(\"check\"):\n res = self.game.check(user)\n response = \"Player Checks\"\n if res == 1 : \n valid_command = True\n\n # Move onto next player after the current player makes a move\n if valid_command :\n self.cur_player = ((self.cur_player+1)%len(self.game.players))\n while not self.game.players[self.cur_player].active :\n self.cur_player = ((self.cur_player+1)%len(self.game.players))\n print(self.cur_player)\n\n \n \n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "def handle_request_command(self, msg):\n\n\t\t# only one command?\n\t\tcommand = None\n\t\tif msg.arguments:\n\t\t\tcommand = msg.arguments[0]\n\n\t\tmodule_name = moduleManager.module_providing(self.__command_list, command)\n\n\t\ttry:\n\t\t\t# check if the module exists in the module manager\n\t\t\tmoduleManager[module_name]\n\t\texcept KeyError:\n\t\t\t# the module has been removed from moduleManager (probably through a reload)\n\t\t\tCORE.warn('Module %r (command=%r, id=%r) does not exists anymore' % (module_name, command, msg.id))\n\t\t\tmoduleManager.load()\n\t\t\tself._reload_acls_and_permitted_commands()\n\t\t\tmodule_name = None\n\n\t\tif not module_name:\n\t\t\traise Forbidden()\n\n\t\tif msg.arguments:\n\t\t\tif msg.mimetype == MIMETYPE_JSON:\n\t\t\t\tis_allowed = moduleManager.is_command_allowed(self.acls, msg.arguments[0], options=msg.options, flavor=msg.flavor)\n\t\t\telse:\n\t\t\t\tis_allowed = moduleManager.is_command_allowed(self.acls, msg.arguments[0])\n\t\t\tif not is_allowed:\n\t\t\t\traise Forbidden()\n\t\t\tif module_name not in self.__processes:\n\t\t\t\tCORE.info('Starting new module process and passing new request to module %s: %s' % (module_name, str(msg._id)))\n\t\t\t\ttry:\n\t\t\t\t\tmod_proc = ModuleProcess(module_name, debug=MODULE_DEBUG_LEVEL, locale=self.i18n.locale)\n\t\t\t\texcept EnvironmentError as exc:\n\t\t\t\t\tmessage = self._('Could not open the module. %s Please try again later.') % {\n\t\t\t\t\t\terrno.ENOMEM: self._('There is not enough memory available on the server.'),\n\t\t\t\t\t\terrno.EMFILE: self._('There are too many opened files on the server.'),\n\t\t\t\t\t\terrno.ENFILE: self._('There are too many opened files on the server.'),\n\t\t\t\t\t\terrno.ENOSPC: self._('There is not enough free space on the server.')\n\t\t\t\t\t}.get(exc.errno, self._('An unknown operating system error occurred (%s).' 
% (exc,)))\n\t\t\t\t\traise ServiceUnavailable(message)\n\t\t\t\tmod_proc.signal_connect('result', self.result)\n\n\t\t\t\tcb = notifier.Callback(self._mod_error, module_name)\n\t\t\t\tmod_proc.signal_connect('error', cb)\n\n\t\t\t\tcb = notifier.Callback(self._socket_died, module_name)\n\t\t\t\tmod_proc.signal_connect('closed', cb)\n\n\t\t\t\tcb = notifier.Callback(self._mod_died, module_name)\n\t\t\t\tmod_proc.signal_connect('finished', cb)\n\n\t\t\t\tself.__processes[module_name] = mod_proc\n\n\t\t\t\tcb = notifier.Callback(self._mod_connect, mod_proc, msg)\n\t\t\t\tnotifier.timer_add(50, cb)\n\t\t\telse:\n\t\t\t\tproc = self.__processes[module_name]\n\t\t\t\tif proc.running:\n\t\t\t\t\tCORE.info('Passing new request to running module %s' % module_name)\n\t\t\t\t\tproc.request(msg)\n\t\t\t\t\tself.reset_inactivity_timer(proc)\n\t\t\t\telse:\n\t\t\t\t\tCORE.info('Queuing incoming request for module %s that is not yet ready to receive' % module_name)\n\t\t\t\t\tproc._queued_requests.append(msg)", "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])", "def _setup_command(self):\r\n raise NotImplementedError", "def handleCommand(self, command, prefix, params):\n irc.IRCClient.handleCommand(self, command, prefix, params)\n if len(params) < 2:\n return\n plugins = plugin_manager.filter(\n channel=self.channel, action=command.lower())\n for plugin in plugins:\n plugin.handle_action(protocol=self, action=command.lower(),\n user=prefix, message=params[1])", "def handle_command(command, channel):\n response = \"Not sure what you mean. 
\" + \\\n \"Try the following commands: \\n\" +\\\n \"@netsilbot alert list\\n\" +\\\n \"@netsilbot alert details <alertID>\\n\" +\\\n \"@netsilbot service list\\n\" +\\\n \"@netsilbot service details <serviceID>\\n\"+\\\n \"(You can add 'text' or 'raw' options for formatting the output)\"\n\n\n if command.startswith(COMMANDS[0]):\n #print command\n subcommand = command.split(' ')[1]\n if(subcommand=='list'):\n if(len(command.split(' '))>2):\n formatOutput = command.split(' ')[2]\n else:\n formatOutput=''\n\n response = GetAlertList(formatOutput)\n\n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='details'):\n response = GetAlertDetails([],command.split(' ')[2])\n sendSlackMessage(response, channel)\n\n elif(subcommand=='rule'):\n subsubcommand = command.split(' ')[2]\n if(subsubcommand=='list'):\n if(len(command.split(' '))>3):\n formatOutput = command.split(' ')[3]\n else:\n formatOutput=''\n\n response = GetAlertRuleList(formatOutput)\n \n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subsubcommand=='details'):\n response = GetAlertRuleDetails([],command.split(' ')[3])\n sendSlackMessage(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='template'):\n subsubcommand = command.split(' ')[2]\n if(subsubcommand=='list'):\n if(len(command.split(' '))>3):\n formatOutput = command.split(' ')[3]\n else:\n formatOutput=''\n\n response = GetAlertTemplateList(formatOutput)\n \n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subsubcommand=='details'):\n response = GetAlertTemplateDetails([],command.split(' ')[3])\n sendSlackMessage(response, channel)\n\n else:\n sendSlackMessage(response, channel)\n\n elif command.startswith(COMMANDS[1]):\n subcommand = command.split(' ')[1]\n if(subcommand=='list'):\n if(len(command.split(' '))>2):\n formatOutput = command.split(' ')[2]\n else:\n formatOutput=''\n\n response = GetServiceList(formatOutput)\n\n if(formatOutput=='' or formatOutput == 'formatted'):\n sendSlackMessageWithAttactment(response, channel)\n else:\n sendSlackMessage(response, channel)\n\n elif(subcommand=='details'):\n response = GetServiceDetails([],command.split(' ')[2])\n sendSlackMessage(response, channel)\n \n else:\n sendSlackMessage(response, channel)\n\n elif command.startswith(COMMANDS[2]):\n subcommand = command.split(' ')[1]\n if(subcommand=='run'):\n if(len(command.split(' '))>2):\n queryText = command.split('run')[1].strip()\n else:\n queryText=''\n\n print queryText\n\n response=''\n response = RunQuery(query=queryText)\n #print response\n\n sendSlackMessageWithAttactment(response, channel)\n \n else:\n sendSlackMessage(response, channel)\n\n else:\n sendSlackMessage(response, channel)", "def handle_command(command, channel):\n\t# Default response is help text for the user\n\tdefault_response = \"I could not understand your command, i think you may have given me too few parameters or maybe you typed something wrong. 
Try again using: [tech|soft|group|searchtech] [Option] [Name]\"\n\t# Finds and executes the given command, filling in response\n\tresponse = None\n\n\tcmd_list = str(command.encode('ascii', 'replace')).split()\n\tlist_len = len(cmd_list)\n\tif cmd_list[0] == 'help':\n\t\tresponse = \"To use me, type in channel i am currently on:\\n@mitreattackbot [tech|group|soft|searchtech] [OPTION] [NAME]\\n For example: tech desc powershell\\n Typing this will make me show you the technical description of the PowerShell technique!\"\n\n\tif cmd_list[0] == 'searchtech':\n\t\tpre_return_str = None\n\t\tif list_len > 1:\n\t\t\tif list_len % 2 == 1:\n\t\t\t\tcmd_list.remove(\"searchtech\")\n\t\t\t\tsearch_list = []\n\t\t\t\tfor i in range(0, len(cmd_list), 2):\n\t\t\t\t\tsearch_list.append({'field': cmd_list[i], 'value': cmd_list[i+1]})\n\t\t\t\tresponse = str(att.search(search_list))\n\t\t\telse:\n\t\t\t\tresponse = \"To use the searchtech option i must have a field and a value, if you dont tell me a field and a value i cannnot search things for you. Try searchtech [FIELD] [VALUE]\"\n\n\tif cmd_list[0] == 'tech':\n\t\tpre_return_str = None\n\t\tif list_len > 1:\n\t\t\tif cmd_list[1] == 'groups':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"groups\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.groups)\n\t\t\telif cmd_list[1] == 'id':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"id\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.ID)\n\t\t\telif cmd_list[1] == 'title':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"title\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.displaytitle)\n\t\t\telif cmd_list[1] == 'desc':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"desc\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. 
If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.technical_description)\n\t\t\telif cmd_list[1] == 'url':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"url\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.full_url)\n\t\t\telif cmd_list[1] == 'sources':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"sources\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.data_sources)\n\t\t\telif cmd_list[1] == 'tactics':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"tactics\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.tactics)\n\t\t\telif cmd_list[1] == 'soft':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one technique and i dont know from which you want the data of. Please be more specific. 
If you need, find out the full name of the technique by typing \\\"tech NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.software)\n\t\t\telif cmd_list[1] == 'all':\n\t\t\t\tpre_return_str = str(att.findTechnique(''))\n\t\t\telse:\n\t\t\t\tcmd_list.remove(\"tech\")\n\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\tdata_returned = att.findTechnique(search_str)\n\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\tpre_return_str = str(data_returned)\n\t\t\t\telse:\n\t\t\t\t\tpre_return_str = str(data_returned) + \"\\n\\nID: \" + str(data_returned.ID) + \"\\n\\nTitle:\" + str(data_returned.displaytitle) + \"\\n\\nTechnical Description: \" + str(data_returned.technical_description) + \"\\n\\nURL: \" + str(data_returned.full_url) + \"\\n\\nGroups: \" + str(data_returned.groups).replace(\"u'\", \"\") + \"\\n\\nSoftware: \" + str(data_returned.software).replace(\"u'\", \"\") + \"\\n\\nTactics: \" + str(data_returned.tactics).replace(\"u'\", \"\") + \"\\n\\nData Source: \" + str(data_returned.data_sources).replace(\"u'\", \"\") + \"\\n\"\n\t\t\tresponse = pre_return_str\n\n\tif cmd_list[0] == 'group':\n\t\tpre_return_str = None\n\t\tif list_len > 1:\n\t\t\tif cmd_list[1] == 'techniques':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"techniques\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.techniques)\n\t\t\telif cmd_list[1] == 'id':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"id\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.ID)\n\t\t\telif cmd_list[1] == 'title':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"title\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. 
If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.displaytitle)\n\t\t\telif cmd_list[1] == 'desc':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"desc\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.description)\n\t\t\telif cmd_list[1] == 'url':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"url\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.fullurl)\n\t\t\telif cmd_list[1] == 'aliases':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"aliases\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.aliases)\n\t\t\telif cmd_list[1] == 'soft':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one group and i dont know from which you want the data of. Please be more specific. 
If you need, find out the full name of the group by typing \\\"group NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.software)\n\t\t\telif cmd_list[1] == 'all':\n\t\t\t\tpre_return_str = str(att.findGroup(''))\n\t\t\telse:\n\t\t\t\tcmd_list.remove(\"group\")\n\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\tdata_returned = att.findGroup(search_str)\n\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\tpre_return_str = str(data_returned)\n\t\t\t\telse:\n\t\t\t\t\tpre_return_str = str(data_returned) + \"\\n\\nID: \" + str(data_returned.ID) + \"\\n\\nTitle:\" + str(data_returned.displaytitle) + \"\\n\\nTechnical Description: \" + str(data_returned.description) + \"\\n\\nURL: \" + str(data_returned.fullurl) + \"\\n\\nTechniques: \" + str(data_returned.techniques).replace(\"u'\", \"\") + \"\\n\\nSoftware: \" + str(data_returned.software).replace(\"u'\", \"\") + \"\\n\\nAliases: \" + str(data_returned.aliases).replace(\"u'\", \"\") + \"\\n\"\n\t\t\tresponse = pre_return_str\n\n\tif cmd_list[0] == 'soft':\n\t\tpre_return_str = None\n\t\tif list_len > 1:\n\t\t\tif cmd_list[1] == 'techniques':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"techniques\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.techniques)\n\t\t\telif cmd_list[1] == 'id':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"id\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.ID)\n\t\t\telif cmd_list[1] == 'title':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"title\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.displaytitle)\n\t\t\telif cmd_list[1] == 'desc':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"desc\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. 
If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.description)\n\t\t\telif cmd_list[1] == 'url':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"url\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.fullurl)\n\t\t\telif cmd_list[1] == 'aliases':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"aliases\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.aliases)\n\t\t\telif cmd_list[1] == 'groups':\n\t\t\t\tif list_len > 2:\n\t\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\t\tcmd_list.remove(\"groups\")\n\t\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\t\tdefault_response = \"Your request returned more than one software and i dont know from which you want the data of. Please be more specific. 
If you need, find out the full name of the software by typing \\\"soft NAME\\\" and redo the question using the full name please.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpre_return_str = str(data_returned.groups)\n\t\t\telif cmd_list[1] == 'all':\n\t\t\t\tpre_return_str = str(att.findSoftware(''))\n\t\t\telse:\n\t\t\t\tcmd_list.remove(\"soft\")\n\t\t\t\tsearch_str = str(\" \".join(cmd_list))\n\t\t\t\tdata_returned = att.findSoftware(search_str)\n\t\t\t\tif type(data_returned) is list:\n\t\t\t\t\tpre_return_str = str(data_returned)\n\t\t\t\telse:\n\t\t\t\t\tpre_return_str = str(data_returned) + \"\\n\\nID: \" + str(data_returned.ID) + \"\\n\\nTitle:\" + str(data_returned.displaytitle) + \"\\n\\nTechnical Description: \" + str(data_returned.description) + \"\\n\\nURL: \" + str(data_returned.fullurl) + \"\\n\\nTechniques: \" + str(data_returned.techniques).replace(\"u'\", \"\") + \"\\n\\nGroups: \" + str(data_returned.groups).replace(\"u'\", \"\") + \"\\n\\nAliases: \" + str(data_returned.aliases).replace(\"u'\", \"\") + \"\\n\"\n\t\t\tresponse = pre_return_str\n\n\t# Sends the response back to the channel\n\tslack_client.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=channel,\n\t\ttext=response or default_response\n\t)", "def process_commands(self, commands: List[str]):", "def command(self, command):\n\n self._command = command", "def command(self, command):\n\n self._command = command", "def command(self, command):\n\n self._command = command", "def __call__(self, cmd):\n cmdname = cmd.name\n self.commands[cmdname] = self._prepare_cmd(cmd)\n return cmd", "def run() -> None:\n arguments = sys.argv[1:]\n\n if not arguments:\n help_command(arguments=[])\n return\n\n commands_dict = {\n '--help': help_command,\n 'list': list_command,\n 'create': create_command,\n 'update': update_command,\n 'download': download_command,\n 'delete': delete_command,\n }\n\n command = arguments[0]\n command_handler = commands_dict.get(command)\n if command_handler is not None:\n command_handler(arguments)\n else:\n print(\"Can't perform {0} command. Please read help:\".format(command))\n help_command(arguments=[])", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! 
Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def handle(self, msg):\n\n if msg.command == \"PING\":\n self._sendmsg(\"PONG :{}\".format(msg.args[0]))\n\n elif msg.command == \"JOIN\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has joined {}\".format(name, channel))\n\n elif msg.command == \"PART\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has left {}\".format(name, channel))\n\n elif msg.command == \"KICK\":\n name = msg.sendername\n channel = msg.args[0]\n victim = msg.args[1]\n print(\"{} has kicked {} from {}\".format(name, victim, channel))\n\n elif msg.command == \"QUIT\":\n name = msg.sendername\n print(\"{} has quit IRC\".format(name))\n\n elif msg.command == \"KILL\":\n name = msg.sendername\n victim = msg.args[0]\n print(\"{} has killed {}\".format(name, victim))\n\n elif msg.command == \"NICK\":\n name = msg.sendername\n newname = msg.args[0]\n print(\"{} is now known as {}\".format(name, newname))\n\n elif msg.command == \"MODE\":\n name = msg.sendername\n target = msg.args[0]\n mode = msg.args[1]\n print(\"{} has set the mode of {} to {}\".format(name, target, mode))\n\n elif msg.command == \"NOTICE\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}]! {}\".format(name, target, message))\n\n elif msg.command == \"PRIVMSG\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}] {}\".format(name, target, message))\n\n elif msg.command.isdigit():\n print(msg.args[-1])\n\n else:\n print(str(msg))\n\n hooks.handle(self, msg)", "def handle_user_command(self, command_text):\r\n try:\r\n command = json.loads(command_text)\r\n except json.JSONDecodeError:\r\n self.error_callback('Could not parse user command')\r\n return\r\n\r\n target = command.get('target', None)\r\n parameter = command.get('parameter', None)\r\n command = command.get('command', None)\r\n if target == 'controller':\r\n pass\r\n elif target == 'well_and_tank':\r\n # if the pump is controlled automatically, user command has no effect\r\n with self.config_lock:\r\n is_auto = self.config['pump_auto_control']\r\n if is_auto:\r\n self.error_callback('Attempted to execute a manual command on an automated parameter')\r\n return\r\n if parameter == 'pump':\r\n # Find parameter description\r\n for curr_param in self.well_tank_dev.description['parameters']:\r\n if curr_param['name'] == 'pump':\r\n break\r\n if (command != curr_param['commands'][0]) and \\\r\n (command != curr_param['commands'][1]):\r\n self.error_callback('Invalid value {}:{}:{}'.format(target, parameter, command))\r\n return\r\n self.well_tank_dev.send_command(parameter, command)\r\n # No need to call handle_updates as there are no updates yet - the device has not confirmed that its\r\n # state has changed\r\n else:\r\n self.error_callback(\"Cannot control {}'s parameter {}\".format(target, parameter))\r\n return", "def process_command(self, command):\n\t\tif not Mover.executing_action:\n\t\t\tcmd = command.split(' ')[0]\n\t\t\ttry:\n\t\t\t\tparam = float(command.split(' ')[1])\n\t\t\texcept:\n\t\t\t\tparam = None\n\t\t\tfinally:\n\t\t\t\tMover.executing_action = True\n\t\t\t\t#Load sets the thread's run target and parameters\n\t\t\t\tself.action_thread.load(getattr(self, cmd), param)\n\t\t\t\t#spawn an action thread\n\t\t\t\tself.action_thread.run()\n\t\t\t\tMover.executing_action = False", "def process_command(self, command):\r\n if 
self.visprotocol is not None:\r\n _LOGGER.info(\"client process_command called {0} type is {1}\".format(command, type(self.visprotocol))) \r\n self.visprotocol.process_command(command)\r\n else:\r\n _LOGGER.error(\"[VisonicClient] The pyvisonic command is None\")", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def get_command_handler(self) -> Callable:\n try:\n return globals()[self.command_handler]\n except KeyError:\n logging.error(\"command_handler function '%s' for command '%s' not found in global scope\" %\n (self.command_handler, self.name))\n except AttributeError:\n logging.error(\"command_handler for command '%s' not defined in command_definition.py\" % self.name)", "def cmd(self, message):\n pass", "def DispatchCommand(command, options, args, command_map=None):\n if command_map is None:\n command_map = gclient_command_map\n\n if command in command_map:\n return command_map[command](options, args)\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n command)", "def runCommand(self): \\\n # pylint: disable=no-self-use", "async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")" ]
[ "0.8338623", "0.75426656", "0.7504038", "0.7325603", "0.7277105", "0.726302", "0.71191317", "0.7092065", "0.70484436", "0.6998338", "0.6996647", "0.6957424", "0.6936447", "0.6885723", "0.683087", "0.6806178", "0.676416", "0.6748807", "0.6729248", "0.6727872", "0.67130303", "0.6700611", "0.6692235", "0.66680294", "0.66654843", "0.6658531", "0.662541", "0.66060925", "0.6582996", "0.65784055", "0.6575767", "0.6569103", "0.6568401", "0.65674776", "0.65659255", "0.6554173", "0.6551949", "0.6547426", "0.65442765", "0.6518916", "0.65151864", "0.6511435", "0.65103245", "0.65093017", "0.6504808", "0.6464974", "0.6447943", "0.6442011", "0.63991", "0.6369065", "0.6359252", "0.6355225", "0.63500226", "0.6349937", "0.6344861", "0.63275826", "0.63233393", "0.6306843", "0.6301095", "0.6291807", "0.6287925", "0.6283421", "0.6280761", "0.6275223", "0.6266036", "0.6265834", "0.6256237", "0.6255957", "0.62533605", "0.62342227", "0.6225541", "0.6222452", "0.62220424", "0.62205833", "0.62181336", "0.621334", "0.62123746", "0.62060803", "0.6196723", "0.6192239", "0.61668134", "0.61668134", "0.61668134", "0.61498576", "0.61490536", "0.6137296", "0.61318797", "0.6118725", "0.6118191", "0.61153144", "0.61123973", "0.60980505", "0.60980505", "0.60980505", "0.60980505", "0.60916775", "0.6090801", "0.6077483", "0.60638326", "0.604178" ]
0.77345246
1
Attach ipmiconsole to the target instance specified by its name
def start(instance="default"): # initialize logging global logger_ic logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance) common.init_logger(instance) # initialize environment env.local_env.quit_flag = False common.init_env(instance) pid_file = "{}/{}/.ipmi_console.pid".format(config.infrasim_home, instance) daemon.daemonize(pid_file) with open(pid_file, "r") as fp: logger_ic.info("ipmi-console of {} start with pid {}". format(instance, fp.read().strip())) # parse the sdrs and build all sensors sdr.parse_sdrs() # running thread for each threshold based sensor _start_monitor(instance) _spawn_sensor_thread() _start_console(instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_new_instance():\n import IPython\n\n IPython.Shell.start().mainloop()", "def attach(self):\r\n sshpass = \"sshpass -p \\\"akanksha1\\\"\"\r\n remote_Station = \"[email protected]\"\r\n base = \"ssh -t \" + options[\"username\"] + \"@\" + options[\"server\"]\r\n\r\n screen = \" screen -r \"\r\n if self.device_type == \"Wireless_access_point\":\r\n screen += \"WAP_%d\" % self.getID()\r\n elif self.device_type == \"yRouter\":\r\n yrouter = \"yrouter --interactive=1 --config=/root/script_t1_y1.conf test3\"\r\n screen_yrouter = \"%s ssh %s \\\"source /root/.profile; %s\\\"\"%(sshpass, remote_Station, yrouter)\r\n else:\r\n name = self.getName()\r\n pid = mainWidgets[\"tm\"].getPID(name)\r\n if not pid:\r\n return\r\n screen += pid + \".\" + name\r\n\r\n command = \"\"\r\n\r\n window_name = str(self.getProperty(\"Name\")) # the strcast is necessary for cloning\r\n if(self.getName() != window_name):\r\n window_name += \" (\" + self.getName() + \")\"\r\n if environ[\"os\"] == \"Windows\":\r\n\r\n startpath = environ[\"tmp\"] + self.getName() + \".start\"\r\n try:\r\n outfile = open(startpath, \"w\")\r\n outfile.write(screen)\r\n outfile.close()\r\n except:\r\n mainWidgets[\"log\"].append(\"Failed to write to start file!\")\r\n return\r\n\r\n command += \"putty -\"\r\n if options[\"session\"]:\r\n command += \"load \" + options[\"session\"] + \" -l \" + options[\"username\"] + \" -t\"\r\n else:\r\n command += base\r\n command += \" -m \\\"\" + startpath + \"\\\"\"\r\n else:\r\n if self.device_type == \"yRouter\":\r\n command += \"rxvt -T \\\"\" + window_name + \"\\\" -e \" + screen_yrouter\r\n else:\r\n command += \"rxvt -T \\\"\" + window_name + \"\\\" -e \" + base + screen\r\n\r\n self.shell = subprocess.Popen(str(command), shell=True)", "def _set_instance_name(self, name):\n self.__instance_name = name", "def name(self, name: str):\n self.inst['targetname'] = name", "def instance_name(self, instance_name):\n\n self._instance_name = instance_name", "def attach_pwn(args):\n container_name = _read_container_name()\n\n # FIXME Is it better that we just exec it with given name?\n conts = container.list(filters={'name':container_name})\n if len(conts) != 1:\n raise InstallationError('Installation seems to be run. 
There are more than one image called ancypwn')\n _attach_interactive(conts[0].name)", "def help_show(self):\n print(\"print an instance based on the class name and id\")", "def start_console(self):\n return", "def launch (transparent=False): \n core.registerNew(resonance, str_to_bool(transparent))", "def expert_screen(self):\n executable = 'motor-expert-screen'\n #hutch_location = '~' + blutil.guessBeamline() + 'opr/bin/'\n hutch_location = '/reg/g/xpp/scripts/'\n arg = self.pvname\n if os.path.exists(hutch_location + executable):\n os.system(hutch_location + executable + ' ' + arg)\n else:\n os.system(executable + ' ' + arg)", "def registerConsole(self, userID, key):\r\n self._endpoint.registerConsole(userID, key)", "def get_spice_console(self, instance):\n raise NotImplementedError()", "def start_strace(self):\n\t\ttarget_pid = self.info[\"target_pid\"]\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".strace\")\n\t\tself.info[\"strace_log_path\"] = log_file\n\t\tself.p_strace = subprocess.Popen([\"/usr/bin/strace\", \"-f\", \"-tt\", \"-y\", \"-o\"+self.info[\"strace_log_path\"], \"-p\"+str(self.info[\"target_pid\"])], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"strace starts, logfile:%s\",self.info[\"strace_log_path\"])", "def start_audit_manager(path) -> WebDriver:\n # close existing instance(s) if exists\n import os\n os.system(\"taskkill /FI \\\"IMAGENAME eq mmc.exe\\\" /F\")\n # run new instance\n exp_cap = {\"app\": \"mmc.exe\",\n \"deviceName\": \"WindowsPC\",\n \"appArguments\": \"\\\"\" + path + \"\\\"\"\n }\n exp_session = webdriver.Remote(\n command_executor='http://127.0.0.1:4723',\n desired_capabilities=exp_cap)\n return exp_session\n # region old\n # desktoppath = r\"C:\\Users\\administrator\\Desktop\"\n # exp_cap = {\"app\": \"explorer.exe\", \"deviceName\": \"WindowsPC\", \"appArguments\": desktoppath}\n # exp_session = webdriver.Remote(\n # command_executor='http://127.0.0.1:4723',\n # desired_capabilities=exp_cap)", "def assign_instance(InstanceId=None, LayerIds=None):\n pass", "def stop(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n\n try:\n file_ipmi_console_pid = \"{}/{}/.ipmi_console.pid\".\\\n format(config.infrasim_home, instance)\n with open(file_ipmi_console_pid, \"r\") as f:\n pid = f.readline().strip()\n\n os.kill(int(pid), signal.SIGTERM)\n logger_ic.info(\"SIGTERM is sent to pid: {}\".format(pid))\n os.remove(file_ipmi_console_pid)\n except IOError:\n # When pid file is missing, by e.g., node destroy,\n # find process id by instance name\n if instance == \"default\":\n process_name = \"ipmi-console start$\"\n else:\n process_name = \"ipmi-console start {}\".format(instance)\n\n ps_cmd = r\"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1\".format(process_name)\n logger_ic.warning(\"Fail to find ipmi console pid file, check by:\")\n logger_ic.warning(\"> {}\".format(ps_cmd))\n _, pid = run_command(cmd=ps_cmd)\n logger_ic.warning(\"ipmi console pid got: {}\".format(pid))\n if not pid:\n logger_ic.warning(\"ipmi console for instance {} is not running\".format(instance))\n return\n\n os.kill(int(pid), signal.SIGTERM)\n logger_ic.info(\"SIGTERM is sent to pid: {}\".format(pid))\n except Exception:\n logger_ic.warning(traceback.format_exc())\n pass", "def console():\n start_console()", "def __init__ (self, attrs):\n super(MyDistribution, self).__init__(attrs)\n self.console = ['dosage']", "def target_instance(self, 
target_instance):\n self._target_instance = target_instance", "def aml_run_attach():\n AmlRunWrapper().setup(attach=True)", "def _cli(ctx, input, output):\n print(\"Registering...\")\n ctx.obj = dict(\n component=Registration,\n input=input,\n output=output,\n stack=ImageStack.from_path_or_url(input),\n )", "def msg_engine_connect(self,msg):\r\n log.debug('Adding edit() command to new engine')\r\n engname = msg.get_data()[0]\r\n\r\n #get the new engine interface\r\n app = wx.GetApp()\r\n console = app.toolmgr.get_tool('Console')\r\n eng = console.get_engine_console(engname)\r\n\r\n #When an engine is started add the edit() command\r\n eng.add_builtin(edit, 'edit')\r\n\r\n #add any set breakpoints to this engine's debugger\r\n for bpdata in self.bpoints:\r\n eng.debugger.set_breakpoint(bpdata)", "def insp(ctx, input, mode, interpreter):\n import rasterio.tool\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n logger = logging.getLogger('rio')\n try:\n with rasterio.drivers(CPL_DEBUG=verbosity > 2):\n with rasterio.open(input, mode) as src:\n rasterio.tool.main(\n 'Rasterio %s Interactive Inspector (Python %s)\\n'\n 'Type \"src.meta\", \"src.read(1)\", or \"help(src)\" '\n 'for more information.' % (\n rasterio.__version__,\n '.'.join(map(str, sys.version_info[:3]))),\n src, interpreter)\n except Exception:\n logger.exception(\"Exception caught during processing\")\n raise click.Abort()", "def help_create(self):\n print(\"create instances\")", "def export_insertInstance( self, imageName, instanceName ):\n return gVirtualMachineDB.insertInstance( imageName, instanceName )", "def start_instance(InstanceId=None):\n pass", "def change_to(name):\n global screen_manager\n screen_manager.current = name", "def attach_stream(self, name, mode):\n stream = open(getattr(self, name), mode)\n os.dup2(stream.fileno(), getattr(sys, name).fileno())", "def propose_interaction(self, other: 'Guest', action: str):\n other.interaction_proposals.append((self, action))\n self.interaction_proposals.append((other, action))", "def attach(self):\n args = {\n 'detach': True,\n 'tty': True,\n }\n if self._args.command:\n args['command'] = self._args.command\n\n try:\n try:\n ident = self.client.images.pull(self._args.image)\n img = self.client.images.get(ident)\n except podman.ImageNotFound as e:\n sys.stdout.flush()\n print(\n 'Image {} not found.'.format(e.name),\n file=sys.stderr,\n flush=True)\n return 1\n\n ctnr = img.create(**args)\n ctnr.attach(eot=4)\n\n try:\n ctnr.start()\n print()\n except (BrokenPipeError, KeyboardInterrupt):\n print('\\nContainer disconnected.')\n except podman.ErrorOccurred as e:\n sys.stdout.flush()\n print(\n '{}'.format(e.reason).capitalize(),\n file=sys.stderr,\n flush=True)\n return 1", "def on_xTerm(self):\n path = os.path.normpath(self.pXterm)\n os.system('start \"Toto\" /d \"%s\"' % path)", "def append_inst(self, inst):\n inst.basic_block = self\n self.insts.append(inst)", "def _attach_to_instance(self, instance):\n self._instance = instance", "def startSpawing(self):\n self.girderManager.startSpawing()", "def set_console_xen(self):\n print \"\"\n self.exec_cmd(\"echo \\\"xvc0\\\" >> %s/etc/securetty\" % self.rep_vhosts_vm) \n if os.path.isfile(\"%s/etc/inittab\" % self.rep_vhosts_vm):\n self.exec_cmd(\"echo \\\"7:2345:respawn:/sbin/getty 38400 xvc0\\\" >> %s/etc/inittab\" % self.rep_vhosts_vm) \n\n if os.path.isfile(\"%s/etc/event.d/tty1\" % self.rep_vhosts_vm):\n self.exec_cmd(\"cp %s/etc/event.d/tty1 %s/etc/event.d/xvc0\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n 
self.exec_cmd(\"sed -i \\\"s@tty1@xvc0@\\\" %s/etc/event.d/xvc0\" % self.rep_vhosts_vm)\n \n if os.path.isfile(\"%s/etc/init/tty1.conf\" % self.rep_vhosts_vm):\n self.exec_cmd(\"cp %s/etc/init/tty1.conf %s/etc/init/xvc0.conf\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"sed -i \\\"s@tty1@xvc0@\\\" %s/etc/init/xvc0.conf\" % self.rep_vhosts_vm)", "def use(target, name):\n return \"You insert the \" + name + \" into \" + target.name", "def __enter__(self):\n self.enode.get_shell('bash').send_command('scapy', matches=self.scapy_prompt)\n self.enode.get_shell('bash').send_command('import sys', matches=self.scapy_prompt)\n self.enode.get_shell('bash').send_command('sys.path.append(\".\")', matches=self.scapy_prompt)\n self.enode.get_shell('bash').send_command('sys.path.append(\"/tmp\")', matches=self.scapy_prompt)\n return self", "def ion_instance(self, ion_instance):\n\n self._ion_instance = ion_instance", "def exe(self, name):\n\n return name", "def launch (self):\n path = \"\"\n os.system(path + 'kidlogger_user.exe')", "def add(self, name, command):", "def run(self):\r\n self.inst.write(':RUN')", "def start_ssm(self, ssm_image):\n pass", "def whisper(self,name):\n\n self.sendCommand(\"global /join\",name+self.userName+\" private\")\n self.master.after(300,self.sendCommand,name+self.userName+\" /invite\",name)", "def attach_running_starter(self):\n # pylint disable=broad-except\n match_str = \"--starter.data-dir={0.basedir}\".format(self)\n if self.passvoidfile.exists():\n self.passvoid = self.passvoidfile.read_text(errors=\"backslashreplace\", encoding=\"utf-8\")\n for process in psutil.process_iter([\"pid\", \"name\"]):\n try:\n name = process.name()\n if name.startswith(\"arangodb\"):\n process = psutil.Process(process.pid)\n if any(match_str in s for s in process.cmdline()):\n print(process.cmdline())\n print(\"attaching \" + str(process.pid))\n self.instance = process\n return\n except psutil.NoSuchProcess as ex:\n logging.error(ex)\n raise Exception(\"didn't find a starter for \" + match_str)", "def _AddCmdInstance(self, command_name, cmd, command_aliases=None):\n for name in [command_name] + (command_aliases or []):\n self._cmd_alias_list[name] = command_name\n self._cmd_list[command_name] = cmd", "def silkscreen_commands(self, commands):\n self.pcb_layers[\"silkscreen\"].commands = commands", "def launch(self):", "def _start_monitor(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n logger_ic.info(\"ipmi-console monitor thread starts to run.\")\n monitor_thread = threading.Thread(target=monitor, args=(instance,))\n monitor_thread.setDaemon(True)\n monitor_thread.start()", "def ConsoleStart(self):\n pass", "def addButtonIcon(name):\n\n # Set parent to be the scroll layout\n global objectScroll\n cmds.setParent(objectScroll)\n\n # Instance object, with create flag set to True\n AssetIcon(name, True)", "def connect():\n\n crate = get_crate()\n crate.mch_comms.ipmitool_shell_connect()", "def __enter__(self):\n stdout('Starting {}{}'.format(self.name, ' on device {}'.format(self.device) if self.device else ''))\n stdout('')\n self.timer_start('script')\n\n if self.device:\n self._device_ctx.__enter__()\n\n return self", "def start_sysdig(self):\n\t\ttarget_pid = self.info[\"target_pid\"]\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".scap\")\n\t\tself.info[\"sysdig_log_path\"] = log_file\n\t\tcmd = 
[\"/usr/bin/sysdig\",\"-n%d\"%(self.cfg.sysdig_limit),\"-w\"+self.info[\"sysdig_log_path\"] ]\n\t\tself.p_sysdig = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"sysdig starts, logfile:%s\",self.info[\"sysdig_log_path\"] )", "def add_command(self, name, fct):\r\n self.cmds[name] = fct", "def instances(args, config):\n print('Does something? More to come.')", "def startami(image, instancetype, accesskey, secretkey, pkname):\n if not is_valid_instance_type(image, instancetype):\n raise ValueError(\"Invalid instance type: '%s'\" % instancetype)\n\n conn = EC2Connection(accesskey, secretkey)\n image = conn.get_image(get_image_id(image))\n reservation = image.run(instance_type=instancetype, key_name=pkname)\n instance = reservation.instances[0]\n\n waitForInstanceToRun(instance)\n\n # [AN] call script instanceStartup.py\n return str(instance.dns_name)", "def __init__(self, parent=None):\n super(embeddedTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def screen_open(self, sname=None):\n if sname:\n xtitle = sname\n else:\n xtitle = 'msh_' + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))\n\n self.run(\"screen -S %s\" % (xtitle))\n sret = self.run(\"echo $STY\")\n\n return sret", "def setExeName(self, name):\n lines = io.BytesIO(str.encode(name + '\\n'))\n self.ftp.storlines('stor RUN_FEMC.BAT', lines)\n lines.close()", "def launch(**kwargs):\n logger.info('launch dream command')\n launch_gui()", "def start_interaction(self):\n self.__interact()", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def command(facade, note):\n print facade, note", "def log_kernel_launch(self, cmd: List[str]) -> None:\n pass", "def print2message():\n return OverrideManager(\"Output Window\")", "def __init__(self):\n import visa\n\n rm = visa.ResourceManager()\n target = 'Agilent Technologies,8163B,MY48208514,V5.25(72637)'\n\n for dev in rm.list_resources():\n try:\n inst = rm.open_resource(dev)\n name = inst.query('*IDN?') # Agilent Technologies,8163B,MY48208514,V5.25(72637)\n if target in name:\n # TODO: check that the slot contains the correct module\n self._inst = inst\n except:\n continue\n\n if self._inst is None:\n raise RuntimeError(\"Target resource {} cannot be found in the VISA resource manager\".format(target))\n print(\"Connected to \" + self.id())", "def activated(self, icon):\n self.statusicon.set_from_stock(gtk.STOCK_PRINT)\n self.statusicon.set_tooltip(\"FolderWatch\")\n subprocess.call([self._command], shell=True)", "def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! 
EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance", "def set_modprobe(self):\n if version_os[\"OS\"] == \"Debian\":\n self.exec_cmd(\"echo \\\"alias eth0 xennet\\\" >> %s/etc/modprobe.d/aliases\" % self.rep_vhosts_vm)\n else: \n self.exec_cmd(\"echo \\\"alias eth0 xennet\\\" >> %s/etc/modprobe.d/aliases.conf\" % self.rep_vhosts_vm)", "def trigger(self, target: \"pwncat.target.Target\") -> \"pwncat.manager.Session\":", "def console():\n repl(click.get_current_context())", "def entry_point_target(self):\r\n return '%s:%s' % (self.entry_point.module_name,\r\n self.entry_point.attrs[0])", "def emit_pid(self, name, pid):\n event = {}\n event['name'] = 'process_name'\n event['ph'] = 'M'\n event['pid'] = pid\n event['args'] = {'name': name}\n self._metadata.append(event)", "def associate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'POST')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' % (endpoint_name, instance_id), 'POST')", "def add_spikemonitor(self, obj, filename=\"recording\"):\n filename += '.spikes'\n indices = obj.record\n if isinstance(indices, bool) and indices is True:\n indices = np.arange(self._nr_of_neurons)\n else:\n indices = np.asarray(indices).copy()\n indices += 1\n variables = obj.needed_variables\n self._simulation.add_eventoutputfile(\"eof\", filename)\n # adding eventselection for each recorded neuron\n for i in indices:\n self._simulation.add_eventselection(\"line{}\".format(i),\n \"{}[{}]\".format(self._model_namespace[\"populationname\"], i),\n event_port=\"spike\")", "def cli_set_process_title():\n raise NotImplementedError()", "def enable_instance_inspection(self):\n self._request({\"enable-instance-inspection\": True})", "def _register(self):\n curses.init_pair(self.i, self.font, self.background)", "def manually_launch_instances(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for instance in self.all_instances:\n if instance.instance_type == instance_type:\n if kill_instance:\n instance.kill_instance()\n instance.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def UseCommandInterface(self, option):\n if option:\n #change prompt\n if self.sim42interp.cmd.currentObj and hasattr(self.sim42interp.cmd.currentObj, 'GetPath'):\n sys.ps1 = self.sim42interp.cmd.currentObj.GetPath() + '> '\n else:\n sys.ps1 = 'Illegal current object> ' \n \n #Say good bye\n self.shell.run(\"print '*************** Changed to Sim42 Command Interface ***************' \", prompt=0, verbose=0)\n \n #Change\n self.shell.interp = self.sim42interp\n\n else:\n #change prompt\n sys.ps1 = '>>> '\n sys.ps2 = '... 
'\n\n #Change\n self.shell.interp = self.origInterp \n \n #Say hello\n self.shell.run(\"print '*************** Back to python ***************' \", prompt=0, verbose=0)\n \n\n self.shell.autoCompleteKeys = self.shell.interp.getAutoCompleteKeys()", "def attach(app, conf):\n global evolver_conf, serial_connection\n\n sio.attach(app)\n evolver_conf = conf\n\n # Set up the serial comms\n serial_connection = serial.Serial(port=evolver_conf['serial_port'], baudrate = evolver_conf['serial_baudrate'], timeout = evolver_conf['serial_timeout'])", "def startInstance(self, name=None,\n location=None,\n familyName=None,\n styleName=None,\n fileName=None,\n postScriptFontName=None,\n styleMapFamilyName=None,\n styleMapStyleName=None,\n\n ):\n if self.currentInstance is not None:\n # We still have the previous one open\n self.endInstance()\n instanceElement = ET.Element('instance')\n if name is not None:\n instanceElement.attrib['name'] = name\n if location is not None:\n locationElement = self._makeLocationElement(location)\n instanceElement.append(locationElement)\n if familyName is not None:\n instanceElement.attrib['familyname'] = familyName\n if styleName is not None:\n instanceElement.attrib['stylename'] = styleName\n if fileName is not None:\n instanceElement.attrib['filename'] = self._posixPathRelativeToDocument(fileName)\n if postScriptFontName is not None:\n instanceElement.attrib['postscriptfontname'] = postScriptFontName\n if styleMapFamilyName is not None:\n instanceElement.attrib['stylemapfamilyname'] = styleMapFamilyName\n if styleMapStyleName is not None:\n instanceElement.attrib['stylemapstylename'] = styleMapStyleName\n\n self.currentInstance = instanceElement", "def set_attenuator_output(self, state):\n try:\n self.attenuator.write('O {0}'.format(state))\n except visa.VisaIOError as err:\n raise IOError('Error: %s' % err)", "def AddInstanceArgument(parser):\n parser.add_argument(\n 'instance', completer=InstanceCompleter, help='Cloud SQL instance ID.'\n )", "def __init__(self, driver):\n\n # Set the command handler attributes\n self.name = driver.id \n self.driver = driver", "def screen_attach(self, sname):\n self._channel.sendall(\"screen -r %s\\n\" % (sname))\n rdata = '\\n'.join(self._channel.recv(65536).splitlines())\n return rdata", "def attachLeoIcon(self, window: Any) -> None:\n if self.appIcon:\n window.setWindowIcon(self.appIcon)", "def spinupoutputprocess():\n if __name__ == '__main__':\n _hwmgr = HardwareController(OUTPUT_SETTINGS)\n PROCESSES.append(_hwmgr)\n _hwmgr.start()", "def set_instance(self, env, instance, modify_existing):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.set_instance()' % self.__class__.__name__)\n # TODO create or modify the instance\n raise pywbem.CIMError(pywbem.CIM_ERR_NOT_SUPPORTED) # Remove to implement\n return instance", "def set_name(name):\n\n\tassert ltrace(TRACE_PROCESS, u'| set_name({0})', (ST_NAME, name))\n\n\ttry:\n\t\timport ctypes\n\t\tctypes.cdll.LoadLibrary('libc.so.6').prctl(15, name + '\\0', 0, 0, 0)\n\n\texcept Exception, e:\n\t\tlogging.warning(_(u'Cannot set process name (was %s).') % e)", "def __init__(self, parent=None):\n super(robotTwoTerminal, self).__init__(parent)\n self.process = QtCore.QProcess(self)\n self.urxvtTerminal = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(self.urxvtTerminal)\n # Works also with urxvt:\n self.process.start('urxvt', ['-embed', str(int(self.winId()))])\n self.setGeometry(90, 460, 1160, 125)", "def save_named_instance(self, name: str, 
instance):\r\n self.save_object(name, instance)", "def show_console(self, ips):\n geometry= ['63x19+0+0', '63x19+645+0', '63x17+645+420', '63x17+0+420']\n os.chdir(INFRA_DEPLOY)\n\n if len(ips)==1:\n ACCESS_VM = \"xterm -e 'ssh -i mykey.private root@\" + ips[0] + \"' &\"\n os.system(ACCESS_VM)\n return True\n\n else:\n i=0\n for ip in ips:\n ACCESS_VM = \"xterm -geometry \" + geometry[i] + \" -e 'ssh -i mykey.private root@\" + ip + \"' &\"\n os.system(ACCESS_VM)\n i+=1\n return True\n return False", "def ConsoleRun(self, command, sender):\n pass", "def additional_command(self):\n pass", "def start_notebook_instance(NotebookInstanceName=None):\n pass", "def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n if kill_instance:\n i.kill_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def _ipython_display_(self):\n with self._sc:\n self._box._ipython_display_()", "def add_target(self, pin_name):\n for i in range(self.num_pin_components(pin_name)):\n self.add_pin_component_target(pin_name, i)" ]
[ "0.555184", "0.53565377", "0.5350084", "0.52938074", "0.527807", "0.51141536", "0.503388", "0.5028491", "0.5026515", "0.50112593", "0.50111884", "0.49634692", "0.49329525", "0.4874104", "0.48671383", "0.4858034", "0.48359525", "0.48175284", "0.48108807", "0.47951758", "0.47673643", "0.47611496", "0.47520298", "0.47419986", "0.47409862", "0.47368675", "0.47354543", "0.47259837", "0.47236413", "0.4722342", "0.47190964", "0.4707005", "0.46793866", "0.46793497", "0.46543053", "0.4649682", "0.46475628", "0.464193", "0.4620003", "0.4618824", "0.4615704", "0.46139443", "0.45818183", "0.4579849", "0.4579386", "0.45777693", "0.45700395", "0.4560178", "0.4551176", "0.45510682", "0.45446926", "0.45427564", "0.4539674", "0.45282942", "0.4525315", "0.45098874", "0.45016894", "0.44964015", "0.44949326", "0.44916126", "0.44881645", "0.44868588", "0.44861352", "0.44808778", "0.44797885", "0.44789186", "0.44770265", "0.44761533", "0.44730136", "0.4471995", "0.44648492", "0.44623092", "0.446145", "0.44599286", "0.44583133", "0.44557604", "0.44514212", "0.4446125", "0.44454223", "0.44390002", "0.44378364", "0.44363686", "0.4434853", "0.4430451", "0.44285515", "0.4428176", "0.44229063", "0.44211882", "0.44167852", "0.44165385", "0.44052675", "0.4402553", "0.44002062", "0.43958396", "0.4394483", "0.43932796", "0.43927532", "0.43790257", "0.43788838", "0.4378859" ]
0.5010261
11
Target method used by the monitor thread, which polls vbmc status every 3s. If vbmc stops, ipmiconsole will stop.
def monitor(instance="default"): global logger_ic while True: try: with open("{}/{}/.{}-bmc.pid".format( config.infrasim_home, instance, instance), "r") as f: pid = f.readline().strip() if not os.path.exists("/proc/{}".format(pid)): logger_ic.warning("Node {} vBMC {} is not running, " "ipmi-console is ready to quit". format(instance, pid)) break time.sleep(3) except IOError: logger_ic.warning("Node {} workspace is possibly destroyed, " "ipmi-console is ready to quit".format(instance)) break stop(instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self, rms):\n pass", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def monitor(self):", "def watch(self, event):\n\t\tprint ('countdown',self.sec_left )\n\t\tif self.sec_left>-1:\n\t\t\tself.sec_left -= 1\n\t\t\tprint(self.sec_left)\n\t\t\tif self.sec_left in [2,1]:\n\t\t\t\t\n\t\t\t\tprint('ticker', self.sec_left)\n\t\t\t\tself.showMsg(str(self.sec_left-1), 1000, clean =False)\n\t\t\telif self.sec_left in [-1]:\n\t\t\t\tself.Clean()", "def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)", "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def updateStatus(self):\n done = False\n if not self.pg.is_alive():\n done = True\n while not self.pg.msgQueue.empty():\n msg = str(self.pg.msgQueue.get(False))\n self.monitorTextBox.append(msg)\n if done:\n self.timer.stop()\n self.pg.join()\n self.runButton.setEnabled(True)\n self.stopButton.setEnabled(False)\n if self.pg.ex:\n etype, evalue, etrace = self.pg.ex\n el = traceback.format_exception(etype, evalue, etrace)\n for line in el:\n self.monitorTextBox.append(line)\n self.setStatusBar.emit(\n \"Surrogate Failed Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n else:\n self.setStatusBar.emit(\n \"Surrogate Finished, Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n if self.pg.driverFile != \"\":\n try:\n df = os.path.abspath(self.pg.driverFile)\n except:\n pass\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Driver File Location\")\n msgBox.setText(\n \"The surrogate model driver file path is: {0}\".format(\n os.path.abspath(df)\n )\n )\n msgBox.exec_()\n else:\n self.refreshContents()\n self.setStatusBar.emit(\n \"Surrogate Model Generation, Elapsed Time: {0}s\".format(\n math.floor(time.time() - self.timeRunning)\n )\n )", "def run(self):\n while self.running:\n self.__update_battery()\n self.__update_signal()\n time.sleep(5)", "def periodicCall(self):\n self.gui.processIncoming()\n if not self.running:\n import sys\n sys.exit(1)\n self.master.after(UPDATE_DELAY, self.periodicCall)", "def periodicCall(self):\n self.gui.processIncoming(self.cdLen, self.goHold, self.songLength)\n if not self.running:\n # This is the brutal stop of the system.\n # should do some cleanup before actually shutting it down.\n import sys\n sys.exit(1)\n self.master.after(200, self.periodicCall)", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def backgroundStart(): #Background checks thread\n global currentStatus\n while True:\n currentStatus = checkClassChanges()\n time.sleep(10)", 
"def monitor(self):\n import curses\n import inspect\n\n stdscr = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.cbreak()\n width_split = curses.COLS//3-1\n win_done = curses.newwin(curses.LINES-1, width_split, 0, 0)\n win_running = curses.newwin(curses.LINES-1, width_split,\n 0, width_split+1)\n win_pending = curses.newwin(curses.LINES-1, width_split,\n 0, 2*width_split+1)\n stdscr.addstr(curses.LINES-1, 0,\n 'Monitoring started. Press Ctrl+C to stop.')\n stdscr.refresh()\n win_done.addstr(0, 0, 'DONE')\n win_pending.addstr(0, 0, 'PENDING')\n while True:\n try:\n win_done.addstr(1, 0,\n f'{len(self.done)} jobs done')\n list_done = list(self.done)[:curses.LINES-3]\n for idx, fut in enumerate(list_done, start=2):\n fmt_str = f'{id(fut):x} {fut._state}'\n win_done.addstr(idx, 0, fmt_str)\n win_done.refresh()\n\n win_running.clear()\n win_running.addstr(0, 0, 'RUNNING')\n win_running.addstr(1, 0,\n f'{self.running.qsize()} jobs running')\n list_running = list(self.running.items())[:curses.LINES-3]\n for idx, (fut, coro) in enumerate(list_running, start=2):\n coro_state = inspect.getcoroutinestate(coro)\n fmt_str = f'{id(fut):x} {coro_state}'\n win_running.addstr(idx, 0, fmt_str)\n win_running.refresh()\n\n win_pending.clrtoeol()\n win_pending.addstr(1, 0,\n f'{self.pending.qsize()} jobs pending')\n win_pending.refresh()\n time.sleep(.1)\n except KeyboardInterrupt:\n break\n\n curses.nocbreak()\n curses.echo()\n curses.endwin()", "def run(self):\n self.cncLock.acquire()\n self.running = True\n\n # Initialize\n try:\n self.cnc = serial.Serial(self.deviceFile,BAUD_RATE)\n\n self.updaterThread = threading.Thread(target=self.periodic_timer)\n self.updaterThread.start()\n\n # Wake up grbl\n log.info(\"Initializing Grbl...\")\n cmd = \"\\r\\n\\r\\n\"\n self.cnc.write(cmd.encode())\n\n # Wait for grbl to initialize and flush startup text in serial input\n time.sleep(2)\n self.cnc.flushInput()\n self.cncLock.release()\n\n while self.running :\n cmd = self.commandQueue.get().strip() + EOLStr\n if self.running == False:\n break\n self.cncLock.acquire()\n self.cnc.write(cmd.encode())\n\n out = str(self.cnc.readline().strip()) # Wait for grbl response\n if out.find('ok') >= 0 :\n log.debug(f'MSG: {out}') # Debug response\n elif out.find('error') >= 0 :\n log.error(f'ERROR: {out}')\n else:\n log.info(out)\n self.cncLock.release()\n except:\n raise\n finally:\n log.debug(\"CNC main loop left\")\n self.cnc.close()", "def status_watcher(cs, line):\n #print('status watcher watching')\n\n # from the mullvad code, should watch for\n # things like:\n # \"Initialization Sequence Completed\"\n # \"With Errors\"\n # \"Tap-Win32\"\n\n if \"Completed\" in line:\n cs.change_to(cs.CONNECTED)\n return\n\n if \"Initial packet from\" in line:\n cs.change_to(cs.CONNECTING)\n return", "def run(self):\n self.monitor.start()", "def block(self):\n while self.running:\n time.sleep( 1 )", "def pause_while_moving(self,c, ADDR):\r\n\r\n while True:\r\n status = yield self.status(c,ADDR)\r\n if status.startswith(\"STATUS : STOP\"):\r\n break\r\n returnValue('Success!')", "def run():\n logger.verbose(\"bwmon: Thread started\")\n while True:\n lock.wait()\n logger.verbose(\"bwmon: Event received. 
Running.\")\n database.db_lock.acquire()\n nmdbcopy = copy.deepcopy(database.db)\n database.db_lock.release()\n try:\n if getDefaults(nmdbcopy) and len(bwlimit.tc(\"class show dev %s\" % dev_default)) > 0:\n # class show to check if net:InitNodeLimit:bwlimit.init has run.\n sync(nmdbcopy)\n else: logger.log(\"bwmon: BW limits DISABLED.\")\n except: logger.log_exc(\"bwmon failed\")\n lock.clear()", "def control_c(self) -> None:\n time.sleep(0.1) # sometimes it's better to wait a bit\n send_control_c(self.proc, True)", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def poll(self):\n\tself.met = self.button.poll()", "def check_main_stop(notifier):\n pass", "def periodicCall(self):\r\n if self.queue.qsize() != 0:\r\n self.action = self.queue.get()\r\n print(self.action)\r\n \r\n if not self.running:\r\n # This is the brutal stop of the system. 
You may want to do\r\n # some cleanup before actually shutting it down.\r\n import sys\r\n sys.exit(1)\r\n self.master.after(100, self.periodicCall)", "def wm_update(self):\n readback = self.get_pvobj(\"readback\")\n show_pos = self._update_cb(0)\n show_pos()\n with CallbackContext(readback, show_pos):\n try:\n while True:\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass", "def _watch(self):\n # self._popen.wait()\n lines_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in lines_iterator:\n line = line.strip()\n # log.log(\"raw\",self.name.upper()+\" SAYS: \"+line)\n # cmd = line.split(' ')[0]\n # args = line.split(' ')[1:]\n if line[0] == '#':\n self.onEvent(line.split(' '))\n if self.onClose:\n self.onEvent([self.onClose])\n self._running.clear()\n if self.stderr is not None:\n self.stderr.close()", "def start_monitoring(self):\n pass", "def update_ticker(self):\n while True:\n Thread(target=self.update_data_check).start()\n time.sleep(60)", "def start(self):\n self.monitor_lc.start(self.interval)", "async def status(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n up_time = time.time() - self.start_time\n m, s = divmod(up_time, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n stime = time.time() - psutil.boot_time()\n m, s = divmod(stime, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n system_uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n mem = psutil.virtual_memory()\n\n pid = os.getpid()\n memory_use = psutil.Process(pid).memory_info()[0]\n\n content = discord.Embed(title=f\"Miso Bot | version {main.version}\")\n content.set_thumbnail(url=self.client.user.avatar_url)\n\n content.add_field(name=\"Bot process uptime\", value=uptime_string)\n content.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent()}%\")\n content.add_field(name=\"System uptime\", value=system_uptime_string)\n\n content.add_field(name=\"System RAM Usage\", value=f\"{mem.percent}%\")\n content.add_field(name=\"Bot memory usage\", value=f\"{memory_use/math.pow(1024, 2):.2f}MB\")\n\n await ctx.send(embed=content)", "def __call__(self):\n status = self.os.popen('circusctl status monitor').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False", "def _monitor_loop(self):\n while self._continue_running():\n for wl in self._workloads:\n if not wl.running():\n self.log.info('%-20s FAILED', wl.name())\n self._restart_workload(wl)\n else:\n self.log.info('%-20s OK', wl.name())\n\n time.sleep(self._monitor_delay)", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "async def monitor(self, ctx, channel):\n author = ctx.message.author\n author_channel = ctx.message.channel\n\n def check(m):\n try:\n return channels[int(m.content)]\n except:\n return False\n\n channels = self.bot.get_all_channels()\n channels = [c for c in channels\n if c.name.lower() == channel or c.id == channel]\n channels = [c for c in channels if c.type == discord.ChannelType.text]\n\n\n if not channels:\n await self.bot.say(\"No channels found. 
Remember to type just \"\n \"the channel name, no `#`.\")\n return\n\n if len(channels) > 1:\n msg = \"Multiple results found.\\nChoose a server:\\n\"\n for i, channel in enumerate(channels):\n msg += \"{} - {} ({})\\n\".format(i, channel.server, channel.id)\n for page in pagify(msg):\n await self.bot.say(page)\n choice = await self.bot.wait_for_message(author=author,\n timeout=30,\n check=check,\n channel=author_channel)\n if choice is None:\n await self.bot.say(\"You haven't chosen anything.\")\n return\n channel = channels[int(choice.content)]\n else:\n channel = channels[0]\n\n rift = OpenRift(source=author_channel, destination=channel)\n msgfilter = ['$', 'pp', 'paypal', 'moneypak', 'giftcard', 'gift card', 'PM me', 'DM', 'cash']\n\n self.open_rifts[author] = rift\n await self.bot.say(\"Monitor started\")\n msg = \"\"\n while msg == \"\" or msg is not None:\n msg = await self.bot.wait_for_message(author=author,\n channel=author_channel)\n if msg is not None and msg.content.lower() != \"exit\":\n try:\n blankvar = \"blankvar\"\n except:\n await self.bot.say(\"Script error #1\")\n elif msg.content.lower() in msgfilter:\n try:\n await self.bot.say(\"Your message may contain words referring to RMT. Your message has been logged and will be reviewed by Discord staff.\")\n except:\n await self.bot.say(\"Script error #2\")\n else:\n break\n del self.open_rifts[author]\n await self.bot.say(\"Stopping monitor.\")", "def timer_thread_function():\n while True:\n for i, timer in enumerate(superglobals.timer_list):\n if timer.seconds - time.perf_counter() <= 0 and timer.bits & 0b01:\n superglobals.timer_list[i].bits &= 0b10\n for _ in range(10):\n curses.beep()\n time.sleep(0.05)\n for i, countdown in enumerate(superglobals.countdown_list):\n if countdown.seconds - time.perf_counter() <= 0 and \\\n countdown.bits & 0b01:\n superglobals.countdown_list[i].bits &= 0b00\n for _ in range(10):\n curses.beep()\n time.sleep(0.05)", "def is_busy(self):\n cons = self.rpc.call(MsfRpcMethod.ConsoleList)['consoles']\n for c in cons:\n if c['id'] == self.cid:\n return c['busy']", "def run(self):\n self._start_servers()\n monitor = KodiMonitor(self.nx_common, self.nx_common.log)\n while not monitor.abortRequested():\n monitor.update_playback_progress()\n try:\n if self.library_update_scheduled() and self._is_idle():\n self.update_library()\n except RuntimeError as exc:\n self.nx_common.log(\n 'RuntimeError: {}'.format(exc), xbmc.LOGERROR)\n if monitor.waitForAbort(5):\n break\n self._shutdown()", "def _loop(self):\n while True:\n if GameLoop.getInstance()._cancelation_token==True:\n break\n self._update_signal.notify_all()\n sleep(1/60)", "async def monitor():\n global counter\n while True:\n time.sleep(1)\n print(counter, 'reqs/sec')\n counter = 0", "def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)", "def stop_monitoring(self):\n pass", "async def _monitor_recv(self):\n\n while True:\n await RisingEdge(self.clock)\n await ReadOnly()\n if self.bus.valid.value:\n self._recv(int(self.bus.data.value))", "def query_member_status():\n notify_member_status()\n logger.info('signal sent for status report')", "def __loop(self):\n\n self.__update_table()\n self.__update_labels()\n if self.remote_stop:\n self.__stop(\"remote telegram admin\")\n else:\n self.__main_window.after(1000, self.__loop)", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, 
status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)", "def stopped_check(self, timeout=None):", "def timeout_loop(self):\r\n\r\n while not done:\r\n\r\n if (softwareInterfaceTimeout != -1) and (interfaceNotUsed > softwareInterfaceTimeout):\r\n\r\n stop(False)\r\n\r\n \r\n\r\n time.sleep(MONITOR_INTERVAL/1000000.0)\r\n\r\n self.interfaceNotUsed = self.interfaceNotUsed + 1", "def main(self):\n self.logger.info('Main monitor started at {} ({} mode with data logging {} and sms msgs {})'.format(\n print_time(), ('reduced' if self.reduced_mode else 'normal'), ('on' if self.log_data else 'off'),\n ('on' if self.sms_client is not None else 'off')))\n self.logger.info('Refreshing twitter every {} seconds'.format(self.refresh_rate))\n\n while True:\n try:\n self._main()\n except Exception as e:\n self.logger.error(error_msg(e))\n traceback.print_exc()\n self.logger.info('Attempting to restart after 60 seconds'.format(print_time()))\n time.sleep(60)\n self.logger.info('Restarting main monitor')", "def status(self, *args):\n for k, v in self.processers.items():\n if v:\n if v.poll() is None:\n status = 'running'\n else:\n status = 'dead'\n else:\n status = 'stoped'\n print '%s - %s' % (k, status)", "def pauseLoop(self):\n print('SYNC stim detected LOW')\n self.loopRunning = False", "def poll_thread():\n while not stop_flag.wait(0.100): # poll every 100ms\n check_jobs()", "def StatusbarTimer(self):\r\n\t\ttime.sleep(self.statusmsgTimeout)\r\n\t\tself.statusbar.SetStatusText(self.statusmsg)", "def wake_up(self):\n pass", "async def status_update_loop(self):\n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n \n while self.state == CHANNEL_MOVE_STATE_NONE:\n set_value = await self.status_message_update_waiter\n # sleep sets by `None`\n if set_value is not None:\n break\n \n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n await self.update_status_message()\n continue\n \n await self.update_status_message()\n await self.send_done_notification()\n return", "def status_callback(self, data):\n\n print \"arm status callback\", data.data\n if data.data == \"busy\" or data.data == \"error\":\n self.status = 0\n elif data.data == \"free\":\n self.status = 1", "def run(self):\r\n while True:\r\n if self.camera_device.is_detecting():\r\n self.alarm_device.switch_alarm()", "def stop_polling(self):\r\n self.__lib.CC_StopPolling(self.__serno)", "def watch(self):", "def stop(self):\n super(Icmpecho, self).stop()\n self.monitor_thread.join()\n logging.info(\"ICMPecho health monitor plugin: Stopped\")", "def _start_loop_poll(self):\n self._stop_loop_poll() # Stop any existing timer\n self._generator_poll = self._read_qbpm_loop() # Start the loop\n self._timerId_poll = self.startTimer(0) # This is the idle timer\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPause))", "def testPeriodic(self):\n wpilib.LiveWindow.run()", "def run(self):\n\n if self.success and not self.running:\n self.log.info(\"Humidity and temp control started...\")\n self.running = True\n elif self.testmode and not self.running:\n self.log.critical(\"Humidity and temp TEST MODE started!!!\")\n self.running = True\n elif not self.running:\n self.log.info(\"Humidity and temp control NOT started...\")\n return\n\n if not self.stop_measurement_loop and self.success:\n try:\n # Query the environemnt etc from Brandbox\n envvalues = self.vcw.query(\n self.resource, self.resource[\"get_environment\"]\n )\n envvalues = 
envvalues.split(\",\")\n\n # Get dewpoint\n boxvalues = self.vcw.query(\n self.resource, self.resource[\"get_box_environment\"]\n )\n boxvalues = boxvalues.split(\",\")\n\n vacuumvalues = self.vcw.query(\n self.resource, self.resource[\"get_vacuum\"]\n )\n\n # get light\n luxvalues = self.vcw.query(self.resource, self.resource[\"get_lux\"])\n luxvalues = luxvalues.split(\",\")[0]\n\n # if an error happen, as so often with the brandbox read the queue\n if self.readqueue:\n try:\n ans = self.vcw.read(self.resource)\n if ans:\n self.log.critical(\"Brandbox had an non empty queue: {}\".format(ans))\n except:\n self.log.info(\"Brandbox indicated an non empty queue, reading queue yielded no queue...\", exc_info=True)\n\n\n try:\n if float(luxvalues) >= 0.5:\n self.framework[\"Configs\"][\"config\"][\"settings\"][\"lights\"] = True\n else:\n self.framework[\"Configs\"][\"config\"][\"settings\"][\"lights\"] = False\n\n # get door\n # doorvalues = self.vcw.query(self.resource, self.resource[\"get_door\"])\n # doorvalues = doorvalues.split(\",\")[0]\n # if doorvalues == \"1\":\n # self.framework[\"Configs\"][\"config\"][\"settings\"][\"door\"] = False\n # else:\n # self.framework[\"Configs\"][\"config\"][\"settings\"][\"door\"] = True\n\n # get light\n vacuumvalues = vacuumvalues.split(\",\")[0]\n if vacuumvalues == \"1\":\n self.framework[\"Configs\"][\"config\"][\"settings\"][\"vacuum\"] = True\n else:\n self.framework[\"Configs\"][\"config\"][\"settings\"][\"vacuum\"] = False\n\n # Here a list\n self.main.humidity_history = np.append(\n self.main.humidity_history, float(envvalues[1])\n ) # todo: memory leak since no values will be deleted\n self.main.temperatur_history = np.append(\n self.main.humidity_history, float(envvalues[3])\n )\n\n # Write the pt100 and light status and environement in the box to the global variables\n self.framework[\"Configs\"][\"config\"][\"settings\"][\n \"chuck_temperature\"\n ] = float(envvalues[3])\n self.framework[\"Configs\"][\"config\"][\"settings\"][\n \"air_temperature\"\n ] = float(envvalues[0])\n self.framework[\"Configs\"][\"config\"][\"settings\"][\"dew_point\"] = float(\n boxvalues[3]\n )\n\n # Send data to main\n self.queue_to_main.put(\n {\n \"temperature_air\": [float(time()), float(envvalues[0])],\n \"temperature_chuck\": [float(time()), float(envvalues[3])],\n \"dew_point\": [float(time()), float(boxvalues[3])],\n \"humidity\": [float(time()), float(envvalues[1])],\n }\n )\n except:\n self.readqueue = True\n\n except Exception as err:\n self.log.error(\n \"The temperature and humidity controller seems not to be responding. 
Error: {!s}\".format(\n err\n ),\n exc_info=True,\n )\n\n elif self.testmode:\n self.log.critical(\"Testmode sends message to main!\")\n self.queue_to_main.put(\n {\n \"temperature\": [float(time()), float(random.randint(1, 10))],\n \"humidity\": [float(time()), float(random.randint(1, 10))],\n }\n )\n\n if not self.main.stop_measurement_loop:\n self.start_timer(self.run)\n else:\n self.log.info(\n \"Shutting down environment control due to stop of measurement loop\"\n )", "def monitor_current_oncall(self, connection):\n time.sleep(5)\n #current_oncall = self.get_oncall_from_file()\n current_oncall = self.get_oncall_name_from_statusmk('sysadmin')\n while 1:\n #new_oncall = self.get_oncall_from_file()\n new_oncall = self.get_oncall_name_from_statusmk('sysadmin')\n if new_oncall != current_oncall and not new_oncall.startswith('ERROR:'):\n for channel in self.oncall_channels:\n self.send_oncall_update(connection, channel['name'], new_oncall)\n if self.update_oncall:\n self.set_new_oncall(connection, new_oncall)\n current_oncall = new_oncall\n time.sleep(30)", "def run(self):\n while True:\n # Status message from state machine\n self.updateStatusMessage.emit(self.sm.status_message)\n # Serial errors from rexarm\n self.updateJointErrors.emit(self.rexarm.get_errors())\n # Only get rexarm feedback if initialized\n if self.rexarm.initialized:\n self.updateJointReadout.emit(self.rexarm.position_fb)\n self.updateEndEffectorReadout.emit(self.rexarm.get_wrist_pose())\n time.sleep(0.1)", "def _monitor(cls):\n while not cls._stop_thread.is_set():\n cls.shrink_cache()\n time.sleep(random.random() * 10)", "def monitor_and_terminate(self):\n import time\n import datetime\n\n keep_running = True\n\n while keep_running:\n\n print()\n print(datetime.datetime.now().replace(microsecond=0))\n print(self.get_monitor_string())\n\n time.sleep(30)\n\n _, status = self.reporter.get_job_status(self.info)\n if status[\"active\"]+status[\"running\"] == 0:\n keep_running = False\n\n print(\"All tasks done.\")", "def run(self):\n last_time = time.time()\n while self.running:\n now_time = time.time()\n interval = now_time - last_time\n last_time = now_time\n self.update(interval)\n time.sleep(Options['update interval'])", "def __main_loop(self):\n\n while not self.stop:\n self.__lock_data = True\n self.__bus_messages_copy = deepcopy(self.__bus_messages)\n self.__line_messages_copy = deepcopy(self.__line_messages)\n self.__global_messages_copy = deepcopy(self.__global_messages)\n self.__bus_messages = {}\n self.__line_messages = {}\n self.__global_messages = {\"kick reason\": \"\", \"free text\": \"\"}\n buses_to_kick_copy = deepcopy(self.__buses_to_kick)\n self.__buses_to_kick = list()\n self.__lock_data = False\n\n for bus in buses_to_kick_copy: # handles the buses that need to be kicked\n message = \"kicked for reason:\" + self.__global_messages_copy[\"kick reason\"]\n if bus.line_num in self.__line_messages_copy.keys():\n message += self.__line_messages_copy[bus.line_num][\"kick reason\"]\n if bus.line_num in self.__line_messages_copy.keys() and bus.id in self.__bus_messages_copy[\n bus.line_num].keys():\n message += self.__bus_messages_copy[bus.line_num][bus.id][\"kick reason\"]\n print(f\"sending message{message.strip()}\")\n bus.send_to_bus(message.strip())\n\n global_message = self.__build_global_update()\n for line, buses in self.__bus_controller.bus_dict.items():\n line_message = self.__build_line_update(line)\n for bus in buses:\n bus_message = self.__build_bus_update(bus)\n message = global_message + 
line_message + bus_message\n message = message.strip(\"\\n\")\n if message != \"\":\n bus.send_to_bus(message)\n\n sleep(MessagesSender.SLEEP_TIME)\n\n self.__shut_down()\n print(\"polling thread stopped\")", "def update_status(self):\n if self.pwm:\n if self.state == GPIO.HIGH:\n thread = threading.Thread(target=self._pwm_on, args=())\n thread.start()\n elif self.state == GPIO.LOW:\n thread = threading.Thread(target=self._pwm_off, args=())\n thread.start()\n else:\n GPIO.output(self.id_, self.state)\n\n return self.get_status()", "def loop_exit_on_q(self, stats_period):\n start_time = time.time() # This is the only way I managed to make a curse application with\n while time.time() - start_time <= stats_period: # screen refreshing exit on key pressed: make window.getch()\n key = self.myscreen.getch() # non blocking with curses.nodelay(1) (otherwise main loop is interrupted)\n if key == ord('q'): # and check it every [10-50]ms to be responsive.\n curses.endwin()\n hacked_print(\"Monitoring ended by user\") # cf hacked_print method\n return 1\n curses.napms(self.GETCH_REFRESH_MS)", "def _stop_loop_poll(self): # Connect to Stop-button clicked()\n if self._timerId_poll is not None:\n self.killTimer(self._timerId_poll)\n self._generator_poll = None\n self._timerId_poll = None\n self.rbtn.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay))\n self.heartbeat = time.time()", "async def status(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n msg = \"running ✅\" if self._running else \"stopped 🚫\"\n await update.message.reply_text(text=f\"The alarm is {msg}\")", "def monitor(self, s):\n raise NotImplementedError()", "def run_command(self):\r\n self.update_settings()\r\n self.run = True\r\n self.pause = False\r\n if self.run_call is not None:\r\n self.wm.after(1, self.run_call)", "def refresh_status() -> None:\n ...", "def stop():", "def stop():", "def stop():", "def stop():", "def busy(self):\n pass", "def keepalive(self) -> None:", "def lmcScan(self, record=True):\n # LMC Send and configure DAQ\n self.lmc.cmd_send.put(1)\n daq.configure(0, record=record)\n time.sleep(0.1)\n while(self.lmc.disable.get()==1):\n time.sleep(0.05)\n \n # LMC Wait and start DAQ\n self.lmc.cmd_wait.put(1)\n print('Start run')\n daq.begin()\n #print('Open pulse picker')\n #pp.open()\n time.sleep(0.2)\n\n # Send run number to lmc\n print('Send run number to LMC')\n run = daq._control.runnumber()\n print(f'Run {run}')\n self.lmc.cmd_runnum.put(run)\n time.sleep(0.1)\n\n # Start scan\n print('Send trigger to LMC')\n self.lmc.cmd_trig.put(1)\n \n time.sleep(3) # just to make sure we start monitoring the PV when scan_running=1\n redo = 0\n\n # Wait for scan to end\n #print(self.lmc.running.get())\n while(self.lmc.running.get()==1):\n time.sleep(0.2)\n time.sleep(0.1)\n daq.end_run()\n #pp.close()\n redo_daq = 0\n \n run_param = requests.get(self.ws_url).json()['value']['params']\n while not 'DAQ Detector Totals/Events' in run_param.keys():\n time.sleep(0.1)\n run_param = requests.get(self.ws_url).json()['value']['params']\n nEvents = run_param['DAQ Detector Totals/Events']\n print('We got ' + str(nEvents) + ' events')\n if nEvents<1000:\n redo_daq = 3\n #redo_daq=0\n\n redo += redo_daq\n print('Run ended, close pulse picker.\\n')\n\n print('Tell the LMC to save trajectory')\n self.lmc.cmd_save.put(1)\n \n #if run > 0:\n # print('Copy LMC files.')\n # self.get_lmc_files(run)\n return redo", "def Listen(self):\n while True:\n time.sleep(1)", "def notify_stop(self):\n self._notify_stop()", "def 
onCheckTimeOut(self):\r\n\r\n self.pros += 1\r\n self.pb_load.setValue(self.pros * 5)\r\n \r\n # timeout error\r\n if(self.pros == 20):\r\n self.check_timer.stop()\r\n self.onCheckConnectionError()\r\n # connected to server\r\n if(self.pros > 5 and self.check_result == 0): \r\n self.check_timer.stop()\r\n self.checkSession()", "def blink(self):\n\n # Dict hack since nonlocal doesn't exist in py2.7\n blinks = {'': 3}\n period = 150\n\n def _red_callback():\n self.window.statusStringDisplay.setStyleSheet('color: red')\n QtCore.QTimer.singleShot(period, _white_callback)\n\n def _white_callback():\n self.window.statusStringDisplay.setStyleSheet('color: white')\n blinks[''] -= 1\n if blinks[''] > 0:\n QtCore.QTimer.singleShot(period, _red_callback)\n\n _red_callback()", "def run( self ):\r\n \r\n # Execute the per-cycle work specifed by the user\r\n for f in self.updateFuncList:\r\n f() # Please make these lightweight and pertain to UI drawing!\r\n \r\n # Update window\r\n self.rootWin.update_idletasks() # idk , draw or something!\r\n \r\n # Wait remainder of period\r\n elapsed = time.time() * 1000 - self.last\r\n if elapsed < self.stepTime:\r\n sleepTime = int( self.stepTime - elapsed ) \r\n else:\r\n sleepTime = 0\r\n # 4.e. Mark beginning of next loop\r\n self.last = time.time() * 1000 \r\n self.rootWin.after( sleepTime , self.run )", "def your_process(seconds):\r\n global STATUS\r\n sleep(seconds)\r\n STATUS = True", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "def main_loop(self):\n center_point = self.mot.center_point\n\n screen_width = center_point[0] * 2\n screen_height = center_point[1] * 2\n\n time.sleep(1)\n pretty_progress_bar(\n 3,\n )\n\n # while int(time.time()) - start <= 10:\n while not self.mot.abort:\n object1_position = self.mot.position(self.color1)[0]\n object2_velocity = self.mot.speed(self.color2)\n # print(object2_velocity)\n\n self.compare(object1_position[0] < 0.25 * screen_width, 'left')\n self.compare(object1_position[0] > 0.75 * screen_width, 'right')\n self.compare(object1_position[1] < 0.25 * screen_height, 'jump')\n self.burst_compare(object2_velocity > 150, 'fire')\n\n # print('KEYBOARD ABORT')", "def wait_for_instance_status(config, status):\n client = config.create_api_client()\n InstanceId = config.get('InstanceId')\n while True:\n time.sleep(20)\n req = DescribeInstancesRequest.DescribeInstancesRequest()\n result = do_action(client, req)\n items = result[\"Instances\"][\"Instance\"]\n lookups = {item['InstanceId']: item for item in items}\n if lookups[InstanceId]['Status'] == status:\n return\n else:\n click.echo(\"Instance's current status: {}; transfer to status {} ...\".format(\n lookups[InstanceId]['Status'], status\n ))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))\n time.sleep(2)", "def monitor(self, target):\n while self.RUNNING:\n check_time = datetime.now()\n next_check = check_time + timedelta(seconds=target[\"frequency\"])\n\n try:\n self.produce(\n get(target[\"url\"], timeout=target[\"frequency\"] - 0.5),\n target.get(\"regex\"),\n check_time,\n )\n except Timeout:\n self.logger.warning(\"Check for %s timed out\", target[\"url\"])\n except RequestException as e:\n self.logger.error(e)\n except re.error as e:\n self.logger.error(e)\n break\n\n # Busy loop until next check_time\n while datetime.now() < next_check:\n sleep(1)", "async def _check_status(\n self, update: Update, context: ContextTypes.DEFAULT_TYPE\n ) -> None:\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n 
self.system_status_lock.release()\n await update.message.reply_markdown(\"*System Status*\")\n for key in info:\n await update.message.reply_text(f\"{key}: {info[key]}\")", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']" ]
[ "0.6409184", "0.627425", "0.62064976", "0.61520576", "0.61488384", "0.608878", "0.6028219", "0.60113215", "0.58789355", "0.586533", "0.5813013", "0.5793242", "0.57749766", "0.5774655", "0.5764802", "0.57504106", "0.57363623", "0.5703783", "0.57020646", "0.56902844", "0.5681848", "0.56774795", "0.56361425", "0.56279284", "0.56196636", "0.5615766", "0.5613882", "0.56128395", "0.55970484", "0.5590245", "0.55793816", "0.55604243", "0.5554408", "0.55537635", "0.5527861", "0.55232024", "0.55161005", "0.5508241", "0.55029154", "0.54935664", "0.5483543", "0.54694796", "0.54637253", "0.5462207", "0.546049", "0.5459711", "0.5458668", "0.5451601", "0.5451601", "0.5449231", "0.5446287", "0.54392785", "0.5424381", "0.54172915", "0.54129076", "0.54098123", "0.5407357", "0.54059845", "0.5398523", "0.539815", "0.53955066", "0.5395281", "0.5380233", "0.5366593", "0.5363177", "0.5358817", "0.5354157", "0.53461415", "0.534469", "0.53392726", "0.5329803", "0.53284866", "0.53265923", "0.5324979", "0.531033", "0.5296898", "0.5294879", "0.52831763", "0.5280321", "0.5265707", "0.5256756", "0.5256756", "0.5256756", "0.5256756", "0.5253796", "0.5236741", "0.52358085", "0.5231999", "0.5231351", "0.5229913", "0.52287775", "0.5223323", "0.5219369", "0.5217374", "0.52150285", "0.52118665", "0.5211083", "0.5204791", "0.5200058", "0.5196771" ]
0.65879107
0
Create a monitor thread to watch vbmc status.
def _start_monitor(instance="default"):
    global logger_ic
    logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)
    logger_ic.info("ipmi-console monitor thread starts to run.")
    monitor_thread = threading.Thread(target=monitor, args=(instance,))
    monitor_thread.setDaemon(True)
    monitor_thread.start()
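The function above delegates the actual status polling to a `monitor` target and detaches it as a daemon thread. Below is a minimal, self-contained sketch of that pattern, assuming a hypothetical pid-file path and a 3-second poll interval; apart from the standard-library modules, every name and path in it is an illustrative assumption rather than part of the original code base.

import os
import threading
import time


def monitor(instance="default"):
    """Poll a per-instance pid file and exit once the watched process is gone."""
    pid_file = "/var/run/vbmc-{}.pid".format(instance)  # hypothetical path, for illustration only
    while True:
        try:
            with open(pid_file) as f:
                pid = f.readline().strip()
            if not os.path.exists("/proc/{}".format(pid)):
                print("vBMC for node {} is not running, monitor exiting".format(instance))
                break
        except IOError:
            print("pid file for node {} is gone, monitor exiting".format(instance))
            break
        time.sleep(3)


def start_monitor(instance="default"):
    # Daemon thread so it never blocks interpreter shutdown;
    # setting .daemon is equivalent to the older setDaemon(True) call.
    monitor_thread = threading.Thread(target=monitor, args=(instance,))
    monitor_thread.daemon = True
    monitor_thread.start()
    return monitor_thread

Marking the thread as a daemon is what lets the main process exit without waiting for the watcher, which is the reason the document's version calls setDaemon(True) before starting the thread.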
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _StartStatusUpdateThread(self):\n self._status_update_active = True\n self._status_update_thread = threading.Thread(\n name='Status update', target=self._StatusUpdateThreadMain)\n self._status_update_thread.start()", "def run(self):\n self.monitor.start()", "def run():\n logger.verbose(\"bwmon: Thread started\")\n while True:\n lock.wait()\n logger.verbose(\"bwmon: Event received. Running.\")\n database.db_lock.acquire()\n nmdbcopy = copy.deepcopy(database.db)\n database.db_lock.release()\n try:\n if getDefaults(nmdbcopy) and len(bwlimit.tc(\"class show dev %s\" % dev_default)) > 0:\n # class show to check if net:InitNodeLimit:bwlimit.init has run.\n sync(nmdbcopy)\n else: logger.log(\"bwmon: BW limits DISABLED.\")\n except: logger.log_exc(\"bwmon failed\")\n lock.clear()", "def monitor(self, rms):\n pass", "def monitor(self):", "def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def backgroundStart(): #Background checks thread\n global currentStatus\n while True:\n currentStatus = checkClassChanges()\n time.sleep(10)", "async def _start_service_monitor(cls):\n cls.service_monitor = Monitor()\n await cls.service_monitor.start()", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()", "def __init__(self):\n self._monitor_lock = threading.Lock() # type: threading.Lock", "def start(self):\n logging.info(\"ICMPecho health monitor plugin: Starting to watch \"\n \"instances.\")\n\n self.monitor_thread = threading.Thread(target = self.start_monitoring,\n name = self.thread_name)\n self.monitor_thread.daemon = True\n self.monitor_thread.start()", "def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)", "def getMonitor(self) -> ghidra.util.task.TaskMonitor:\n ...", "def __init__(self, task=\"example\", test=False):\n # Set up the calling task that set up the monitor and if this is a test instance\n self.test = test\n self.task = task\n \n # Set the callbacks and monitors\n self.wx.callback(windmon)\n self.wx.monitor()\n\n self.ok2open.callback(okmon)\n self.ok2open.monitor()\n\n self.dmtimer.callback(dmtimemon)\n self.dmtimer.monitor()\n\n self.countrate.callback(countmon)\n self.countrate.monitor()\n \n self.fwhm.callback(fwhmmon)\n self.fwhm.monitor()\n \n self.teqmode.monitor()\n self.vmag.monitor()\n self.ldone.monitor()\n self.counts.monitor()\n self.decker.monitor()\n self.mv_perm.monitor()\n self.chk_close.monitor()\n\n self.sunel.monitor()\n self.aaz.monitor()\n self.ael.monitor()\n self.fspos.monitor()\n self.rspos.monitor()\n self.aafocus.monitor()\n\n # Grab some initial values for the state of the telescope\n \n 
self.wx.poll()\n self.fwhm.poll()\n self.countrate.poll()\n self.ok2open.poll()", "def __init__(self):\n super(MemoryMonitoringThread, self).__init__()\n self.daemon = True", "async def _async_start_monitor(self) -> None:\n if not sys.platform.startswith(\"linux\"):\n return\n info = await system_info.async_get_system_info(self.hass)\n if info.get(\"docker\"):\n return\n\n from pyudev import ( # pylint: disable=import-outside-toplevel\n Context,\n Monitor,\n MonitorObserver,\n )\n\n try:\n context = Context()\n except (ImportError, OSError):\n return\n\n monitor = Monitor.from_netlink(context)\n try:\n monitor.filter_by(subsystem=\"tty\")\n except ValueError as ex: # this fails on WSL\n _LOGGER.debug(\n \"Unable to setup pyudev filtering; This is expected on WSL: %s\", ex\n )\n return\n observer = MonitorObserver(\n monitor, callback=self._device_discovered, name=\"usb-observer\"\n )\n observer.start()\n\n def _stop_observer(event: Event) -> None:\n observer.stop()\n\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_observer)\n self.observer_active = True", "def start_monitoring(self):\n pass", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def start_monitor():\n monitor_enabled = config_json[env]['MONITOR_ENABLED']\n monitor_trigger_interval_s = int( config_json[env]['MONITOR_TRIGGER_INTERVAL_S'] )\n\n # IF SCHEDULE IS ENABLED IN CONFIG:\n if monitor_enabled == \"1\":\n\n print(\"\\nSpace Weather Service Monitor: ENABLED (running every %s seconds)\" % monitor_trigger_interval_s)\n\n # RUN INITIAL CHECK SPACE WEATHER\n processes.process_check_space_weather()\n\n # CREATE SCHEDULER W/ INTERVAL TRIGGER AND START\n scheduler = BackgroundScheduler()\n scheduler.add_job(\n func = processes.process_check_space_weather,\n trigger = IntervalTrigger( seconds = monitor_trigger_interval_s ),\n id = 'check_space_weather',\n name = 'Checking Space Weather Every 30 Seconds')\n scheduler.start()\n atexit.register( lambda: scheduler.shutdown() )\n else:\n print(\"\\nSpace Weather Service Monitor: DISABLED\")", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def monitoredRun(self, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def monitoredRun(self, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def status_watcher(cs, line):\n #print('status watcher watching')\n\n # from the mullvad code, should watch for\n # things like:\n # \"Initialization Sequence Completed\"\n # \"With Errors\"\n # \"Tap-Win32\"\n\n if \"Completed\" in line:\n cs.change_to(cs.CONNECTED)\n return\n\n if \"Initial packet from\" in line:\n cs.change_to(cs.CONNECTING)\n return", "def __initializeMonitor( self ):\n if self.__moduleProperties[ 'standalone' ]:\n self.monitor = gMonitor\n else:\n self.monitor = MonitoringClient()\n self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )\n self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )\n self.monitor.initialize()\n self.monitor.registerActivity( 'CPU', \"CPU Usage\", 'Framework', \"CPU,%\", self.monitor.OP_MEAN, 600 )\n self.monitor.registerActivity( 'MEM', \"Memory Usage\", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )\n # Component monitor\n for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):\n self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )\n 
self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )\n self.monitor.setComponentExtraParam( 'cycles', 0 )\n self.monitor.disable()\n self.__monitorLastStatsUpdate = time.time()", "def __init__(self, main, framework, update_interval=5000):\n\n Thread.__init__(self)\n self.main = main\n self.framework = framework\n self.stop_measurement_loop = self.main.stop_measurement_loop\n self.resource = framework[\"Devices\"][\"temphum_controller\"]\n self.update_interval = float(update_interval)\n self.queue_to_main = framework[\"Message_to_main\"]\n self.vcw = framework[\"VCW\"]\n self.log = logging.getLogger(__name__)\n self.testmode = False\n self.running = False\n self.readqueue = False\n\n # First try if visa_resource is valid\n self.success = False\n try:\n first_try = self.vcw.query(self.resource, self.resource[\"get_environment\"])\n self.framework[\"Configs\"][\"config\"][\"settings\"][\"light\"] = True # Dummy\n if first_try:\n self.success = True\n\n except Exception as e:\n self.log.error(\n \"The temperature and humidity controller seems not to be responding. Error:\"\n + str(e)\n )", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n 
raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def start(self):\n self.monitor_lc.start(self.interval)", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "def monitoredQueue(self, monitorFunc):\n\t\tm_queue = q.Queue()\n\t\tm_thread = t.Thread(target=monitorFunc, args=[m_queue])\n\t\tm_thread.setDaemon(True)\n\t\tm_thread.start()\n\t\treturn m_queue", "def start_cache_monitor(cls):\n with cls._monitor_lock:\n cls._stop_thread.clear()\n if not cls._monitor_thread:\n cls._monitor_thread = threading.Thread(target=cls._monitor)\n if not cls._monitor_thread.is_alive():\n cls._monitor_thread.daemon = True\n cls._monitor_thread.start()", "async def start_monitor(self):\n self._logger.info(\"Starting monitor...\")\n org1_admin = self.fabric_client.get_user(org_name='org1.example.com', name='Admin')\n\n self._logger.info(\"Starting monitor...\")\n cmd = \"/home/martijn/go/bin/go run \" \\\n \"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/fabric-cli.go event listenblock \" \\\n \"--cid mychannel --peer localhost:8001 \" \\\n \"--config /home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/config.yaml\"\n out_file = open(\"transactions.txt\", \"w\")\n my_env = os.environ.copy()\n my_env[\"GOPATH\"] = \"/home/martijn/gocode\"\n self.monitor_process = subprocess.Popen(cmd.split(\" \"), env=my_env, stdout=out_file,\n cwd=\"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/\")\n\n async def get_latest_block_num():\n self._logger.info(\"Getting latest block nr...\")\n response = await self.fabric_client.query_info(\n requestor=org1_admin,\n channel_name='mychannel',\n peers=['peer0.org1.example.com'],\n decode=True\n )\n print(response)\n\n latest_block = response.height\n if latest_block > self.latest_block_num:\n self._logger.info(\"Updating to block nr %d\", latest_block)\n old_latest_block_num = self.latest_block_num\n self.latest_block_num = latest_block\n confirm_time = int(round(time.time() * 1000))\n for confirmed_block_num in range(old_latest_block_num + 1, latest_block + 1):\n self.block_confirm_times[confirmed_block_num] = confirm_time\n\n self.monitor_lc = run_task(get_latest_block_num, interval=0.1)", "def start_monitoring(self, widget, data):\n\t\t#cambio le impostazioni dei locks\n\t\tself.RunFlag = True\n\t\tself.SetLocks()\n\n\t\tprint \"### Sending start signal to Monitor...\"\n\t\tself.start_monitor()\n\t\ttime.sleep(Configure.Interval)", "def startMonitor(user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = None):\n Monitor(user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = testDic)", "def __call__(self):\n status = self.os.popen('circusctl status monitor').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False", "def monitor(self):\n import curses\n import inspect\n\n stdscr = curses.initscr()\n curses.curs_set(0)\n 
curses.noecho()\n curses.cbreak()\n width_split = curses.COLS//3-1\n win_done = curses.newwin(curses.LINES-1, width_split, 0, 0)\n win_running = curses.newwin(curses.LINES-1, width_split,\n 0, width_split+1)\n win_pending = curses.newwin(curses.LINES-1, width_split,\n 0, 2*width_split+1)\n stdscr.addstr(curses.LINES-1, 0,\n 'Monitoring started. Press Ctrl+C to stop.')\n stdscr.refresh()\n win_done.addstr(0, 0, 'DONE')\n win_pending.addstr(0, 0, 'PENDING')\n while True:\n try:\n win_done.addstr(1, 0,\n f'{len(self.done)} jobs done')\n list_done = list(self.done)[:curses.LINES-3]\n for idx, fut in enumerate(list_done, start=2):\n fmt_str = f'{id(fut):x} {fut._state}'\n win_done.addstr(idx, 0, fmt_str)\n win_done.refresh()\n\n win_running.clear()\n win_running.addstr(0, 0, 'RUNNING')\n win_running.addstr(1, 0,\n f'{self.running.qsize()} jobs running')\n list_running = list(self.running.items())[:curses.LINES-3]\n for idx, (fut, coro) in enumerate(list_running, start=2):\n coro_state = inspect.getcoroutinestate(coro)\n fmt_str = f'{id(fut):x} {coro_state}'\n win_running.addstr(idx, 0, fmt_str)\n win_running.refresh()\n\n win_pending.clrtoeol()\n win_pending.addstr(1, 0,\n f'{self.pending.qsize()} jobs pending')\n win_pending.refresh()\n time.sleep(.1)\n except KeyboardInterrupt:\n break\n\n curses.nocbreak()\n curses.echo()\n curses.endwin()", "def start_monitoring(interval: int, devices_list: list):\n s.enter(0, 0, update_statuses, (interval, devices_list))\n s.run()", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def start_monitor(self, collector):\n pass", "def start(self):\n self._logger.info(\"Starting download monitor (interval: %d seconds)\" % self.interval)\n self.monitor_lc = ensure_future(looping_call(0, self.interval, self.monitor_downloads))", "def run(self, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def watch(self):", "def run_monitor(self):\n data = self.get_site_to_check(self.config_file_path)\n results = self.check_sites_status(data)\n self.notify_failures(results)", "def __init__(self, monitor, event_handler, *args, **kwargs):\n Thread.__init__(self, *args, **kwargs)\n\n self.monitor = monitor\n # observer threads should not keep the interpreter alive\n self.daemon = True\n self._stop_event_source, self._stop_event_sink = os.pipe()\n self._handle_event = event_handler", "def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)", "def monitor(bot):\n\n timestamp = datetime.now()\n \n # If the monitor status file exists, clubroom is open.\n if os.path.exists(STATUSFILE):\n if not get_state():\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- Open!\")\n set_state(OPEN)\n \n # Randomly choose and send alert message to channel\n alert = ALERT[random.randint(0, len(ALERTS) - 1)]\n for channel in bot.channels:\n bot.msg(channel, alert)\n else:\n if get_state():\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- Closed!\")\n for channel in bot.channels:\n bot.msg(channel, \"Activity ended.\"))\n 
clear_topic()\n set_state(CLOSED)\n update_report(\"\", \"\", \"\")", "def main():\n # Get the current event loop\n loop = asyncio.get_event_loop()\n # While the moonitor is running\n with aiomonitor.start_monitor(loop=loop):\n # Keep the loop working\n loop.run_forever()", "def monitor(self):\n if self._monitor is None:\n self._monitor = Monitor(self)\n return self._monitor", "def __init__(self, status):\n self.period_s = 0.1\n self.distance_threshold = 5.0\n self.time_below_threshold_s = 0.2\n self.time_above_threshold_s = 2.0\n self.ir_sensor = InfraredSensor(INPUT_4)\n self.ir_sensor.mode = InfraredSensor.MODE_IR_PROX\n self.last_below_threshold_time_s = None\n self.last_above_threshold_time_s = None\n self.status = status\n self.exit = threading.Event()\n super(CollisionWatch, self).__init__()", "def start(self):\n gv.logger.info(\"Videoqualityprobe healthcheck thread started\")\n thread = Thread(target=self.run, args=())\n thread.daemon = True\n self.thread = thread\n thread.start()", "def run(self):\n self.empty_pid_file()\n self.queue = Queue()\n self.monitor_process = Process(\n target=ResourceMonitor.monitor_function,\n args=(self.launcher, self.pid_file, self.frequency, self.queue)\n )\n self.monitor_process.start()", "async def monitor():\n global counter\n while True:\n time.sleep(1)\n print(counter, 'reqs/sec')\n counter = 0", "async def main(self) -> None:\n async with ClientSession() as session:\n tasks = [\n asyncio.create_task(self.monitor(session, params)) for params in self._monitor_list\n ]\n await asyncio.gather(*tasks)", "def monitor(self):\n\t\tresponse = self._request(\"/demovibes/ajax/monitor/{}/\".format(self.next_event))\n\t\tif not response:\n\t\t\treturn None\n\t\t\n\t\tdata = response.read()\n\t\treturn self.parse_monitor(data)", "def poll_thread():\n while not stop_flag.wait(0.100): # poll every 100ms\n check_jobs()", "def updateStatus(self):\n done = False\n if not self.pg.is_alive():\n done = True\n while not self.pg.msgQueue.empty():\n msg = str(self.pg.msgQueue.get(False))\n self.monitorTextBox.append(msg)\n if done:\n self.timer.stop()\n self.pg.join()\n self.runButton.setEnabled(True)\n self.stopButton.setEnabled(False)\n if self.pg.ex:\n etype, evalue, etrace = self.pg.ex\n el = traceback.format_exception(etype, evalue, etrace)\n for line in el:\n self.monitorTextBox.append(line)\n self.setStatusBar.emit(\n \"Surrogate Failed Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n else:\n self.setStatusBar.emit(\n \"Surrogate Finished, Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n if self.pg.driverFile != \"\":\n try:\n df = os.path.abspath(self.pg.driverFile)\n except:\n pass\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Driver File Location\")\n msgBox.setText(\n \"The surrogate model driver file path is: {0}\".format(\n os.path.abspath(df)\n )\n )\n msgBox.exec_()\n else:\n self.refreshContents()\n self.setStatusBar.emit(\n \"Surrogate Model Generation, Elapsed Time: {0}s\".format(\n math.floor(time.time() - self.timeRunning)\n )\n )", "def test_monitor_creation(processor, measure, dialog_sleep):\n def run(measure):\n t = Thread(target=processor._start_monitors, args=(measure,))\n t.start()\n while t.is_alive():\n process_app_events()\n sleep(0.001)\n process_app_events()\n sleep(dialog_sleep)\n\n processor.engine = processor.plugin.create('engine', 'dummy')\n\n measure.add_tool('monitor', 'dummy')\n run(measure)\n assert 
len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measure.add_tool('monitor', 'dummy2')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measure.remove_tool('monitor', 'dummy2')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measure.add_tool('monitor', 'dummy3')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measure.add_tool('monitor', 'dummy4')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n processor.plugin.stop()\n assert not processor.monitors_window", "def monitoring_group(ctx):\n pass", "def monitor_start(event: Event) -> None:\n _LOGGER.info(\"Starting scanner for Eddystone beacons\")\n mon.start()", "def monitor(self, s):\n raise NotImplementedError()", "def run(self):\n self.cncLock.acquire()\n self.running = True\n\n # Initialize\n try:\n self.cnc = serial.Serial(self.deviceFile,BAUD_RATE)\n\n self.updaterThread = threading.Thread(target=self.periodic_timer)\n self.updaterThread.start()\n\n # Wake up grbl\n log.info(\"Initializing Grbl...\")\n cmd = \"\\r\\n\\r\\n\"\n self.cnc.write(cmd.encode())\n\n # Wait for grbl to initialize and flush startup text in serial input\n time.sleep(2)\n self.cnc.flushInput()\n self.cncLock.release()\n\n while self.running :\n cmd = self.commandQueue.get().strip() + EOLStr\n if self.running == False:\n break\n self.cncLock.acquire()\n self.cnc.write(cmd.encode())\n\n out = str(self.cnc.readline().strip()) # Wait for grbl response\n if out.find('ok') >= 0 :\n log.debug(f'MSG: {out}') # Debug response\n elif out.find('error') >= 0 :\n log.error(f'ERROR: {out}')\n else:\n log.info(out)\n self.cncLock.release()\n except:\n raise\n finally:\n log.debug(\"CNC main loop left\")\n self.cnc.close()", "def monitor(self, **kwargs):\n self.show_info(monitor=True, **kwargs)", "def start(interval: int, devices_list: list):\n t = threading.Thread(target=start_monitoring, args=(interval, devices_list))\n t.start()\n with lock:\n global running\n running = True", "def cli_watch(ctx):\n uuids = ctx.obj['uuids']\n\n def handler(dev):\n name = uuids.get(dev.get('ID_FS_UUID'))\n conf = settings.devices.get(name, dict())\n devname = dev['DEVNAME']\n label = conf.get('label', dev.get('ID_FS_LABEL'))\n print('Block device %s %s (name=%s, label=%s, uuid=%s)%s' %\n (dev.action, devname, name, label, dev.get('ID_FS_UUID'),\n ' (nop)' if not conf else ''))\n if not conf:\n return\n\n command = conf.get(ACTIONS.get(dev.action))\n if command:\n print('Running command: %s' % command)\n call(command, shell=True)\n if dev.action == 'add' and conf.get('auto_mount'):\n PMount(conf, name, devname, label=label).mount(error='ignore')\n if dev.action == 'remove':\n info = get_mount_info(devname, label)\n if info:\n PMount(conf, name, info.device, label=label).umount(\n error='ignore')\n\n poll(handler)", "def query_member_status():\n notify_member_status()\n logger.info('signal sent for status report')", "def thread_status(self,status): # general function to get datas/infos from all threads back to the main\n if status[0]==\"Update_Status\":\n if len(status)>2:\n self.update_status(status[1],wait_time=self.wait_time,log_type=status[2])\n else:\n self.update_status(status[1],wait_time=self.wait_time)\n\n elif status[0]==\"Update_scan_index\":\n #status[1] = [ind_scan,ind_average]\n self.ind_scan=status[1][0]\n self.ui.indice_scan_sb.setValue(status[1][0])\n self.ind_average = status[1][1]\n 
self.ui.indice_average_sb.setValue(status[1][1])\n\n elif status[0]==\"Scan_done\":\n self.ui.scan_done_LED.set_as_true()\n self.save_scan()\n if not self.overshoot:\n self.set_ini_positions()\n self.ui.set_scan_pb.setEnabled(True)\n self.ui.set_ini_positions_pb.setEnabled(True)\n self.ui.start_scan_pb.setEnabled(True)\n elif status[0]==\"Timeout\":\n self.ui.log_message.setText('Timeout occurred')", "def __init__(self, notify_window):\n Thread.__init__(self)\n self._notify_window = notify_window\n self._want_abort = 0\n # This starts the thread running on creation, but you could\n # also make the GUI thread responsible for calling this\n self.start()", "async def status(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n up_time = time.time() - self.start_time\n m, s = divmod(up_time, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n stime = time.time() - psutil.boot_time()\n m, s = divmod(stime, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n system_uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n mem = psutil.virtual_memory()\n\n pid = os.getpid()\n memory_use = psutil.Process(pid).memory_info()[0]\n\n content = discord.Embed(title=f\"Miso Bot | version {main.version}\")\n content.set_thumbnail(url=self.client.user.avatar_url)\n\n content.add_field(name=\"Bot process uptime\", value=uptime_string)\n content.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent()}%\")\n content.add_field(name=\"System uptime\", value=system_uptime_string)\n\n content.add_field(name=\"System RAM Usage\", value=f\"{mem.percent}%\")\n content.add_field(name=\"Bot memory usage\", value=f\"{memory_use/math.pow(1024, 2):.2f}MB\")\n\n await ctx.send(embed=content)", "def monitor_behavior_status(self):\n self._flexbe_status_subscriber = rospy.Subscriber('/flexbe/status', BEStatus, self.callback_flexbe_status)", "def start_threading(self):\n\n # For Demo only commented\n self.progress_label.config(text='Running...')\n # self.progress_label.config(text='Estimating Time Reamining')\n self.prev_prog = progressbar.current\n self.submit_thread = threading.Thread(target=self.run)\n self.start_time = time.time()\n self.submit_thread.daemon = True\n self.submit_thread.start()\n self.after(1000, self.check_thread)", "def monitorVirtualMachine(self,node,vmid,command):\n post_data = {'command': str(command)}\n data = self.connect('post',\"nodes/%s/qemu/%s/monitor\" % (node,vmid), post_data)\n return data", "def status_task():\n props = [\n (STAT_TIME, current_time),\n (STAT_CONDITION, weather_condition)\n ]\n\n # Send the status request with the current time and condition.\n send_status_request(props)\n\n # Create and start a timer to repeat this task periodically.\n t = Timer(report_interval, status_task)\n t.setDaemon(True)\n t.start()", "def start(self):\n if self.active:\n # already started\n return\n # start monitor\n self.active = True\n try:\n if self.started:\n # only restart if the previous thread has finished or after error\n self.logger.info(\"Restarting monitor for {}\".format(self.monitor_id))\n self.running()\n else:\n # first time start\n self.started = True\n self.logger.info(\"Starting monitor for {}\".format(self.monitor_id))\n self.running()\n self.finished = True\n except BaseException as ex:\n self.logger.exception(\"Exception during monitor execution for monitor {}: {}\".format(self.monitor_id,\n str(ex)))\n # is currently not active due to error\n self.active = 
False\n # wait for one execution loop to avoid error spamming\n time.sleep(self.wait_time)\n self.start()", "def __init__(self):\n # This has an internal lock, so it is reused for any operations that\n # need to be synchronized\n self.cond = threading.Condition()\n # A set of clients (psutil.Process objects, which are hashable).\n # As long as monitor_stdin() hasn't been called yet, we can assume the\n # parent is still alive, and this can be changed later\n self.clients = {ClientMonitor._PARENT}\n # start background tasks\n self.thread = start_daemon_thread(target=self._background_loop)\n self.server = IPCServer.run_in_background(\n on_message=self._handle_message\n )", "def __new__(cls, *args, **kwargs):\n if not cls.__instances__:\n cls.__instances__ = super().__new__(cls, *args, **kwargs)\n cls._thread_runing = True\n cls._thread = cls._run_monitor_thread()\n return cls.__instances__", "def centec_manager_monitor_timer(self, args):\n try:\n # kill manager if any is running\n daemon = CentecDaemon(CentecTorMechanismDriver.pid_file,\n CentecTorMechanismDriver.centec_manger_name)\n pid = 0\n if daemon.is_running():\n pid = daemon.read()\n utils.execute(['kill', '-9', pid], CentecTorMechanismDriver.root_helper)\n except Exception as e:\n LOG.error(_(\"Can't kill centec manager pid: %(pid)s.\"\n \"Exception: %(exception)s\"), {'pid': pid, 'exception': e})\n\n try:\n monitor_timer = loopingcall.FixedIntervalLoopingCall(self._centec_manager_monitor, args)\n # check manager running for every 10 seconds\n monitor_timer.start(interval=CentecTorMechanismDriver.monitor_interval)\n except Exception as e:\n LOG.error(_(\"Centec manager monitor thread can't start.\"\n \"Exception: %(exception)s\"), {'exception': e})", "def discover_thread(\n callback, interval=60, include_invisible=False, interface_addr=None, *, start=True\n):\n thread = StoppableThread(\n target=_discover_thread,\n args=(callback, interval, include_invisible, interface_addr),\n )\n if start:\n thread.start()\n return thread", "def start(self):\n self._watchdog_thread.start()", "def main(self):\n self.logger.info('Main monitor started at {} ({} mode with data logging {} and sms msgs {})'.format(\n print_time(), ('reduced' if self.reduced_mode else 'normal'), ('on' if self.log_data else 'off'),\n ('on' if self.sms_client is not None else 'off')))\n self.logger.info('Refreshing twitter every {} seconds'.format(self.refresh_rate))\n\n while True:\n try:\n self._main()\n except Exception as e:\n self.logger.error(error_msg(e))\n traceback.print_exc()\n self.logger.info('Attempting to restart after 60 seconds'.format(print_time()))\n time.sleep(60)\n self.logger.info('Restarting main monitor')", "def monitor(self) -> HwMonitor:\n return self._montior", "def watch_process(self):\n psutil.wait_procs([psutil.Process(self._proc.pid)],\n callback=self.start)", "def start(self):\n if self.isAlive == False:\n try:\n time.sleep(1)\n os.remove(os.path.join(self.inbox, 'stop_service.txt'))\n except:\n pass\n try:\n time.sleep(1)\n os.remove(os.path.join(self.inbox, 'ReadDirectoryChangesW.txt'))\n except:\n pass\n return\n \n serviceconfig.logger.debug('*** \"%s\": Starting the worker thread' % self.inbox)\n self.queue = Queue()\n t = Thread(target=self.worker)\n t.start()\n \n \"\"\"\n If files were dropped during the recovering process,\n we need to handle those files\n \"\"\"\n timer = Timer(1, self.triggerChangeEvent, kwargs={})\n timer.start()\n \n while self.isAlive:\n self.queue.put(win32file.ReadDirectoryChangesW (\n self.hDir,\n 1024,\n 
True,\n win32con.FILE_NOTIFY_CHANGE_FILE_NAME |\n win32con.FILE_NOTIFY_CHANGE_DIR_NAME |\n win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |\n win32con.FILE_NOTIFY_CHANGE_SIZE |\n win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |\n win32con.FILE_NOTIFY_CHANGE_SECURITY,\n None,\n None\n ))\n self.queue.join()\n timer.join()\n \n \"\"\"\n Delete the stop_service.txt file generated by stopping the service\n \"\"\"\n try:\n os.remove(os.path.join(self.inbox, 'stop_service.txt'))\n except:\n pass", "def __init__(self, monitor_id: str, wait_time: int, run_interval: bool = False,\n custom_logger: Optional[Logger] = None, ping_interval: int = 60, debug: bool = False,\n debug_logger: Optional[Logger] = None):\n super().__init__(debug, debug_logger)\n # set meta flags\n self.active = False\n self.finished = False\n self.started = False\n\n # store parameters\n self.monitor_id = monitor_id\n self.wait_time = wait_time\n self.ping_interval = ping_interval\n self.run_interval = run_interval\n\n # init logger\n if custom_logger is not None:\n self.logger = custom_logger\n else:\n self.logger = Logger(self.__class__.__name__)\n # handle ping interval issues\n if wait_time < ping_interval:\n self.logger.info(f\"WARNING, monitor wait time {ping_interval} is longer than {wait_time} - overriding\")\n self.ping_interval = wait_time\n # runtime variables\n self.next_execution = 0", "async def monitor(self, ctx, channel):\n author = ctx.message.author\n author_channel = ctx.message.channel\n\n def check(m):\n try:\n return channels[int(m.content)]\n except:\n return False\n\n channels = self.bot.get_all_channels()\n channels = [c for c in channels\n if c.name.lower() == channel or c.id == channel]\n channels = [c for c in channels if c.type == discord.ChannelType.text]\n\n\n if not channels:\n await self.bot.say(\"No channels found. Remember to type just \"\n \"the channel name, no `#`.\")\n return\n\n if len(channels) > 1:\n msg = \"Multiple results found.\\nChoose a server:\\n\"\n for i, channel in enumerate(channels):\n msg += \"{} - {} ({})\\n\".format(i, channel.server, channel.id)\n for page in pagify(msg):\n await self.bot.say(page)\n choice = await self.bot.wait_for_message(author=author,\n timeout=30,\n check=check,\n channel=author_channel)\n if choice is None:\n await self.bot.say(\"You haven't chosen anything.\")\n return\n channel = channels[int(choice.content)]\n else:\n channel = channels[0]\n\n rift = OpenRift(source=author_channel, destination=channel)\n msgfilter = ['$', 'pp', 'paypal', 'moneypak', 'giftcard', 'gift card', 'PM me', 'DM', 'cash']\n\n self.open_rifts[author] = rift\n await self.bot.say(\"Monitor started\")\n msg = \"\"\n while msg == \"\" or msg is not None:\n msg = await self.bot.wait_for_message(author=author,\n channel=author_channel)\n if msg is not None and msg.content.lower() != \"exit\":\n try:\n blankvar = \"blankvar\"\n except:\n await self.bot.say(\"Script error #1\")\n elif msg.content.lower() in msgfilter:\n try:\n await self.bot.say(\"Your message may contain words referring to RMT. 
Your message has been logged and will be reviewed by Discord staff.\")\n except:\n await self.bot.say(\"Script error #2\")\n else:\n break\n del self.open_rifts[author]\n await self.bot.say(\"Stopping monitor.\")", "async def rndactivity_add_watching(self, ctx: commands.Context, *, status: str):\n await self._add_status(ctx, status, game_type=3)", "async def schedule_status():\n while True:\n if controller.scheduled_status_date is not None:\n return\n controller.scheduled_status_date = datetime.now()+timedelta(hours=23)\n await wait_until(controller.scheduled_status_date)\n channel = await client.fetch_channel(Guard.AUTHOR_DM)\n await channel.send(**{\n 'content': controller.get_status(),\n })\n controller.scheduled_status_date = None", "def start_thread(self):\n\n self.thread = threading.Thread(target=self.enoviacheck)\n # Pop up progress bar to shwo login status\n self.progressframe = tk.Toplevel(self, background='white')\n self.progressframe.lift()\n self.progressframe.focus_force()\n self.progressframe.grab_set()\n self.progressframe.resizable(False, False)\n self.progressframe.minsize(width=200, height=50)\n progressbar = ttk.Progressbar(self.progressframe, mode='indeterminate', length=200)\n progressbar.pack(pady=(10, 0), padx=5)\n progressbar.start(10)\n progresslabel = tk.Label(self.progressframe, text='Logging into Enovia', background='white')\n progresslabel.pack(pady=(0, 10))\n # Thread setup\n self.thread.daemon = True\n self.thread.start()\n self.after(20, self.check_thread)", "def create_daemon(self, handle, refresh_delay=5):\n self.handle = handle\n self.refresh_delay = refresh_delay\n self.thread = threading.Thread(name=\"Reddit Daemon\", target=self._keep_getting_new_messages)\n self.thread.daemon = True\n self.thread.start()", "def openValkka(self):\n \n self.livethread =LiveThread(\"livethread\")\n # reserve 10 frames, 300 KB each\n self.shmem_filter =ShmemFrameFilter(shmem_name_tag,10,300*1024)\n # ShmemFrameFilter instantiates the server side of shmem bridge\n # in a separate process do:\n # rb=SharedMemRingBuffer(shmem_name_tag,10,30*1024*1024,False) # shmem ring buffer on the client side\n self.live_out_filter =InfoFrameFilter(\"live_out_filter\",self.shmem_filter)\n \n # Start all threads\n self.livethread.startCall()", "def _monitorProcess( self ):\n self.processContainer.lock.acquire()\n try:\n try:\n if self.processContainer.agent is None or self.processContainer.agent.poll() is not None:\n self.processContainer.agent = self._launchAgentProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )\n finally:\n self.processContainer.lock.release()", "def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()", "def run(self):\n print('Starting CloudWatchLogsMonitor.')\n\n # Initialize pool for multithreading.\n # Use ThreadPool for shared memory (used for keeping track of last polled timestamp)\n pool = ThreadPool()\n\n while True:\n\n # Check for new LogGroups and LogStreams.\n self.update()\n\n for log_group in self.log_groups:\n # For every log group get and append log events to log file.\n # This is run in parallel and is 
non-blocking.\n pool.map_async(LogStream.get_and_append_log_events, log_group.log_streams)\n\n # These lines run the agent synchronously.\n # You need to comment out the pool.map_async line above if using synchronous loop.\n # for log_stream in log_group.log_streams:\n # LogStream.get_and_append_log_events(log_stream)\n\n # Sleep for the polling interval.\n time.sleep(self.default_polling_interval)", "def watch(self):\n wm = pyinotify.WatchManager()\n self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)\n wm.add_watch(self.directory, pyinotify.ALL_EVENTS)\n try:\n self.notifier.loop()\n except (KeyboardInterrupt, AttributeError):\n print_notification(\"Stopping\")\n finally:\n self.notifier.stop()\n self.terminate_processes()", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def test_wait_for_dispatched_statuses(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_statuses(1, 'fooconn')\n self.assertNoResult(d)\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])", "def watch(self):\n observer = Observer()\n observer.schedule(ActionHandler(self.actionHandler),\n path=self.projectPath,\n recursive=True)\n observer.start()\n try:\n while True:\n sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def watch(self, *argv, **kwargs):\n pass", "def __init__(self, runner):\n self.__runner = runner\n self.__wait_time = 1800.0\n\n self.__condition = threading.Condition()\n self.__running = False\n self.__stopping = False\n self.__ready = False\n self.__run = None\n\n self.__thread = threading.Thread(target=self.run, name=\"RunMinder\")\n self.__thread.start()" ]
[ "0.63383204", "0.6285425", "0.6240025", "0.5985517", "0.59852713", "0.59686154", "0.5919293", "0.59185845", "0.5913581", "0.5894443", "0.58611625", "0.58052415", "0.579297", "0.5792904", "0.5758416", "0.5757731", "0.57059795", "0.57036275", "0.5703507", "0.56903523", "0.56861776", "0.5680529", "0.5655633", "0.5655633", "0.5652062", "0.5635369", "0.562589", "0.56108993", "0.5605568", "0.5601744", "0.55924183", "0.5551491", "0.5549589", "0.5540101", "0.5503239", "0.54878575", "0.547059", "0.54669994", "0.5466156", "0.5433251", "0.5389879", "0.53895783", "0.5345563", "0.533621", "0.5334033", "0.531922", "0.5318404", "0.5296115", "0.52746445", "0.52698296", "0.5249077", "0.52295554", "0.52189595", "0.5210312", "0.5204355", "0.5168602", "0.5153384", "0.51375043", "0.51278466", "0.5116089", "0.5106263", "0.51002365", "0.509505", "0.509321", "0.50906813", "0.50823295", "0.508055", "0.507686", "0.50736773", "0.5069135", "0.5067623", "0.50643516", "0.50308704", "0.5022388", "0.50153416", "0.5001555", "0.5001455", "0.49979317", "0.49842897", "0.49800885", "0.49799138", "0.4975853", "0.49676967", "0.49560034", "0.49427179", "0.49426934", "0.49266085", "0.4925887", "0.4924415", "0.49215874", "0.49200264", "0.49143395", "0.49110025", "0.49057028", "0.48944083", "0.4891554", "0.48869377", "0.4884518", "0.48775828", "0.48715684" ]
0.62090486
3
Stop ipmiconsole of target instance specified by its name
def stop(instance="default"): global logger_ic logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance) try: file_ipmi_console_pid = "{}/{}/.ipmi_console.pid".\ format(config.infrasim_home, instance) with open(file_ipmi_console_pid, "r") as f: pid = f.readline().strip() os.kill(int(pid), signal.SIGTERM) logger_ic.info("SIGTERM is sent to pid: {}".format(pid)) os.remove(file_ipmi_console_pid) except IOError: # When pid file is missing, by e.g., node destroy, # find process id by instance name if instance == "default": process_name = "ipmi-console start$" else: process_name = "ipmi-console start {}".format(instance) ps_cmd = r"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1".format(process_name) logger_ic.warning("Fail to find ipmi console pid file, check by:") logger_ic.warning("> {}".format(ps_cmd)) _, pid = run_command(cmd=ps_cmd) logger_ic.warning("ipmi console pid got: {}".format(pid)) if not pid: logger_ic.warning("ipmi console for instance {} is not running".format(instance)) return os.kill(int(pid), signal.SIGTERM) logger_ic.info("SIGTERM is sent to pid: {}".format(pid)) except Exception: logger_ic.warning(traceback.format_exc()) pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self.scion_sh('stop')", "def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)", "def stop_notebook_instance(NotebookInstanceName=None):\n pass", "def stop(self):\n if self.debug:\n print(\"%s stop\" % self.name)\n self.force_exit()", "def stop_instance(InstanceId=None, Force=None):\n pass", "def stop(self):\r\n self.inst.write(':STOP')", "def stop_run(arn=None):\n pass", "def stop(self):\n print(\"Stopping accessory.\")", "def stop_console(self):\n return", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def stop(self):\n\n self.active = False", "def _stop(self):", "def stop(self):\n self.active = False", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def stop_procedure(self):\n pass", "def stop():", "def stop():", "def stop():", "def stop():", "def stop (self):\n pass", "def stop (self):\n pass", "def stop() -> None:", "def stop(self) -> None:\n ...", "def stop(self):\n self._run = False\n self.IA.stop()", "def stop(self) -> str:\n return self.rpc_call(\"stop\")", "def stop():\n\n crate = get_crate()\n # Tell the thread to stop\n crate.mch_comms.stop = True\n # Stop the ipmitool shell process\n try:\n if crate.mch_comms.ipmitool_shell:\n crate.mch_comms.ipmitool_shell.terminate()\n crate.mch_comms.ipmitool_shell.kill()\n except:\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def _stop(self):\n self._pi.stop()", "def stop(self):\n self.api.stop()", "def stop(self):\n\t\tpass", "def stop_app(self, name, stateless):\n raise NotImplementedError", "def Stop(self, *_):\n self.Log('Stopping...')\n self._stop = True", "def stop(self):\n self.killed = True", "def stop(self):", "def stop(self):", "def stop(self) -> None:", "def stop(self) -> None:", "def stop(self):\n self.ae.stop()", "def stop(self):\r\n pass", "def _stop(self, terminate_sim: bool = True):\n steppable_registry = CompuCellSetup.persistent_globals.steppable_registry\n steppable_registry.on_stop()", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n 
subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def cmd_stop(self, app_name=None):\n rc = self.socket_command_with_project('stop', app_name)\n return rc", "def stop_ssm(self, ssm_id):\n pass", "def stop(self):\n if self.p.is_alive():\n self.p.terminate()", "def stop(self) -> None:\r\n stop_command = \"docker stop %s\" % self.name\r\n self.ip.exec_command(stop_command)\r\n print('node %s of blockchain %s at %s:%s stopped' % (self.node_index, self.blockchain_id,\r\n self.ip.address, self.rpc_port))", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect\n pass", "def stop(self):\n self.unhook()", "def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()", "def stop(self) -> None:\n pass", "def stop(self, session, params=None):\n session.set_status('stopping')\n self._run = False", "def _stop(self):\n self.display_end_message()", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", "def stop_execution(self):\n self.send_message(\"control.stop\",None)", "def stop(self):\n return self._send_command(\"stop\")", "def power_off(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.stop()\n self.instance_waiter.wait(instance, self.instance_waiter.STOPPED)\n return True", "def _stop_and_parse_instance(self, iid, **kwargs):\n inst = self._lhost.ui.iperf.instances.get(iid)\n if inst:\n self._lhost.ui.iperf.stop(iid, **kwargs)\n inst_res = self._lhost.ui.iperf.get_results(iid)\n if inst_res:\n cmd = 
inst.get('iperf_cmd')\n units = cmd.get('format', 'm')\n threads = cmd.get('parallel', 1)\n return self._lhost.ui.iperf.parse(inst_res, units=units, threads=threads)", "def stopSpawing(self):\n self.girderManager.stopSpawing()", "def stop(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.stop_server(server)\n return r", "def stop(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"stop\"\n run_command_with_services(context, user, remote, instance, stack, command, services)", "def stop(self):\n self.disconnect()", "def stop(self):\n self.disconnect()", "def stop(self):\n\n self.detach()\n self._pi.stop()\n self._stopped = True", "def Stop(self) :\n\t\t...", "def stop(self, ids: list) -> str:\n # If no ids are passed raise Nothing to do\n if 'None' in ids:\n raise EC2Error('Nothing to do. Need IDS! Arrgh!!!')\n\n try:\n status = self.ec2.instances.filter(InstanceIds=ids).stop()\n return status\n except IOError as e:\n raise EC2Error('Error stopping EC2 Instances {}'.format(e))", "def stop_current_episode(self):\n raise NotImplementedError", "def stop(self, *args, **kwargs):\n return self(AbilityId.STOP, *args, **kwargs)" ]
[ "0.69108975", "0.6825068", "0.6671944", "0.6663248", "0.66268575", "0.65905327", "0.65741503", "0.6553872", "0.6435648", "0.6377335", "0.6354143", "0.63085943", "0.6253596", "0.6247487", "0.62375504", "0.62224996", "0.62192184", "0.61786765", "0.6176067", "0.6176067", "0.6176067", "0.6176067", "0.6175612", "0.6175612", "0.61472714", "0.6140968", "0.6139031", "0.6110157", "0.61087245", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6090885", "0.6075065", "0.6070911", "0.6064815", "0.6062119", "0.6060667", "0.60528153", "0.60478705", "0.60478705", "0.6029264", "0.6029264", "0.60184014", "0.60179603", "0.59979683", "0.5982688", "0.5982167", "0.5971649", "0.5966058", "0.59414804", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59243405", "0.59201616", "0.5913375", "0.59071726", "0.5904653", "0.58977896", "0.58974004", "0.58974004", "0.58974004", "0.58974004", "0.58974004", "0.58929193", "0.58915615", "0.58905655", "0.5886698", "0.58857584", "0.5874893", "0.5870039", "0.5869634", "0.5863877", "0.5863877", "0.58576584", "0.5855428", "0.58538425", "0.5849663", "0.58396375" ]
0.6886424
1
Return an upload ID.
def startupload(request): if "parent" in request.POST: parent = models.Character.objects.get(pk=request.POST["parent"]) else: parent = None c = models.Character(parent=parent) c.save() d = json.loads(request.POST['data']) for k, v in d["texts"].items(): set_text(c, k, v) for k, v in d["sounds"].items(): set_sound(c, k, v) return JsonResponse({"id": c.pk})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initiate_multipart_upload(self):\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = self.headers_baseline, extra = \"?uploads\")\n response = self.s3.send_request(request)\n data = response[\"data\"]\n self.upload_id = getTextFromXml(data, \"UploadId\")\n return self.upload_id", "def upload(self, upload_id):\r\n return u.Upload(self, upload_id)", "def id(self) -> FileID:\n _args: list[Arg] = []\n _ctx = self._select(\"id\", _args)\n return _ctx.execute_sync(FileID)", "def get_upload(arn=None):\n pass", "def get_file_guid(file):\r\n\r\n try:\r\n return str(file.url_guid).replace('-', '')\r\n except FileUpload.DoesNotExist:\r\n return ''", "def create_file_upload(self, upload_filename,\n pub_user, module_supplier_id):\n up_response = self.do_request(\n self.base_url +\n \"/oasis/createFileUpload/\" +\n pub_user + \"/\" +\n upload_filename + \"/\" +\n str(module_supplier_id) + \"/\"\n )\n logger.debug(\"createFileUpload respons: {resp}\".format(\n resp=up_response.content\n ))\n up_id = int(json.loads(up_response.content)['taskId'])\n return up_id", "def get_picture_id(path):\n\t\tif path is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from fileuploader_picture WHERE file=%s\" % (path)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None", "def get_upload_key(self):\n\n if not hasattr(self, '_upload_key'):\n self._upload_key = self.get_storage().bucket.get_key(\n self.cleaned_data['key_name'])\n return self._upload_key", "def get_file_id(username, filename) -> str:\r\n unique_name = username + \"/#/\" + filename\r\n return hashlib.sha224(unique_name.encode(\"utf-8\")).hexdigest()", "def get_image_id(filename):\n del filename\n global GLOBAL_IMG_ID\n GLOBAL_IMG_ID += 1\n return GLOBAL_IMG_ID", "def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(cls):\n return str(uuid.uuid4())", "def get_id(self):\n #return self.__str__().__hash__()\n object_type = self['object_type']\n shortname = self.get_description()\n object_name = self['name']\n filename = self['filename']\n id = \"%s-%s-%s-%s\" % ( object_type, shortname, object_name, filename)\n import md5\n return md5.new(id).hexdigest()\n return id", "def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")", "def get_file_name(self):\n return self.upload.name[6:]", "def get_imageId_from_fileName(filename):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def getID(self):\n return str(self._storage_id)", "def upload(self, filepath):\n if self.ver is not None and LooseVersion(str(self.ver)) < LooseVersion('1.4.0'):\n raise VersionMismatchError('File upload')\n\n try:\n with open(filepath, 'rb') as stream:\n url = '{0}{1}'.format(self.url, '/uploads.json')\n response = self.request('post', url, data=stream, headers={'Content-Type': 'application/octet-stream'})\n except IOError:\n raise NoFileError()\n\n 
return response['upload']['token']", "def get_file_id(file_name, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"files/\").format(workspace, model)\n response = requests.get(uri, headers = header)\n response_json = json.loads(response.text.encode(\"utf-8\"))\n for file in response_json:\n if file[u\"name\"] == unicode(file_name):\n return file[u\"id\"]", "def get_upload_status(self, upload_id: str, token: str) -> Upload:\n data, _, _ = self.json('get', f'/{upload_id}', token)\n return self._parse_upload_status(data)", "def image_id(self):\n return self._image_id", "def get_imageId_from_fackmask(filename):\n filename = os.path.splitext(filename)[0]\n regex = re.compile(r'\\d+')\n iid = regex.search(filename).group(0)\n image_id = int(iid)\n if filename.isdigit():\n return int(filename)\n return image_id", "def get_file_id(self, lfn):\n\n raise NotImplementedError('get_file_id')", "def upload_file_to_drive(self, filename):\n file_metadata = {\n 'name': filename,\n 'mimeType': 'application/vnd.google-apps.document'\n }\n\n media = MediaFileUpload(filename)\n file = self.drive_service.files().create(body=file_metadata,\n media_body=media,fields='id').execute()\n return file.get('id')", "def upload(self) :\n\t\ttry :\n\t\t\treturn self._upload\n\t\texcept Exception as e:\n\t\t\traise e", "def _job_id(files: list, extra: str):\n files_str = \"\"\n for file in files:\n files_str += file\n job_id = hashlib.sha1(files_str.encode() + extra.encode()).hexdigest()\n return job_id", "def get_file(self) -> int:\r\n return self.file", "def get_id(path):\n fid, ext, _ = path.strip().split('/')[-1].partition('.crf')\n if not fid or ext != '.crf':\n filetype = 'Co-Reference Input file'\n error = 'has incorrect file type'\n raise FilenameException(\"Error: %s %s\" % (filetype, error))\n return fid", "def file_system_id(self) -> str:\n return pulumi.get(self, \"file_system_id\")", "def upload():\n return handle_upload(app, request)", "def get_job_id(self, filename):\n return Jobs.get_job_id(filename)", "def get_imageId_from_fileName(filename, id_iter):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def fileid(self):\n if self._fileid is None:\n rv = M.mexec('''set s1=$order(^DIC(\"B\",s0,0))''', str(self.filename[:30]), M.INOUT(\"\"))[0]\n if rv != '':\n self._fileid = rv\n return self._fileid", "def getID(filePath):\r\n\r\n fileID = rmn.fstopenall(filePath,rmn.FST_RO)\r\n return fileID", "def media_entry_id(self):\n return self.getattr('media_entry_id')", "def upload_location(instance, filename):\n new_id = randint(0, 1000)\n return \"%s/%s\" % (new_id, filename)", "def get_upload_folder_id(user_id):\n folder_id = None\n folder_dict = get_user_folders_dict(user_id)\n if UPLOAD_FOLDER_FULL_PATH in folder_dict:\n folder_id = folder_dict[UPLOAD_FOLDER_FULL_PATH]\n else:\n folder_json = canvas_api.create_folder(user_id, UPLOAD_FOLDER_PARENT_PATH, UPLOAD_FOLDER_NAME)\n folder_id = folder_json['id']\n return folder_id", "def anon_upload(infile: str):\n if exists(infile):\n URL = upload(infile)\n return URL\n return 5", "def get_object_id(path):\n return str.split(os.path.basename(path), \"_\")[1][0]", "def get_id(self, name, tenant=None):\n queue = self._get(name, tenant, fields=[\"_id\"])\n return queue.get(\"_id\")", "def upload_file(self, file_name, file_path, target_folder_id):\n file_metadata = {\n 'name': file_name,\n 'parents': [target_folder_id]\n }\n media = MediaFileUpload(file_path + 
file_name, resumable=True)\n file = self.service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: %s' % file.get('id'))\n return file.get('id')", "def _get_id(self):\n return self.id", "def get_id(self):\n\n\t\treturn self.__id", "def image_upload_path(instance, filename):\n return \"adverts/{}/{}\".format(instance.uuid, filename)", "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def path(self):\n return self.get_upload_set().path(self.filename)", "def get_id(self, url):\n return url.split('/')[-1]", "def get_itemid(filename):\n return os.path.splitext(os.path.basename(filename))[0]", "def id(self):\n return self.metadata[\"id\"]", "def get_id(self):\n return self.__id", "def get_id(self):\n return self.__id", "def image_upload_filename(instance, filename):\n prefix = 'photos'\n uhash = abs(hash(u'%s%s' % (datetime.now(), filename)))\n user = instance.album.user.username\n return u'%s/%s/%s_%s' % (prefix, user, uhash, filename)", "def id(self):\n return self.raw_resource[\"id\"]", "def upload_file(client, folder_id, file_name):\n\n new_file = client.folder(folder_id).upload(file_name)\n print(f\"File {new_file.name} uploaded to Box with file ID {new_file.id}\")\n return new_file.id", "def get_upload_path(instance, filename):\n \n return \"user_{username}/{goal}/{file}\".format(id=instance.user.username, \n goal=instance.name.slug, file=filename)", "def upload_to(instance, filename):\n return upload_image_path(filename, 'products')", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def id(self):\n if settings.env_root:\n retpath = self.filename[len(settings.cases_dir):]\\\n .lstrip(os.path.sep)\n base = os.path.splitext(retpath)[0]\n else:\n base = os.path.splitext(os.path.basename(self.filename))[0]\n return base.replace(os.path.sep, '.')", "def id(self):\n if self.cloudserver:\n return self.cloudserver.id\n else:\n return None", "def media_id(self):\n try:\n return Html.toId(self.content)\n except:\n Mp3Error(1)", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_multipart_upload(metadata):\n multipart_upload = None\n metadata_mp_id = None\n filename = metadata.path\n if filename.startswith(u\"/\"):\n filename = filename[1:]\n plug.logger.debug(u\"Getting multipart upload of {}\", filename)\n # Retrieve the stored multipart upload ID\n try:\n metadata_mp_id = metadata.extra['mp_id']\n except KeyError: # No multipart upload ID\n # Raise now is faster (doesn't go through all the MP uploads)\n raise DriverError(\"Unable to retrieve multipart upload ID\")\n if metadata_mp_id not in cache:\n # Try to only request multipart uploads of this 
file\n for mp in S3Conn.list_multipart_uploads(prefix=filename):\n # Go through all the multipart uploads\n # to find the one of this transfer\n if mp.uploadId == metadata_mp_id:\n multipart_upload = mp\n add_to_cache(mp)\n break\n else:\n multipart_upload = cache[metadata_mp_id]\n # At this point it shouldn't be None in any case\n if multipart_upload is None:\n raise DriverError(\"Cannot find upload for file '{}'\"\n .format(filename))\n plug.logger.debug(u\"Found multipart upload of {} - ID {}\",\n filename, multipart_upload.uploadId)\n return multipart_upload", "def id(self):\n return self.raw_resource.uuid", "def getIdFilename(self):\n\n # This checks seem hacky. Ideally checking against the credetnial type\n # to get the filename is right thing to do\n\n cred_file = None\n if self.filename:\n cred_file = self.filename\n elif self.key_fname:\n cred_file = self.key_fname\n elif self.pilot_fname:\n cred_file = self.pilot_fname\n return cred_file", "def storage_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"storage_id\")", "def storage_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"storage_id\")", "def base_image_id(self):\n return self._base_image_id", "def getID(self):\n return self.__id", "def get_id(self):\n return self.uid", "def unique_id(self):\n if self._uuid != '':\n return \"linkplay_media_\" + self._uuid", "def get_id(self):\n \"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def get_upload_ticket(self):\n r = HTTPClient().fetch(self.config['apiroot'] + self.ticket_path, method=\"POST\",\n body=urlencode({'type': 'streaming'}), headers = self.standard_headers,\n validate_cert=not self.config['dev'])\n response = json.loads(r.body)\n return response['ticket_id'], response['upload_link_secure'], response['complete_uri']" ]
[ "0.6757901", "0.67014873", "0.6528737", "0.6503102", "0.644656", "0.6368345", "0.6355548", "0.62546957", "0.6124786", "0.6089393", "0.6034663", "0.6034663", "0.5999296", "0.5985921", "0.59466124", "0.59466124", "0.5939695", "0.5939695", "0.5924135", "0.5912577", "0.5890405", "0.58533216", "0.58111763", "0.5751019", "0.5743787", "0.5738529", "0.5735213", "0.57226956", "0.5721274", "0.57200915", "0.571993", "0.57170254", "0.57123363", "0.5708746", "0.5708012", "0.5693518", "0.5692572", "0.5672993", "0.5672308", "0.5666629", "0.56583023", "0.5657555", "0.5651072", "0.5650815", "0.56425846", "0.56425637", "0.56360465", "0.56327456", "0.56278706", "0.5591024", "0.5582301", "0.55678505", "0.55571175", "0.5549623", "0.5549623", "0.55382425", "0.55315745", "0.55281174", "0.55177873", "0.5513956", "0.5513299", "0.5513299", "0.5513299", "0.5513299", "0.55119413", "0.5499681", "0.5483037", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54829437", "0.54804826", "0.54804826", "0.54804826", "0.54804826", "0.54804826", "0.54804826", "0.547121", "0.5470288", "0.546679", "0.54645026", "0.54645026", "0.54582256", "0.5456607", "0.5451718", "0.5450949", "0.5450708", "0.54496205" ]
0.0
-1
Creates a handle to the Ceph Cluster.
def connect(ceph_config_file, timeout = CEPH_TIMEOUT): handle = rados.Rados(conffile = ceph_config_file) LOGGER.info("librados version: " + str(handle.version())) LOGGER.info("Attempting to connect to: " + str(handle.conf_get('mon initial members'))) handle.connect() #timeout shoudl be specified LOGGER.info("Cluster ID" + handle.get_fsid()) return handle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance", "def add(cls, config: Dict) -> None:\n id_ = config[\"id\"]\n client_file = f\"/etc/ceph/ceph.{id_}.keyring\"\n\n # Create client\n cmd = [\"ceph\", \"auth\", \"get-or-create\", f\"{id_}\"]\n [cmd.append(f\"{k} '{v}'\") for k, v in config.get(\"caps\", {}).items()]\n cnt_key, err = cls.shell(args=cmd)\n\n def put_file(client, file_name, content, file_mode, sudo=True):\n file_ = client.remote_file(sudo=sudo, file_name=file_name, file_mode=file_mode)\n file_.write(content)\n file_.flush()\n file_.close()\n\n nodes_ = config.get(\"nodes\", config.get(\"node\"))\n default_version = str(cls.cluster.rhcs_version.version[0])\n use_cdn = cls.cluster.use_cdn\n if nodes_:\n if not isinstance(nodes_, list):\n nodes_ = [{nodes_: {}}]\n\n def setup(host):\n name = list(host.keys()).pop()\n _build = list(host.values()).pop()\n _node = get_node_by_id(cls.cluster, name)\n if _build.get(\"release\"):\n rhcs_version = _build[\"release\"]\n if not isinstance(rhcs_version, str):\n rhcs_version = str(rhcs_version)\n elif use_cdn:\n rhcs_version = 
default_version\n else:\n rhcs_version = \"default\"\n\n rhel_version = _node.distro_info[\"VERSION_ID\"][0]\n log.debug(\n f\"RHCS version : {rhcs_version} on host {_node.hostname}\\n\"\n f\"with RHEL major version as : {rhel_version}\"\n )\n enable_cmd = \"subscription-manager repos --enable=\"\n disable_all = [\n r\"subscription-manager repos --disable=*\",\n r\"yum-config-manager --disable \\*\",\n ]\n cmd = 'subscription-manager repos --list-enabled | grep -i \"Repo ID\"'\n cdn_ceph_repo = {\n \"7\": {\"4\": [\"rhel-7-server-rhceph-4-tools-rpms\"]},\n \"8\": {\n \"4\": [\"rhceph-4-tools-for-rhel-8-x86_64-rpms\"],\n \"5\": [\"rhceph-5-tools-for-rhel-8-x86_64-rpms\"],\n },\n \"9\": {\n \"5\": [\"rhceph-5-tools-for-rhel-9-x86_64-rpms\"],\n \"6\": [\"rhceph-6-tools-for-rhel-9-x86_64-rpms\"],\n },\n }\n\n rhel_repos = {\n \"7\": [\"rhel-7-server-rpms\", \"rhel-7-server-extras-rpms\"],\n \"8\": [\n \"rhel-8-for-x86_64-baseos-rpms\",\n \"rhel-8-for-x86_64-appstream-rpms\",\n ],\n \"9\": [\n \"rhel-9-for-x86_64-appstream-rpms\",\n \"rhel-9-for-x86_64-baseos-rpms\",\n ],\n }\n\n # Collecting already enabled repos\n out, _ = _node.exec_command(sudo=True, cmd=cmd, check_ec=False)\n enabled_repos = list()\n if out:\n out = out.strip().split(\"\\n\")\n for entry in out:\n repo = entry.split(\":\")[-1].strip()\n enabled_repos.append(repo)\n log.debug(f\"Enabled repos on the system are : {enabled_repos}\")\n\n if rhcs_version != \"default\":\n # Disabling all the repos and enabling the ones we need to install the ceph client\n for cmd in disable_all:\n _node.exec_command(sudo=True, cmd=cmd, timeout=1200)\n\n # Enabling the required CDN repos\n for repos in rhel_repos[rhel_version]:\n _node.exec_command(sudo=True, cmd=f\"{enable_cmd}{repos}\")\n\n for repos in cdn_ceph_repo[rhel_version][rhcs_version]:\n _node.exec_command(sudo=True, cmd=f\"{enable_cmd}{repos}\")\n\n # Clearing the release preference set and cleaning all yum repos\n # Observing selinux package dependency issues for ceph-base\n wa_cmds = [\"subscription-manager release --unset\", \"yum clean all\"]\n for wa_cmd in wa_cmds:\n _node.exec_command(sudo=True, cmd=wa_cmd)\n\n # Copy the keyring to client\n _node.exec_command(sudo=True, cmd=\"mkdir -p /etc/ceph\")\n put_file(_node, client_file, cnt_key, \"w\")\n\n if config.get(\"copy_ceph_conf\", True):\n # Get minimal ceph.conf\n ceph_conf, err = cls.shell(\n args=[\"ceph\", \"config\", \"generate-minimal-conf\"]\n )\n # Copy the ceph.conf to client\n put_file(_node, \"/etc/ceph/ceph.conf\", ceph_conf, \"w\")\n\n # Copy admin keyring to client node\n if config.get(\"copy_admin_keyring\"):\n admin_keyring, _ = cls.shell(\n args=[\"ceph\", \"auth\", \"get\", \"client.admin\"]\n )\n put_file(\n _node, \"/etc/ceph/ceph.client.admin.keyring\", admin_keyring, \"w\"\n )\n\n # Install ceph-common\n if config.get(\"install_packages\"):\n for pkg in config.get(\"install_packages\"):\n _node.exec_command(\n cmd=f\"yum install -y --nogpgcheck {pkg}\", sudo=True\n )\n if config.get(\"git_clone\", False):\n log.info(\"perform cloning operation\")\n role = config.get(\"git_node_role\", \"client\")\n ceph_object = cls.cluster.get_ceph_object(role)\n node_value = ceph_object.node\n utils.perform_env_setup(config, node_value, cls.cluster)\n\n out, _ = _node.exec_command(cmd=\"ls -ltrh /etc/ceph/\", sudo=True)\n log.info(out)\n\n # Hold local copy of the client key-ring in the installer node\n if config.get(\"store-keyring\"):\n put_file(cls.installer, client_file, cnt_key, \"w\")\n\n with parallel() as p:\n 
for node in nodes_:\n if not isinstance(node, dict):\n node = {node: {}}\n p.spawn(\n setup,\n node,\n )\n time.sleep(20)", "async def open(cls, loop, *, aliases=None, configfile=None, **config):\n cluster = cls(loop, aliases=aliases, **config)\n if configfile:\n cluster.config_from_file(configfile)\n await cluster.establish_hosts()\n return cluster", "def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def run(ceph_cluster, **kw):\n config = kw[\"config\"]\n\n build = config.get(\"build\", config.get(\"rhbuild\"))\n ceph_cluster.rhcs_version = build\n\n # Manage Ceph using ceph-admin orchestration\n command = config.pop(\"command\")\n log.info(\"Executing client %s\" % command)\n orch = Orch(cluster=ceph_cluster, **config)\n method = MAP_[command]\n method(orch, config)\n return 0", "def create_database():\n # This should make a connection to a Cassandra instance your local machine \n # (127.0.0.1)\n\n from cassandra.cluster import Cluster\n cluster = Cluster()\n\n # To establish connection and begin executing queries, need a session\n session = cluster.connect()\n \n #Create a Keyspace \n try:\n session.execute(\"\"\"\n CREATE KEYSPACE IF NOT EXISTS cassandra_project \n WITH REPLICATION = \n { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\"\"\"\n )\n\n except Exception as e:\n print(e)\n \n # Set the Keyspace\n try:\n session.set_keyspace(\"cassandra_project\")\n except Exception as e:\n print(e)\n \n return session, cluster", "def cluster_connected(hacluster):\n with charms_openstack.charm.provide_charm_instance() as placement_charm:\n placement_charm.configure_ha_resources(hacluster)\n placement_charm.assess_status()", "def initialize_cluster(cluster):\n logger.info('Creating a new cluster for %s...', cluster)\n\n configuration = ClusterConfiguration(version=__version__)\n ztransaction = cluster.zookeeper.transaction()\n ztransaction.create(cluster.path, BinaryCodec(ClusterConfiguration).encode(configuration))\n ztransaction.create(cluster.get_set_path())\n commit(ztransaction)", "def create(self):\n print(\"+ Creating cluster: {}. 
This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. 
Cluster: {} created.\".format(self.name_hyphenated))", "def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv", "def __cassandra_connect(self):\n self.cluster = Cluster()\n self.session = self.cluster.connect('demo')", "def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output", "def _connect(self):\n cluster = Cluster('http://{}:{}'.format(self.host, self.port))\n authenticator = PasswordAuthenticator('Administrator', self.password)\n cluster.authenticate(authenticator)\n self.client = cluster.open_bucket(self.bucket)", "def create_cluster(self, cluster: dict) -> None:\n if self.master_url:\n return\n try:\n self._cluster_client.create_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster': cluster\n })\n _LOGGER.info(\n 'Cluster created successfully: %s',\n self.cluster_metadata.cluster_name)\n self.master_url = self.get_master_url(self.cluster_metadata)\n except Exception as e:\n if e.code == 409:\n _LOGGER.info(\n 'Cluster %s already exists. 
Continuing...',\n ie.current_env().clusters.default_cluster_name)\n elif e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to create cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot create a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 501:\n _LOGGER.error(\n 'Invalid region provided: %s', self.cluster_metadata.region)\n raise ValueError(\n 'Region {} does not exist!'.format(self.cluster_metadata.region))\n else:\n _LOGGER.error(\n 'Unable to create cluster: %s', self.cluster_metadata.cluster_name)\n raise e", "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def create_cluster(self, name, cluster_type, params, ssh_key, *args, **kwargs):\n raise NotImplementedError", "def create_flink_cluster(self) -> None:\n cluster = {\n 'project_id': self.cluster_metadata.project_id,\n 'cluster_name': self.cluster_metadata.cluster_name,\n 'config': {\n 'software_config': {\n 'optional_components': ['DOCKER', 'FLINK']\n },\n 'gce_cluster_config': {\n 'metadata': {\n 'flink-start-yarn-session': 'true'\n },\n 'service_account_scopes': [\n 'https://www.googleapis.com/auth/cloud-platform'\n ]\n },\n 'endpoint_config': {\n 'enable_http_port_access': True\n }\n }\n }\n self.create_cluster(cluster)", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. 
Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_obj = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_obj)\n checksum = \"crc32c\"\n\n def restart_osd_service():\n osd_services = rados_obj.list_orch_services(service_type=\"osd\")\n for osd_service in osd_services:\n cephadm.shell(args=[f\"ceph orch restart {osd_service}\"])\n time.sleep(30)\n\n def create_pool_write_iops(param, pool_type):\n try:\n pool_name = f\"{pool_type}_pool_{param}\"\n assert (\n rados_obj.create_pool(pool_name=pool_name)\n if \"repli\" in pool_type\n else rados_obj.create_erasure_pool(\n name=pool_name, **{\"pool_name\": pool_name}\n )\n )\n if param == checksum:\n # set checksum value for the pool\n rados_obj.set_pool_property(\n pool=pool_name, props=\"csum_type\", value=param\n )\n # verify checksum value for the pool\n assert (\n param\n == rados_obj.get_pool_property(pool=pool_name, props=\"csum_type\")[\n \"csum_type\"\n ]\n )\n # rados bench will perform IOPs and also verify the num of objs written\n assert rados_obj.bench_write(\n pool_name=pool_name, **{\"max_objs\": 500, \"verify_stats\": False}\n )\n except Exception:\n raise\n finally:\n assert rados_obj.detete_pool(pool=pool_name)\n\n def modify_cache_size(factor):\n cache_value = int(1073741824 * factor)\n cache_cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_cache_size_hdd\",\n \"value\": cache_value,\n }\n assert mon_obj.set_config(**cache_cfg)\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_hdd\")\n log.info(\n f\"bluestore_cache_size_hdd modified value - {out} | Expected {cache_value}\"\n )\n assert int(out.strip(\"\\n\")) == cache_value\n\n cache_value = int(3221225472 * factor)\n cache_cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_cache_size_ssd\",\n \"value\": cache_value,\n }\n assert mon_obj.set_config(**cache_cfg)\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_ssd\")\n log.info(\n f\"bluestore_cache_size_ssd modified value - {out} | Expected {cache_value}\"\n )\n assert int(out.strip(\"\\n\")) == cache_value\n\n if config.get(\"checksums\"):\n doc = (\n \"\\n #CEPH-83571646\"\n \"\\n\\t Apply all the applicable different checksum algorithms on pools backed by bluestore\"\n \"\\n\\t\\t Valid algos: none, crc32c, crc32c_16, crc32c_8, xxhash32, xxhash64\"\n \"\\n\\t 1. Create individual replicated pools for each checksum\"\n \"\\n\\t 2. Verify the default checksum algorithm is crc32c\"\n \"\\n\\t 3. Set different checksum algorithm as global and for each pool\"\n \"\\n\\t 4. Verify the checksum algo being set correctly\"\n \"\\n\\t 5. Write data to each pool using rados bench\"\n \"\\n\\t 6. 
cleanup - Remove all the pools created\"\n )\n log.info(doc)\n log.info(\"Running test case to verify BlueStore checksum algorithms\")\n checksum_list = config.get(\"checksums\")\n\n try:\n # verify default checksum value\n out, _ = cephadm.shell([\"ceph config get osd bluestore_csum_type\"])\n log.info(f\"BlueStore OSD default checksum: {out} | Expected: crc32c\")\n assert \"crc32c\" in out\n\n for checksum in checksum_list:\n # create pools with given config when OSD csum_type is default crc32c\n create_pool_write_iops(\n param=checksum, pool_type=\"replicated\"\n ) if \"crc\" in checksum else create_pool_write_iops(\n param=checksum, pool_type=\"ec\"\n )\n\n for checksum in checksum_list:\n # set the global checksum value\n cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_csum_type\",\n \"value\": checksum,\n }\n assert mon_obj.set_config(**cfg)\n\n # verify the newly set global checksum value\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_csum_type\")\n assert checksum in out\n log.info(f\"global checksum set verified - {out}\")\n\n # create pools with given config when OSD csum_type is varied\n create_pool_write_iops(\n param=checksum, pool_type=\"replicated\"\n ) if \"crc\" in checksum else create_pool_write_iops(\n param=checksum, pool_type=\"ec\"\n )\n\n except Exception as E:\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.error(E)\n log.exception(E)\n return 1\n finally:\n # reset global checksum config\n assert mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_csum_type\"}\n )\n\n # restart osd services\n restart_osd_service()\n wait_for_clean_pg_sets(rados_obj, timeout=300, _sleep=10)\n\n log.info(\"BlueStore Checksum algorithm verification completed.\")\n return 0\n\n if config.get(\"bluestore_cache\"):\n doc = (\n \"\\n #CEPH-83571675\"\n \"\\n\\t Verify BlueStore cache default values.\"\n \"\\n\\t Tune cache parameters and perform IOPS\"\n \"\\n\\t 1. Verify the default value for - bluestore_cache_size(0)\"\n \" | bluestore_cache_size_hdd (1GB) | bluestore_cache_size_ssd (3GB)\"\n \"\\n\\t 2. Modify the value of bluestore_cache_size_ssd and bluestore_cache_size_hdd\"\n \"\\n\\t 3. Verify the values being reflected in ceph config\"\n \"\\n\\t 4. Create replicated and ec pool and perform IOPS\"\n \"\\n\\t 5. 
cleanup - Remove all the pools created and reset configs modified\"\n )\n log.info(doc)\n log.info(\"Running test case to verify BlueStore Cache size tuning\")\n\n try:\n # verify default value for bluestore cache\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size\")\n log.info(f\"bluestore_cache_size default value - {out} | Expected 0\")\n assert int(out.strip(\"\\n\")) == 0\n\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_hdd\")\n log.info(\n f\"bluestore_cache_size_hdd default value - {out} | Expected 1073741824\"\n )\n assert int(out.strip(\"\\n\")) == 1073741824\n\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_ssd\")\n log.info(\n f\"bluestore_cache_size_ssd default value - {out} | Expected 3221225472\"\n )\n assert int(out.strip(\"\\n\")) == 3221225472\n\n # modify ssd and hdd cache (increase)\n modify_cache_size(factor=1.5)\n\n # restart osd services\n restart_osd_service()\n\n # perform iops\n create_pool_write_iops(param=\"cache_inc\", pool_type=\"replicated\")\n create_pool_write_iops(param=\"cache_inc\", pool_type=\"ec\")\n\n # modify ssd and hdd cache (decrease)\n modify_cache_size(factor=0.7)\n\n # restart osd services\n restart_osd_service()\n\n # perform iops\n create_pool_write_iops(param=\"cache_dec\", pool_type=\"replicated\")\n create_pool_write_iops(param=\"cache_dec\", pool_type=\"ec\")\n\n except Exception as E:\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.error(E)\n log.exception(E)\n return 1\n finally:\n # reset modified cache configs\n mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_cache_size_hdd\"}\n )\n mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_cache_size_ssd\"}\n )\n\n # restart osd services\n restart_osd_service()\n wait_for_clean_pg_sets(rados_obj, timeout=300, _sleep=10)\n\n log.info(\"BlueStore cache size tuning verification completed.\")\n return 0", "def connect(connstr, # type: str\n *options, # type: ClusterOptions\n **kwargs, # type: Dict[str, Any]\n ) -> Cluster:\n cluster = Cluster(connstr, *options, **kwargs)\n return cluster", "def do_create(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Check cluster size constraint if target cluster is specified\n cluster = cm.Cluster.load(self.context, cluster_id)\n desired = no.Node.count_by_cluster(self.context, cluster_id)\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n # cannot place node into the cluster\n no.Node.update(self.context, self.entity.id,\n {'cluster_id': '', 'status': consts.NS_ERROR})\n return self.RES_ERROR, result\n\n res, reason = self.entity.do_create(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Update cluster's desired_capacity and re-evaluate its status no\n # matter the creation is a success or not because the node object\n # is already treated as member of the cluster and the node\n # creation may have changed the cluster's status\n cluster.eval_status(self.context, consts.NODE_CREATE,\n desired_capacity=desired)\n if res:\n return self.RES_OK, 'Node created successfully.'\n else:\n return self.RES_ERROR, reason", "def run(ceph_cluster, **kw):\n try:\n log.info(f\"MetaData Information {log.metadata} in {__name__}\")\n fs_util = FsUtils(ceph_cluster)\n\n config = kw.get(\"config\")\n build = config.get(\"build\", config.get(\"rhbuild\"))\n clients = ceph_cluster.get_ceph_objects(\"client\")\n clients[0].upload_file(\n 
\"tests/cephfs/clients/file_lock_utitlity.py\",\n \"/home/cephuser/file_lock_utility.py\",\n sudo=True,\n )\n clients[1].upload_file(\n \"tests/cephfs/clients/file_lock_utitlity.py\",\n \"/home/cephuser/file_lock_utility.py\",\n sudo=True,\n )\n version, rc = clients[0].exec_command(\n sudo=True, cmd=\"ceph version --format json\"\n )\n fs_util.prepare_clients([clients[0]], build)\n fs_util.auth_list([clients[0], clients[1]])\n if not build.startswith((\"3\", \"4\", \"5\")):\n if not fs_util.validate_fs_info(clients[0], \"cephfs\"):\n log.error(\"FS info Validation failed\")\n return 1\n mounting_dir = \"\".join(\n random.choice(string.ascii_lowercase + string.digits)\n for _ in list(range(10))\n )\n fuse_mounting_dir = f\"/mnt/cephfs_fuse{mounting_dir}/\"\n fs_util.fuse_mount([clients[0], clients[1]], fuse_mounting_dir)\n\n kernel_mounting_dir = f\"/mnt/cephfs_kernel{mounting_dir}/\"\n mon_node_ips = fs_util.get_mon_node_ips()\n fs_util.kernel_mount(\n [clients[0], clients[1]], kernel_mounting_dir, \",\".join(mon_node_ips)\n )\n rc = unlink_file(\n clients[0],\n clients[1],\n \"fuse_mount.txt\",\n fuse_mounting_dir,\n validate_from=[kernel_mounting_dir],\n )\n\n if rc:\n raise CommandFailed(\"Unlink of the file is failing when file is locked\")\n rc = unlink_file(\n clients[0],\n clients[1],\n \"kernel_mount.txt\",\n kernel_mounting_dir,\n validate_from=[fuse_mounting_dir],\n )\n if rc:\n raise CommandFailed(\"Unlink of the file is failing when file is locked\")\n\n return 0\n\n except Exception as e:\n log.error(e)\n log.error(traceback.format_exc())\n return 1\n finally:\n log.info(\"---clean up---------\")\n fs_util.client_clean_up(\n \"umount\", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir\n )\n fs_util.client_clean_up(\n \"umount\",\n kernel_clients=[clients[0]],\n mounting_dir=kernel_mounting_dir,\n )", "def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager", "def test_create_cluster_network(self):\n pass", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data", "def get_cluster_conf(self):\n # Load cluster configuration file\n fpath = Path(self.cmd_opts.get(CLI_CMDOPT.DCOS_CLUSTERCFGPATH))\n\n # Unblock irrelevant local operations\n if str(fpath) == 'NOP':\n self.cluster_conf_nop = True\n LOG.info(f'{self.msg_src}: cluster_conf: NOP')\n return {}\n\n if not fpath.is_absolute():\n if self.inst_storage.cfg_dpath.exists():\n fpath = self.inst_storage.cfg_dpath.joinpath(fpath)\n else:\n fpath = Path('.').resolve().joinpath(fpath)\n\n 
cluster_conf = cr_utl.rc_load_ini(\n fpath, emheading='Cluster setup descriptor'\n )\n\n # CLI options take precedence, if any.\n # list(tuple('ipaddr', 'port'))\n cli_master_priv_ipaddrs = [\n ipaddr.partition(':')[::2] for ipaddr in\n self.cmd_opts.get(CLI_CMDOPT.MASTER_PRIVIPADDR, '').split(' ') if\n ipaddr != ''\n ]\n mnode_sects = [\n sect for sect in cluster_conf if sect.startswith('master-node')\n ]\n # iterator(tuple('ipaddr', 'port'), str)\n change_map = zip(cli_master_priv_ipaddrs, mnode_sects)\n for item in change_map:\n if item[0][0]:\n cluster_conf[item[1]]['privateipaddr'] = item[0][0]\n if item[0][1]:\n try:\n port = int(item[0][1])\n except (ValueError, TypeError):\n port = cm_const.ZK_CLIENTPORT_DFT\n port = (port if 0 < port < 65536 else\n cm_const.ZK_CLIENTPORT_DFT)\n cluster_conf[item[1]]['zookeeperclientport'] = port\n\n # Add extra 'master-node' sections, if CLI provides extra arguments\n extra_cli_items = cli_master_priv_ipaddrs[len(mnode_sects):]\n for n, item in enumerate(extra_cli_items):\n if item[0]:\n # TODO: Implement collision tolerance for section names.\n cluster_conf[f'master-node-extra{n}'] = {}\n cluster_conf[f'master-node-extra{n}']['privateipaddr'] = (\n item[0]\n )\n if item[1]:\n try:\n port = int(item[1])\n except (ValueError, TypeError):\n port = cm_const.ZK_CLIENTPORT_DFT\n port = (port if 0 < port < 65536 else\n cm_const.ZK_CLIENTPORT_DFT)\n cluster_conf[f'master-node-extra{n}'][\n 'zookeeperclientport'\n ] = port\n # DC/OS storage distribution parameters\n cli_dstor_url = self.cmd_opts.get(CLI_CMDOPT.DSTOR_URL)\n cli_dstor_pkgrepo_path = self.cmd_opts.get(\n CLI_CMDOPT.DSTOR_PKGREPOPATH\n )\n cli_dstor_pkglist_path = self.cmd_opts.get(\n CLI_CMDOPT.DSTOR_PKGLISTPATH\n )\n cli_dstor_dcoscfg_path = self.cmd_opts.get(\n CLI_CMDOPT.DSTOR_DCOSCFGPATH\n )\n if not cluster_conf.get('distribution-storage'):\n cluster_conf['distribution-storage'] = {}\n\n if cli_dstor_url:\n cluster_conf['distribution-storage']['rooturl'] = cli_dstor_url\n if cli_dstor_pkgrepo_path:\n cluster_conf['distribution-storage']['pkgrepopath'] = (\n cli_dstor_pkgrepo_path\n )\n if cli_dstor_pkglist_path:\n cluster_conf['distribution-storage']['pkglistpath'] = (\n cli_dstor_pkglist_path\n )\n if cli_dstor_dcoscfg_path:\n cluster_conf['distribution-storage']['dcoscfgpath'] = (\n cli_dstor_dcoscfg_path\n )\n\n # Local parameters of DC/OS node\n cli_local_priv_ipaddr = self.cmd_opts.get(CLI_CMDOPT.LOCAL_PRIVIPADDR)\n if not cluster_conf.get('local'):\n cluster_conf['local'] = {}\n\n if cli_local_priv_ipaddr:\n cluster_conf['local']['privateipaddr'] = cli_local_priv_ipaddr\n\n return cluster_conf", "def Cluster(request, io_loop):\n\n def ClusterConstructor(**kwargs):\n log = logging.getLogger(__file__)\n log.setLevel(logging.DEBUG)\n log.handlers = [logging.StreamHandler(sys.stdout)]\n kwargs['log'] = log\n engine_launcher_class = kwargs.get(\"engine_launcher_class\")\n\n if (\n isinstance(engine_launcher_class, str)\n and \"MPI\" in engine_launcher_class\n and shutil.which(\"mpiexec\") is None\n ):\n pytest.skip(\"requires mpiexec\")\n\n cfg = kwargs.setdefault(\"config\", Config())\n cfg.EngineLauncher.engine_args = ['--log-level=10']\n cfg.ControllerLauncher.controller_args = ['--log-level=10']\n kwargs.setdefault(\"controller_args\", ['--ping=250'])\n\n c = cluster.Cluster(**kwargs)\n assert c.config is cfg\n request.addfinalizer(c.stop_cluster_sync)\n return c\n\n yield ClusterConstructor", "def get_cluster_config(cohesity_client):\n config = 
cohesity_client.cluster.get_cluster()\n return config", "def create_cluster(self, provision_details, project_id=\"\"):\n response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'\n ,body=provision_details)\n return response", "def add_delete_compute_cinder_ceph(self):\n\n self.env.revert_snapshot('ready_with_9_slaves')\n\n self.show_step(1, initialize=True)\n data = {\n 'volumes_lvm': True,\n 'volumes_ceph': False,\n 'images_ceph': True,\n 'osd_pool_size': '2',\n 'tenant': 'scalegroup5',\n 'user': 'scalegroup5',\n 'password': 'scalegroup5',\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT['tun']\n }\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n settings=data\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['compute', 'ceph-osd', 'cinder'],\n 'slave-05': ['compute', 'ceph-osd', 'cinder']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(2)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(3)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-06': ['compute', 'ceph-osd', 'cinder']\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(7)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['compute', 'ceph-osd', 'cinder']\n }\n )\n\n with self.fuel_web.get_ssh_for_node('slave-04') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-04': ['compute', 'ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(10)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(11)\n with self.fuel_web.get_ssh_for_node('slave-07') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['compute', 'ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(14)\n self.fuel_web.run_ostf(cluster_id)\n\n self.env.make_snapshot('add_delete_compute_cinder_ceph')", "def _create_connection(self):\r\n if not self._hosts:\r\n raise CQLConnectionError(\"At least one host required\")\r\n\r\n hosts = copy(self._hosts)\r\n random.shuffle(hosts)\r\n\r\n for host in hosts:\r\n try:\r\n transport = self._create_transport(host)\r\n new_conn = cql.connect(\r\n host.name,\r\n host.port,\r\n user=self._username,\r\n password=self._password,\r\n consistency_level=self._consistency,\r\n transport=transport\r\n )\r\n new_conn.set_cql_version('3.0.0')\r\n return new_conn\r\n except Exception as exc:\r\n logging.debug(\"Could not establish connection to\"\r\n \" {}:{} ({!r})\".format(host.name, host.port, exc))\r\n\r\n raise CQLConnectionError(\"Could not connect to any server in cluster\")", "def cluster_create():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"name\"] or not r.form[\"host_id\"] or not \\\n r.form[\"consensus_plugin\"] or not 
r.form[\"size\"]:\n logger.warning(\"cluster post without enough data\")\n response_fail[\"error\"] = \"cluster POST without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n name, host_id, consensus_plugin, consensus_mode, size = \\\n r.form['name'], r.form['host_id'], r.form['consensus_plugin'],\\\n r.form['consensus_mode'] or CONSENSUS_MODES[0], int(r.form[\n \"size\"])\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.debug(\"Unknown consensus_plugin={}\".format(\n consensus_plugin))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \\\n not in CONSENSUS_MODES:\n logger.debug(\"Invalid consensus, plugin={}, mode={}\".format(\n consensus_plugin, consensus_mode))\n return jsonify(response_fail), CODE_BAD_REQUEST\n\n if size not in CLUSTER_SIZES:\n logger.debug(\"Unknown cluster size={}\".format(size))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if cluster_handler.create(name=name, host_id=host_id,\n consensus_plugin=consensus_plugin,\n consensus_mode=consensus_mode,\n size=size):\n logger.debug(\"cluster POST successfully\")\n return jsonify(response_ok), CODE_CREATED\n else:\n logger.debug(\"cluster creation failed\")\n response_fail[\"error\"] = \"Failed to create cluster {}\".format(\n name)\n return jsonify(response_fail), CODE_BAD_REQUEST", "def add_delete_compute_cinder_ceph_ephemeral(self):\n\n self.env.revert_snapshot('ready_with_9_slaves')\n\n self.show_step(1, initialize=True)\n data = {\n 'volumes_lvm': True,\n 'volumes_ceph': False,\n 'images_ceph': True,\n 'ephemeral_ceph': True,\n 'osd_pool_size': '2',\n 'tenant': 'scalegroup6',\n 'user': 'scalegroup6',\n 'password': 'scalegroup6'\n }\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n settings=data\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['compute'],\n 'slave-05': ['ceph-osd', 'cinder'],\n 'slave-06': ['ceph-osd', 'cinder']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(2)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(3)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['ceph-osd', 'cinder']\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(6)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(7)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-08': ['ceph-osd', 'cinder']\n }\n )\n with self.fuel_web.get_ssh_for_node('slave-05') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-05': ['ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(10)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(11)\n with self.fuel_web.get_ssh_for_node('slave-08') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-08': ['ceph-osd', 'cinder']\n },\n pending_addition=False,\n pending_deletion=True\n )\n self.show_step(12)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(13)\n 
self.fuel_web.verify_network(cluster_id)\n\n self.show_step(14)\n self.fuel_web.run_ostf(cluster_id)\n self.env.make_snapshot(\"add_delete_compute_cinder_ceph_ephemeral\")", "def setup_node(\n *,\n # Change this to take host, user, and identity_file?\n # Add some kind of caching for SSH connections so that they\n # can be looked up by host and reused?\n ssh_client: paramiko.client.SSHClient,\n services: list,\n cluster: FlintrockCluster):\n host = ssh_client.get_transport().getpeername()[0]\n ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n\n echo {private_key} > \"$HOME/.ssh/id_rsa\"\n echo {public_key} >> \"$HOME/.ssh/authorized_keys\"\n\n chmod 400 \"$HOME/.ssh/id_rsa\"\n \"\"\".format(\n private_key=shlex.quote(cluster.ssh_key_pair.private),\n public_key=shlex.quote(cluster.ssh_key_pair.public)))\n\n with ssh_client.open_sftp() as sftp:\n sftp.put(\n localpath=os.path.join(SCRIPTS_DIR, 'setup-ephemeral-storage.py'),\n remotepath='/tmp/setup-ephemeral-storage.py')\n\n logger.info(\"[{h}] Configuring ephemeral storage...\".format(h=host))\n # TODO: Print some kind of warning if storage is large, since formatting\n # will take several minutes (~4 minutes for 2TB).\n storage_dirs_raw = ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n python /tmp/setup-ephemeral-storage.py\n rm -f /tmp/setup-ephemeral-storage.py\n \"\"\")\n storage_dirs = json.loads(storage_dirs_raw)\n\n cluster.storage_dirs.root = storage_dirs['root']\n cluster.storage_dirs.ephemeral = storage_dirs['ephemeral']\n\n ensure_java8(ssh_client)\n\n for service in services:\n service.install(\n ssh_client=ssh_client,\n cluster=cluster)", "def _createMaster(self):\n\n host = self.to_config['address']\n port = self.to_config['port']\n master = modbus_tcp.TcpMaster(host=host, port=port,\n timeout_in_sec=10.0) #@TODO: Put timeout in configuration\n return master", "def connect(self):\n self.cluster = Cluster([self.db_node])\n try:\n self.session = self.cluster.connect()\n self.session.default_timeout = DEFAULT_TIMEOUT\n except Exception as e:\n raise StorageError(\"Cannot connect to {}\".format(self.db_node), e)", "def setup_keyspace():\n\n try: \n # Make a connection to a Cassandra instance in the local machine (127.0.0.1)\n cluster = Cluster(['127.0.0.1']) \n # To establish connection and begin executing queries, need a session\n session = cluster.connect()\n except Exception as e:\n print(e)\n\n # Create a Keyspace\n try:\n session.execute(keyspace_create)\n except Exception as e:\n print(e)\n\n # Set KEYSPACE to the keyspace specified above\n try:\n session.set_keyspace('darien_cassandra')\n except Exception as e:\n print(e)\n\n return cluster, session", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, 
response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def test_crud_cluster_node(self):\n # create the parent cluster\n cluster_id = self._create_cluster()\n\n # create cluster node\n response = self._create_cluster_node(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n # list existing objects\n node_id = self._list_cluster_node(cluster_id)\n\n # check it exists\n node_id = self._check_cluster_node_exists(cluster_id, node_id)\n\n # delete the object\n response = self._delete_cluster_node(cluster_id, node_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n self._check_no_cluster_nodes_exist(cluster_id)", "def cluster_create_znode(cluster_name, znode, headers=None):\n\n _zclient = get_client(cluster_name,\n headers or request.headers)\n acl_config = wildutils.ACLConfig(headers or request.headers)\n data = request_data(request)\n real_path_list = list()\n for (_znode, _zdata) in data.items():\n _znodepath = _zclient.create(_znode,\n value=bytes(_zdata),\n makepath=True,\n acl=acl_config.make_acl(),\n ephemeral=False,\n sequence=False)\n real_path_list.append(_znodepath)\n resp = Response(str(real_path_list),\n status=200,\n mimetype=\"text/plain\")\n return resp", "def cluster_create(self, cluster_name, license):\n return self.request( \"cluster-create\", {\n 'cluster_name': [ cluster_name, 'cluster-name', [ basestring, 'None' ], False ],\n 'license': [ license, 'license', [ basestring, 'license-code-v2' ], False ],\n }, {\n } )", "def _initialize_cluster(filename):\n\tstar_cluster = cluster.Cluster(filename)\n\tprint(\"\\nYour star cluster is being created ...\")\n\tstar_cluster.populate_celestials()\n\treturn star_cluster", "def create_dedicated_clusters(ws,number_of_clusters, number_of_nodes, idle_time_out):\n clusters = {}\n for i in range (0,number_of_clusters):\n dig = '{0}{1}'.format(''.join(random.sample(string.digits, 2)),''.join(random.sample(string.ascii_letters, 2)))\n cluster_name = 'NC6-D{1}-{0}'.format(dig,number_of_nodes)\n try:\n compute_target = ComputeTarget(workspace=ws, name=cluster_name) \n except ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size=vmsize,\n max_nodes=number_of_nodes, \n idle_seconds_before_scaledown=idle_time_out)\n compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n compute_target.wait_for_completion(show_output=True)\n clusters[i] = compute_target\n return clusters", "def cluster(self, cluster_id, serve_nodes=3):\n return Cluster(cluster_id, self, serve_nodes=serve_nodes)", "def validate_cluster(self, resources, instances):\n instances_names = list(instances.values())\n assert ocp.wait_for_nodes_ready(instances_names), (\n \"Not all nodes reached status Ready\"\n )\n\n ceph_cluster = CephCluster()\n assert ceph_health_check(\n namespace=config.ENV_DATA['cluster_namespace']\n )\n ceph_cluster.cluster_health_check(timeout=60)\n\n # Create resources and run IO for both FS and RBD\n # Unpack resources\n projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]\n\n # Project\n projects.append(helpers.create_project())\n\n # Secrets\n secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))\n secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))\n\n # Pools\n pools.append(helpers.create_ceph_block_pool())\n pools.append(helpers.get_cephfs_data_pool_name())\n\n # Storageclasses\n storageclasses.append(\n helpers.create_storage_class(\n 
interface_type=constants.CEPHBLOCKPOOL,\n interface_name=pools[0].name,\n secret_name=secrets[0].name\n )\n )\n storageclasses.append(\n helpers.create_storage_class(\n interface_type=constants.CEPHFILESYSTEM,\n interface_name=pools[1],\n secret_name=secrets[1].name\n )\n )\n\n # PVCs\n pvcs.append(helpers.create_pvc(\n sc_name=storageclasses[0].name, namespace=projects[0].namespace)\n )\n pvcs.append(helpers.create_pvc(\n sc_name=storageclasses[1].name, namespace=projects[0].namespace)\n )\n\n # Pods\n pods.append(\n helpers.create_pod(\n interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvcs[0].name,\n namespace=projects[0].namespace\n )\n )\n pods.append(\n helpers.create_pod(\n interface_type=constants.CEPHFILESYSTEM, pvc_name=pvcs[1].name,\n namespace=projects[0].namespace\n )\n )\n\n # Run IO\n for pod in pods:\n pod.run_io('fs', '1G')\n for pod in pods:\n fio_result = pod.get_fio_results()\n logger.info(f\"IOPs after FIO for pod {pod.name}:\")\n logger.info(\n f\"Read: {fio_result.get('jobs')[0].get('read').get('iops')}\"\n )\n logger.info(\n f\"Write: {fio_result.get('jobs')[0].get('write').get('iops')}\"\n )", "def _create_cluster_from_index(self, index):\n return Cluster(index=index)", "def create_and_delete_cluster(self, flavor, node_count,\n plugin_name=\"vanilla\",\n hadoop_version=\"2.3.0\"):\n\n tenant_id = self.clients(\"keystone\").tenant_id\n image_id = self.context()[\"sahara_images\"][tenant_id]\n\n LOG.debug(\"Using Image: %s\" % image_id)\n\n cluster = self._launch_cluster(flavor_id=flavor,\n image_id=image_id,\n node_count=node_count,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version)\n\n self._delete_cluster(cluster)", "def _create_cluster_from_index(self, index):\n return VRPCluster(index=index, demand=self.demands[index])", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return c_info['cluster_id']", "def create_redshift_cluster(DWH_CLUSTER_TYPE, DWH_NODE_TYPE, DWH_NUM_NODES, DWH_DB,\n DWH_CLUSTER_IDENTIFIER, DWH_DB_USER, DWH_DB_PASSWORD):\n \n # Create a Redshift cluster\n try:\n response = redshift.create_cluster( \n #HW\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n #Identifiers & Credentials\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n #Roles (for s3 access)\n IamRoles=[roleArn] \n )\n except Exception as e:\n print(e)", "def create_data_center(\n dc_name, cluster_name, host_name, comp_version=config.COMPATIBILITY_VERSION\n):\n testflow.step(\"Add data-center %s\", dc_name)\n assert ll_dc.addDataCenter(\n True, name=dc_name, local=False, version=comp_version\n ), \"Failed to create dc %s\" % dc_name\n\n testflow.step(\"Add cluster %s\", cluster_name)\n assert ll_clusters.addCluster(\n True, name=cluster_name, cpu=config.CPU_NAME,\n data_center=dc_name, version=comp_version\n ), \"addCluster %s with cpu %s and version %s to datacenter %s failed\" % (\n cluster_name, 
config.CPU_NAME, comp_version, dc_name\n )\n testflow.step(\"Move host %s to cluster %s\", host_name, cluster_name)\n assert hl_hosts.move_host_to_another_cluster(\n host=host_name, cluster=cluster_name, activate=True\n ), \"Failed to move host %s to cluster %s\" % (host_name, cluster_name)", "def create_keyspace():\n\n cluster = Cluster(['127.0.0.1'], port=9042)\n session = cluster.connect()\n\n session.execute(\"\"\"\n CREATE KEYSPACE IF NOT EXISTS sparkify\n WITH REPLICATION = \n { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\"\"\"\n )\n\n session.set_keyspace('sparkify')\n\n return cluster, session", "def get_cluster(self) -> 'AioCluster':\n return AioCluster(self)", "def __init__(self, cluster_metadata: MasterURLIdentifier) -> None:\n self.cluster_metadata = cluster_metadata\n if self.cluster_metadata.region == 'global':\n # The global region is unsupported as it will be eventually deprecated.\n raise ValueError('Clusters in the global region are not supported.')\n elif not self.cluster_metadata.region:\n _LOGGER.warning(\n 'No region information was detected, defaulting Dataproc cluster '\n 'region to: us-central1.')\n self.cluster_metadata.region = 'us-central1'\n\n if not self.cluster_metadata.cluster_name:\n self.cluster_metadata.cluster_name = ie.current_env(\n ).clusters.default_cluster_name\n\n from google.cloud import dataproc_v1\n self._cluster_client = dataproc_v1.ClusterControllerClient(\n client_options={\n 'api_endpoint': \\\n f'{self.cluster_metadata.region}-dataproc.googleapis.com:443'\n })\n\n if self.cluster_metadata in ie.current_env().clusters.master_urls.inverse:\n self.master_url = ie.current_env().clusters.master_urls.inverse[\n self.cluster_metadata]\n else:\n self.master_url = None", "def test_cluster_create(self, mock_is_service_available):\n\n mock_is_service_available.return_value = True\n fake_cluster = FakeCluster(**RETURN_CLUSTER_1)\n cluster = self._create_test_cluster(\n fake_cluster, 'stack_delete', CREATE_CLUSTER_ARG_1)\n scheduler.TaskRunner(cluster.create)()\n self.assertEqual((cluster.CREATE, cluster.COMPLETE), cluster.state)\n self.m.VerifyAll()", "def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None", "def create_redshift_cluster(redshift_client, role_arn):\n # Create the cluster if it doesn't exist.\n try:\n response = redshift_client.create_cluster(\n ClusterType=CLUSTER_TYPE,\n NodeType=NODE_TYPE,\n NumberOfNodes=NUM_NODES,\n DBName=DBNAME,\n ClusterIdentifier=IDENTIFIER,\n MasterUsername=USER,\n MasterUserPassword=PASSWORD,\n IamRoles=[role_arn]\n )\n except Exception as e:\n print(e)", "def create_coe_cluster(\n self,\n name,\n cluster_template_id,\n **kwargs,\n ):\n cluster = self.container_infrastructure_management.create_cluster(\n name=name,\n cluster_template_id=cluster_template_id,\n **kwargs,\n )\n\n self.list_coe_clusters.invalidate(self)\n return cluster", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n 
'--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def create(self,\n cluster_name: str=None,\n cluster_type: str='sandbox',\n with_auth: bool=False,\n module: Optional[Union[str, dict]]=None,\n config: dict=None,\n wait_for_completion: bool=True\n ) -> str:\n\n if config is None:\n config = {\n 'id': cluster_name,\n 'configuration': {\n 'tier': cluster_type,\n 'requiresAuthentication': with_auth\n }\n }\n if self.dev:\n config['configuration']['modules'] = _get_module_config(module)\n\n data_to_send = json.dumps(config).encode(\"utf-8\")\n\n try:\n response = self.session.post(\n url=self.url,\n data=data_to_send,\n headers=self._get_request_header(),\n timeout=self._timeout_config\n )\n except RequestsConnectionError as conn_err:\n message = str(conn_err)\\\n + ' Connection error, WCS cluster was not created.'\n raise type(conn_err)(message).with_traceback(sys.exc_info()[2])\n if response.status_code == 400 and \"already exists\" in response.text:\n # this line is never executed if cluster_name is None\n return 'https://' + self.get_cluster_config(cluster_name)['meta']['PublicURL']\n\n if response.status_code != 202:\n raise UnexpectedStatusCodeException('Creating WCS instance', response)\n\n if cluster_name is None:\n cluster_name = response.json()['id']\n\n if wait_for_completion is True:\n pbar = tqdm(total=100)\n progress = 0\n while progress != 100:\n time.sleep(2.0)\n progress = self.get_cluster_config(cluster_name)[\"status\"][\"state\"][\"percentage\"]\n pbar.update(progress - pbar.n)\n pbar.close()\n\n return 'https://' + self.get_cluster_config(cluster_name)['meta']['PublicURL']", "def open(self, db_name, storage, user=None, cred=None):\n config = self.config\n config.set_database(db_name, storage)\n\n # NOTE Odd behaviour; if storage not passed to db_exists, db_create\n # ignores storage\n if not self.client.db_exists(db_name, storage):\n self.client.db_create(db_name\n , pyorient.DB_TYPE_GRAPH\n , storage)\n\n if not (user and cred):\n user = config.user\n cred = config.cred\n self._last_user = user\n self._last_cred = cred\n self._last_db = db_name\n\n cluster_map = self.client.db_open(db_name, user, cred)\n\n\n self.server_version = ServerVersion(\n self.client.version.major, self.client.version.minor, self.client.version.build)\n\n return cluster_map", "def _open_session(self):\n return self.cluster.connect()", "def init_mc(self) -> ManagedCluster:\n # Initialize a ManagedCluster object with mandatory parameter location.\n mc = self.models.ManagedCluster(\n location=self.context.get_location(),\n )\n\n # attach mc to AKSContext\n self.context.attach_mc(mc)\n return mc", "def create_redshift_cluster(config, redshift_role):\n redshift = create_boto3_client(config, 'redshift')\n cluster_identifier = config.get('CLUSTER', 'CLUSTER_IDENTIFIER')\n print(\"Creating redshift cluster: %s\" % cluster_identifier)\n try:\n cc_response = redshift.create_cluster(\n MasterUsername=config.get('CLUSTER', 'DB_USER'),\n MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),\n ClusterIdentifier=cluster_identifier,\n NodeType=config.get('CLUSTER', 'NODE_TYPE'),\n NumberOfNodes=int(config.get('CLUSTER', 'NODE_COUNT')),\n Port=int(config.get('CLUSTER', 'DB_PORT')),\n IamRoles=[\n redshift_role['Role']['Arn']\n ],\n ClusterSubnetGroupName=config.get('CLUSTER', 'SUBNET_GROUP'),\n ClusterSecurityGroups=[config.get('CLUSTER', 'SECURITY_GROUP_ID')]\n )\n print('Creating Cluster:', 
cc_response)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ClusterAlreadyExists':\n print(\"Cluster %s already exists\" % cluster_identifier)\n return\n else:\n print(\"Unexpected error wile creating cluster: %s\" % e)\n\n print('Sleep 5 seconds')\n time.sleep(5)\n while True:\n print('Fetching status of cluster..')\n try:\n cluster_status = get_cluster_status(redshift, cluster_identifier)\n if cluster_status['Clusters'][0]['ClusterStatus'] == 'available':\n break\n print('Cluster Status:', cluster_status)\n except ClientError as e:\n print(\"Unexpected error wile getting cluster status: %s\" % e)\n raise e\n print('Sleep 10 seconds')\n time.sleep(10)\n print('Cluster is created and available.')", "def add_delete_controller_cinder_ceph(self):\n\n self.env.revert_snapshot('ready_with_9_slaves')\n\n data = {\n 'volumes_lvm': True,\n 'volumes_ceph': False,\n 'images_ceph': True,\n 'objects_ceph': True,\n 'tenant': 'scalegroup5',\n 'user': 'scalegroup5',\n 'password': 'scalegroup5',\n \"net_provider\": 'neutron',\n \"net_segment_type\": settings.NEUTRON_SEGMENT['tun']\n }\n\n self.show_step(1, initialize=True)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n settings=data\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller', 'cinder', 'ceph-osd'],\n 'slave-02': ['controller', 'cinder', 'ceph-osd'],\n 'slave-03': ['controller', 'cinder', 'ceph-osd'],\n 'slave-04': ['compute'],\n 'slave-05': ['compute']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(2)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(3)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(4)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-06': ['controller', 'cinder', 'ceph-osd']\n }\n )\n\n self.show_step(5)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(6)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(7)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(8)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-07': ['controller', 'cinder', 'ceph-osd']\n }\n )\n\n with self.fuel_web.get_ssh_for_node('slave-02') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-02': ['controller', 'cinder', 'ceph-osd']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(9)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(10)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(11)\n self.fuel_web.run_ostf(cluster_id)\n\n self.show_step(12)\n with self.fuel_web.get_ssh_for_node('slave-03') as remote_ceph:\n self.fuel_web.prepare_ceph_to_delete(remote_ceph)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-03': ['controller', 'cinder', 'ceph-osd']\n },\n pending_addition=False,\n pending_deletion=True\n )\n\n self.show_step(13)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(14)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(15)\n self.fuel_web.run_ostf(cluster_id)\n\n self.env.make_snapshot('add_delete_controller_cinder_ceph')", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def _init_cluster(self):\n self._Init_Cluster()", "def client():\n return hdfs.connect()", "def create_instance(c_instance):\n return RpycHost(c_instance)", "def start(self):\n\n # check if docker is up.\n\n if \"OZONE_RUNNER_VERSION\" not in os.environ:\n self.__logger__.error(\"OZONE_RUNNER_VERSION is not set.\")\n sys.exit(1)\n\n if \"HDDS_VERSION\" not in os.environ:\n self.__logger__.error(\"HDDS_VERSION is not set.\")\n sys.exit(1)\n\n self.__logger__.info(\"Starting Ozone Cluster\")\n if Blockade.blockade_status() == 0:\n Blockade.blockade_destroy()\n\n Blockade.blockade_up()\n\n call([Command.docker_compose, \"-f\", self.docker_compose_file,\n \"up\", \"-d\", \"--scale\",\n \"datanode=\" + str(self.conf.datanode_count)])\n self.__logger__.info(\"Waiting 10s for cluster start up...\")\n # Remove the sleep and wait only till the cluster is out of safemode\n time.sleep(10)\n output = subprocess.check_output([Command.docker_compose, \"-f\",\n self.docker_compose_file, \"ps\"])\n node_list = []\n for out in output.split(\"\\n\")[2:-1]:\n node = out.split(\" \")[0]\n node_list.append(node)\n Blockade.blockade_add(node)\n\n self.om = filter(lambda x: 'om' in x, node_list)[0]\n self.scm = filter(lambda x: 'scm' in x, node_list)[0]\n self.datanodes = sorted(list(filter(lambda x: 'datanode' in x, node_list)))\n self.client = filter(lambda x: 'ozone_client' in x, node_list)[0]\n self.scm_uuid = self.__get_scm_uuid__()\n self.datanode_dir = self.get_conf_value(\"hdds.datanode.dir\")\n\n assert node_list, \"no node found in the cluster!\"\n self.__logger__.info(\"blockade created with nodes %s\", ' '.join(node_list))", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_object = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_object)\n ceph_nodes = kw.get(\"ceph_nodes\")\n osd_list = []\n total_osd_app_mem = {}\n\n for node in ceph_nodes:\n if node.role == \"osd\":\n node_osds = rados_object.collect_osd_daemon_ids(node)\n osd_list = osd_list + node_osds\n\n target_configs = config[\"cache_trim_max_skip_pinned\"][\"configurations\"]\n max_skip_pinned_value = int(\n mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_trim_max_skip_pinned\")\n )\n\n # Check the default value of the bluestore_cache_trim_max_skip_pinned value\n if max_skip_pinned_value != 1000:\n log.error(\n \"The default value of bluestore_cache_trim_max_skip_pinned not equal to 1000\"\n )\n raise Exception(\n \"The default value of bluestore_cache_trim_max_skip_pinned is not 1000\"\n )\n\n # Creating pools and starting the test\n for entry in target_configs.values():\n log.debug(\n f\"Creating {entry['pool_type']} pool on the cluster with name {entry['pool_name']}\"\n )\n if entry.get(\"pool_type\", \"replicated\") == \"erasure\":\n method_should_succeed(\n rados_object.create_erasure_pool, name=entry[\"pool_name\"], **entry\n )\n else:\n method_should_succeed(\n rados_object.create_pool,\n **entry,\n )\n\n if not rados_object.bench_write(**entry):\n log.error(\"Failed to write objects into the EC Pool\")\n return 1\n rados_object.bench_read(**entry)\n log.info(\"Finished writing data into the pool\")\n\n # performing scrub and deep-scrub\n rados_object.run_scrub()\n rados_object.run_deep_scrub()\n time.sleep(10)\n\n rados_object.change_heap_profiler_state(osd_list, \"start\")\n # Executing tests for 45 minutes\n time_execution = datetime.datetime.now() 
+ datetime.timedelta(minutes=45)\n while datetime.datetime.now() < time_execution:\n # Get all OSDs heap dump\n heap_dump = rados_object.get_heap_dump(osd_list)\n # get the osd application used memory\n osd_app_mem = get_bytes_used_by_app(heap_dump)\n total_osd_app_mem = mergeDictionary(total_osd_app_mem, osd_app_mem)\n # wait for 10 seconds and collecting the memory\n time.sleep(10)\n for osd_id, mem_list in total_osd_app_mem.items():\n mem_growth = is_what_percent_mem(mem_list)\n if mem_growth > 80:\n log.error(\n f\"The osd.{osd_id} consuming more memory with the relative memory growth {mem_growth}\"\n )\n raise Exception(\"No warning generated by PG Autoscaler\")\n log.info(f\"The relative memory growth for the osd.{osd_id} is {mem_growth} \")\n\n rados_object.change_heap_profiler_state(osd_list, \"stop\")\n\n # check fo the crashes in the cluster\n crash_list = rados_object.do_crash_ls()\n if not crash_list:\n return 0\n else:\n return 1", "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def raw_cluster_cmd(self, *args, **kwargs):\n proc = self.controller.run(args=[os.path.join(BIN_PREFIX, \"ceph\")] + \\\n list(args), **kwargs)\n return six.ensure_str(proc.stdout.getvalue())", "def cluster(self):\n assert False", "def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()", "async def create(self, job):\n # check if ctdb shared volume already exists and started\n info = await self.middleware.call('gluster.volume.exists_and_started', CTDB_VOL_NAME)\n if not info['exists']:\n # get the peers in the TSP\n peers = await self.middleware.call('gluster.peer.query')\n if not peers:\n raise CallError('No peers detected')\n\n # shared storage volume requires 3 nodes, minimally, to\n # prevent the dreaded split-brain\n con_peers = [i['hostname'] for i in peers if i['connected'] == 'Connected']\n if len(con_peers) < 3:\n raise CallError(\n '3 peers must be present and connected before the ctdb '\n 'shared volume can be created.'\n )\n\n # get the system dataset location\n ctdb_sysds_path = (await self.middleware.call('systemdataset.config'))['path']\n ctdb_sysds_path = str(Path(ctdb_sysds_path).joinpath(CTDB_VOL_NAME))\n\n bricks = []\n for i in con_peers:\n bricks.append(i + ':' + ctdb_sysds_path)\n\n options = {'args': (CTDB_VOL_NAME, bricks,)}\n options['kwargs'] = {'replica': len(con_peers), 'force': True}\n await self.middleware.call('gluster.method.run', volume.create, options)\n\n # make sure the shared volume is configured properly to prevent\n # possibility of split-brain/data corruption with ctdb service\n await self.middleware.call('ctdb.shared.volume.validate')\n\n if not info['started']:\n # start it if we get here\n await self.middleware.call('gluster.volume.start', {'name': CTDB_VOL_NAME})\n\n # try to mount it locally and send a request\n # to all the other peers in the TSP to also\n # FUSE mount it\n data = {'event': 'VOLUME_START', 'name': CTDB_VOL_NAME, 'forward': True}\n await self.middleware.call('gluster.localevents.send', data)\n\n # we need to wait on the local FUSE mount job since\n # ctdb daemon config is dependent on it being mounted\n fuse_mount_job = await self.middleware.call('core.get_jobs', [\n ('method', '=', 
'gluster.fuse.mount'),\n ('arguments.0.name', '=', 'ctdb_shared_vol'),\n ('state', '=', 'RUNNING')\n ])\n if fuse_mount_job:\n wait_id = await self.middleware.call('core.job_wait', fuse_mount_job[0]['id'])\n await wait_id.wait()\n\n # The peers in the TSP could be using dns names while ctdb\n # only accepts IP addresses. This means we need to resolve\n # the hostnames of the peers in the TSP to their respective\n # IP addresses so we can write them to the ctdb private ip file.\n names = [i['hostname'] for i in await self.middleware.call('gluster.peer.query')]\n ips = await self.middleware.call('cluster.utils.resolve_hostnames', names)\n if len(names) != len(ips):\n # this means the gluster peers hostnames resolved to the same\n # ip address which is bad....in theory, this shouldn't occur\n # since adding gluster peers has it's own validation and would\n # cause it to fail way before this gets called but it's better\n # to be safe than sorry\n raise CallError('Duplicate gluster peer IP addresses detected.')\n\n # Setup the ctdb daemon config. Without ctdb daemon running, none of the\n # sharing services (smb/nfs) will work in an active-active setting.\n priv_ctdb_ips = [i['address'] for i in await self.middleware.call('ctdb.private.ips.query')]\n for ip_to_add in [i for i in ips if i not in [j for j in priv_ctdb_ips]]:\n ip_add_job = await self.middleware.call('ctdb.private.ips.create', {'ip': ip_to_add})\n await ip_add_job.wait()\n\n # this sends an event telling all peers in the TSP (including this system)\n # to start the ctdb service\n data = {'event': 'CTDB_START', 'name': CTDB_VOL_NAME, 'forward': True}\n await self.middleware.call('gluster.localevents.send', data)\n\n return await self.middleware.call('gluster.volume.query', [('name', '=', CTDB_VOL_NAME)])", "def __init__(self, cluster_name: str, zone: str, sa_credentials_file_path: str):\n self.cluster_name = cluster_name\n self._credentials, self.project_id = load_credentials_from_file(\n sa_credentials_file_path, scopes=[\"https://www.googleapis.com/auth/cloud-platform\"])\n self.zone = zone\n\n # Generate the GCP Cluster Manager Client.\n # See: https://googleapis.dev/python/container/latest/container_v1/cluster_manager.html\n self.client = ClusterManagerClient(credentials=self.credentials)", "def create_test_cell(self):\n hostname = socket.gethostname()\n if get_var('KRB_REALM').lower() != get_var('AFS_CELL').lower():\n run_keyword(\"Set Kerberos Realm Name\", get_var('KRB_REALM'))\n run_keyword(\"Set Machine Interface\")\n setup_service_key()\n if get_var('AFS_DIST') == \"transarc\":\n run_keyword(\"Start the bosserver\")\n else:\n run_keyword(\"Start Service\", \"openafs-server\")\n run_keyword(\"Set the Cell Name\", get_var('AFS_CELL'))\n remove_symlinks_created_by_bosserver()\n run_keyword(\"Create Database Service\", \"ptserver\", 7002)\n run_keyword(\"Create Database Service\", \"vlserver\", 7003)\n if get_var('AFS_DAFS'):\n run_keyword(\"Create Demand Attach File Service\")\n else:\n run_keyword(\"Create File Service\")\n run_keyword(\"Create an Admin Account\", get_var('AFS_ADMIN'))\n run_keyword(\"Create the root.afs Volume\")\n if get_var('AFS_CSDB_DIST'):\n run_keyword(\"Append CellServDB.dist\")\n run_keyword(\"Create AFS Mount Point\")\n run_keyword(\"Set Cache Manager Configuration\")\n if get_var('AFS_DIST') == \"transarc\":\n uname = os.uname()[0]\n if uname == 'Linux':\n run_keyword(\"Start the Cache Manager on Linux\")\n elif uname == 'SunOS':\n run_keyword(\"Start the Cache Manager on Solaris\")\n else:\n 
raise AssertionError(\"Unsupported operating system: %s\" % (uname))\n else:\n run_keyword(\"Start Service\", \"openafs-client\")\n run_keyword(\"Cell Should Be\", get_var('AFS_CELL'))\n _LoginKeywords().login(get_var('AFS_ADMIN'))\n run_keyword(\"Create Volume\", hostname, \"a\", \"root.cell\")\n run_keyword(\"Mount Cell Root Volume\")\n run_keyword(\"Replicate Volume\", hostname, \"a\", \"root.afs\")\n run_keyword(\"Replicate Volume\", hostname, \"a\", \"root.cell\")\n # Create a replicated test volume.\n path = \"/afs/.%s/test\" % get_var('AFS_CELL')\n volume = \"test\"\n part = \"a\"\n parent = \"root.cell\"\n run_keyword(\"Create Volume\", hostname, part, volume)\n run_keyword(\"Mount Volume\", path, volume)\n run_keyword(\"Add Access Rights\", path, \"system:anyuser\", \"rl\")\n run_keyword(\"Replicate Volume\", hostname, part, volume)\n run_keyword(\"Release Volume\", parent)\n run_program(\"%s checkvolumes\" % get_var('FS'))\n _LoginKeywords().logout()", "def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def create_cluster(t2_url, t2_token, cluster_definition):\n response = requests.post(f\"{t2_url}/api/clusters\", data=cluster_definition, headers={ \"t2-token\": t2_token, \"Content-Type\": \"application/yaml\" })\n if(response.status_code != 200):\n log(f\"API call to create cluster returned error code {response}\")\n return None\n return response.json()", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def CreateInstance(self):\n\n # Create host instances for cuttlefish device. Currently one host instance\n # has one cuttlefish device. 
In the future, these logics should be modified\n # to support multiple cuttlefish devices per host instance.\n instance = self._compute_client.GenerateInstanceName(\n build_id=self.build_info.build_id, build_target=self._build_target)\n\n if self._cfg.enable_multi_stage:\n remote_build_id = self.build_info.build_id\n else:\n remote_build_id = self._GetGcsBucketBuildId(\n self.build_info.build_id, self.build_info.release_build_id)\n\n if self._cfg.enable_multi_stage:\n remote_system_build_id = self.system_build_info.build_id\n else:\n remote_system_build_id = self._GetGcsBucketBuildId(\n self.system_build_info.build_id, self.system_build_info.release_build_id)\n\n host_image_name = self._compute_client.GetHostImageName(\n self._cfg.stable_host_image_name,\n self._cfg.stable_host_image_family,\n self._cfg.stable_host_image_project)\n # Create an instance from Stable Host Image\n self._compute_client.CreateInstance(\n instance=instance,\n image_name=host_image_name,\n image_project=self._cfg.stable_host_image_project,\n build_target=self.build_info.build_target,\n branch=self.build_info.branch,\n build_id=remote_build_id,\n kernel_branch=self.kernel_build_info.branch,\n kernel_build_id=self.kernel_build_info.build_id,\n kernel_build_target=self.kernel_build_info.build_target,\n blank_data_disk_size_gb=self._blank_data_disk_size_gb,\n extra_scopes=self._extra_scopes,\n system_build_target=self.system_build_info.build_target,\n system_branch=self.system_build_info.branch,\n system_build_id=remote_system_build_id,\n bootloader_build_target=self.bootloader_build_info.build_target,\n bootloader_branch=self.bootloader_build_info.branch,\n bootloader_build_id=self.bootloader_build_info.build_id)\n\n return instance", "def __create_cont(self, path, filesystem, cont_stat, component_number):\n try:\n self.logger.debug('Create container interface called')\n status_obj = Status()\n cont_id = \"container\"\n #cont_id = get_container_id()\n tmp_path = '%s/%s/%s/%s/%s' % (self.__fs_base, \\\n filesystem, TMPDIR, cont_id,component_number)\n self.asyn_helper.call(\"create_container\", \\\n tmp_path, path, cont_stat, status_obj)\n return status_obj\n except Exception as err:\n self.logger.error(('create_container for %(con_dir)s failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path, \n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def test_create_hyperflex_cluster_network_policy(self):\n pass", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def create_client(self) -> None:\n pass", "def run(ceph_cluster, **kw):\n config = kw.get(\"config\")\n\n node = ceph_cluster.get_nodes(role=\"mon\")[0]\n conf = {} # Fetching required data from the test config\n\n # Get trusted-ip details\n args = config.get(\"args\")\n trusted_ip_list = args.get(\"trusted_ip_list\")\n if trusted_ip_list:\n node_ips = get_nodes_by_ids(ceph_cluster, trusted_ip_list)\n conf[\"trusted_ip_list\"] = repr(\" 
\".join([node.ip_address for node in node_ips]))\n\n # Get placement details\n conf[\"pos_args\"] = config.get(\"pos_args\")\n placement = args.get(\"placement\")\n if placement:\n nodes = placement.get(\"nodes\")\n node_ips = get_nodes_by_ids(ceph_cluster, nodes)\n conf[\"placement=\"] = repr(\" \".join([node.hostname for node in node_ips]))\n\n # Deploy and Remove ISCSI 3 times\n for _ in range(3):\n log.info(\"Deploying ISCSI service\")\n CephAdm(node).ceph.orch.apply(service_name=\"iscsi\", **conf)\n\n # Verify if the service is deployed successfully\n if not _verify_service(node):\n raise IncorrectIscsiServiceStatus(\n \"Error - ISCSI service is not running after being deployed\"\n )\n log.info(\"ISCSI service is deployed successfully\")\n\n # Remove ISCSI service\n log.info(\"Removing ISCSI service\")\n CephAdm(node).ceph.orch.rm(service_name=\"iscsi.iscsi\")\n\n # Verify if iscsi is removed\n if not _verify_service(node, False):\n raise IncorrectIscsiServiceStatus(\"Error - ISCSI service is not removed.\")\n log.info(\"ISCSI service is removed successfully\")\n\n return 0", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def create_cluster(\n self,\n name: str,\n cluster_type: Union[dto.ClusterType, str],\n params: Mapping[str, Any],\n ssh_key: str\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def gen_heat_client(self):\n\n print \"\\t* Generating heat client\"\n # request a new auth token from keystone\n keystone = ksclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)\n auth_token = keystone.auth_token\n heat_url = 'http://%s:8004/v1/%s' % (self.ip, self.tenant_id)\n\n # instantiate client\n self.heatclient = hClient('1', endpoint=heat_url, token=auth_token)", "def create_redshift_cluster(redshift, roleArn):\n cluster = redshift.create_cluster(\n #Hardware provisioned\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n #Identifiers & Credentials\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n #Roles (for s3 access)\n IamRoles=[roleArn] \n )\n print(\"Creating Redshift cluster with\", DWH_NUM_NODES, \"nodes, on\", DWH_REGION)\n return cluster" ]
[ "0.6605776", "0.6418442", "0.6327758", "0.61983645", "0.61876357", "0.61836255", "0.6062055", "0.6059325", "0.60109323", "0.5957398", "0.5952814", "0.58882135", "0.58059293", "0.57884413", "0.5772146", "0.5749063", "0.57396287", "0.5687946", "0.5676263", "0.56761366", "0.56517947", "0.56498843", "0.56191844", "0.5596464", "0.5593158", "0.5583261", "0.5582657", "0.557475", "0.5559109", "0.55313635", "0.55311495", "0.5529833", "0.5494475", "0.54883665", "0.54865265", "0.5476241", "0.5466207", "0.5462615", "0.5454591", "0.54410565", "0.54395837", "0.5431271", "0.54097587", "0.54074097", "0.5401393", "0.5392626", "0.5384554", "0.53813463", "0.53757757", "0.5353397", "0.53456324", "0.5344403", "0.53436106", "0.5340729", "0.5335743", "0.53332454", "0.53288203", "0.53279155", "0.53132915", "0.53017884", "0.5298983", "0.5286369", "0.5255776", "0.5247458", "0.5240703", "0.5231888", "0.5226042", "0.5225875", "0.5213317", "0.52028966", "0.51939875", "0.5192034", "0.5190758", "0.5189118", "0.5184982", "0.5183178", "0.5180052", "0.5177015", "0.51749897", "0.51747364", "0.51696754", "0.5156008", "0.5155332", "0.5150695", "0.51501065", "0.5142359", "0.5138403", "0.51288474", "0.512799", "0.511762", "0.5088073", "0.5062476", "0.5053238", "0.50530106", "0.5040399", "0.5034526", "0.50243527", "0.50227225", "0.501978", "0.5016777" ]
0.6422216
1
Execute a shell command in the cluster
def shell_command(command, shell=True):
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=shell)
    result = p.communicate()[0]
    if result == "command not known":
        LOGGER.info("command not known " + err)
    return result.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(cmd) :\n return os.system( cmd )", "def do_shell(self, command):\n os.system(command)", "def raw_cluster_cmd(self, *args, **kwargs):\n proc = self.controller.run(args=[os.path.join(BIN_PREFIX, \"ceph\")] + \\\n list(args), **kwargs)\n return six.ensure_str(proc.stdout.getvalue())", "def shell_execute(self, cmd):\n self.log.debug(\"Executing command in shell: \" + str(cmd))\n\n dcos_config = os.path.expanduser('~/.dcos/dcos.toml')\n os.environ['PATH'] = ':'.join([os.getenv('PATH'), '/src/bin'])\n os.environ['DCOS_CONFIG'] = dcos_config\n os.makedirs(os.path.dirname(dcos_config), exist_ok=True)\n \n try:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n output, errors = p.communicate()\n except OSError as e:\n self.log.error(\"Error executing command \" + str(cmd) + \". \" + e)\n raise e\n\n return output.decode(\"utf-8\"), errors.decode(\"utf-8\")", "def cmd(command):\n pflush(\"[%s]> %s\" % (HOSTNAME, command))\n code = os.system(command)\n if code != 0:\n raise RuntimeError(\"Error executing: \" + command)", "def execute_shell(self, cmd):\n try:\n return common.execute_shell(cmd, False)\n except Exception, e:\n raise exception.TermSaverException(help=_(\n\"\"\"Could not execute the command [%(cmd)s] properly.\n%(message)s \\nError details: %(error)s\"\"\") % {\n \"cmd\": \" \".join(cmd),\n \"message\": \"Make sure you have figlet installed!\",\n \"error\": str(e)\n }\n )", "def shell(command):\n log(\"Executing: \" + command)\n result = subprocess.call(command, shell=True, executable=\"/bin/bash\")\n if (result != 0):\n log(\"Execution failed (result=%d)\" % result)\n sys.exit()", "def run_shell(cmd: str):\n print_color(f\"** RUNNING: {cmd}\")\n os.system(cmd)", "def run_cmd(cmd):\n command = cmd.split(\" \")[0]\n if command == \"ls\":\n r = requests.get(url.format(cmd.split(\" \")[1], \"OPEN\", userName))\n print(r.json())\n elif command == 'put':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Append_to_a_File\n # this part usess system call to contact the remote\n # server first creating the file then append toit\n # Sample use\n # >>> PUT <file-name> <file-path>\n fileName = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'PUT', url.format(\n fileName, 'CREATE', userName)]\n subprocess.call(system_call)\n system_call = ['curl', '-i', '-X', 'POST', url.format(\n fileName, 'APPEND', userName)]\n subprocess.call(system_call)\n system_call = ['curl', '-i', '-X', 'POST', '-T', cmd.slpit(\" \")[2],\n url.format(fileName, 'APPEND', userName)]\n subprocess.call(system_call)\n\n elif command == 'get':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Open_and_Read_a_File\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> GET <file-path>\n fileName = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-L', url.format(\n fileName, 'OPEN', userName)]\n subprocess.call(system_call)\n elif command == 'mkdir':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Make_a_Directory\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> mkdir <folder-Path>\n folderPath = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'PUT', url.format(\n folderPath, 'MKDIRS', userName)]\n subprocess.call(system_call)\n elif command == 'rmdir':\n # 
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Delete_a_FileDirectory\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> rmdir <file-path>\n folderPath = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'DELETE', url.format(\n folderPath, 'DELETE', userName)]\n subprocess.call(system_call)\n else:\n print 'Command is invalid.'", "def executor(host):\n try:\n exec_call(sssh(host))\n except Exception as msg:\n print('%s' % str(msg))\n pass", "def execute_command(self, command):\n return self.ssh.exec_command(command)", "def xdsh_execute(node, commands):\n xdsh_commands = 'command=%s' % commands\n # Add -q (quiet) option to ignore ssh warnings and banner msg\n opt = 'options=-q'\n body = [xdsh_commands, opt]\n url = XCATUrl().xdsh('/' + node)\n return xcat_request(\"PUT\", url, body)", "def shellcommand(command):\n\n subprocess.call(str(command))", "def run_command(self, command):\n subprocess.call(command, shell=True)", "def xdsh(node, commands):\n LOG.debug('Run command %(cmd)s on xCAT node %(node)s' %\n {'cmd': commands, 'node': node})\n\n def xdsh_execute(node, commands):\n \"\"\"Invoke xCAT REST API to execute command on node.\"\"\"\n xdsh_commands = 'command=%s' % commands\n # Add -q (quiet) option to ignore ssh warnings and banner msg\n opt = 'options=-q'\n body = [xdsh_commands, opt]\n url = XCATUrl().xdsh('/' + node)\n return xcat_request(\"PUT\", url, body)\n\n res_dict = xdsh_execute(node, commands)\n\n return res_dict", "def sh(cmd):\r\n return check_call(cmd, shell=True)", "def run_command(command):\n os.system('(echo {} | {})&'.format(command, SHELL))", "def nodes_run_cmd(self, group, cmd):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n tasks = [\n dict(action=dict(module=u'shell', args=cmd), register=u'shell_out'),\n ]\n runner.run_task(group, tasks=tasks, frmt=u'json')", "def run_command_node(*, user: str, host: str, identity_file: str, command: tuple):\n ssh_client = get_ssh_client(\n user=user,\n host=host,\n identity_file=identity_file)\n\n logger.info(\"[{h}] Running command...\".format(h=host))\n\n command_str = ' '.join(command)\n\n with ssh_client:\n ssh_check_output(\n client=ssh_client,\n command=command_str)\n\n logger.info(\"[{h}] Command complete.\".format(h=host))", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def shell(self, cmd):\n raise NotImplementedError", "def shell(cmd, check=True):\n eprint(f\"+ {cmd}\")\n return run(cmd, shell=True, check=check)", "def sh(cmd):\n print 'CMD:', cmd\n return check_call(cmd, shell=True)", "def run_command(cmd):\n return subprocess.call(cmd, shell=True)", "def _run_shell(self, cmd):\n self._logger.info(\"Running command\\n{}\".format(\" \".join(cmd)))\n\n out = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n stdout, stderr = out.communicate()\n result = stdout.decode(encoding='utf-8')\n if stderr:\n error_msg = stderr.decode(encoding='utf-8')\n print(error_msg)\n raise Exception(error_msg)\n\n return result", "def run_cmd(cmd):\n logging.debug('Run command \"'+cmd+'\"')\n try:\n process = subprocess.run(cmd, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n process.check_returncode()\n\n except Exception as e:\n logging.exception(str(e) +\"\\nCMD_SHELL : \"+cmd+\"\\nSTDOUT : \"+process.stdout.decode()+\"\\nSTDERR : \"+process.stderr.decode(), exc_info=True)\n #logging.critical(\"{CDM : \"+cmd+\", \"} : \"+cmd)\n #logging.critical(\"STDOUT : \"+process.stdout.decode())\n #logging.critical(\"STDERR : \"+process.stderr.decode())\n #raise e\n\n return process.stdout.decode()", "def command_thread():\n\n if GlusterCommand.targetNode is not \"localhost\":\n self.cmd += \" --remote-host=%s\" % GlusterCommand.targetNode\n\n self.cmdProcess = subprocess.Popen(self.cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=self.env,\n preexec_fn=os.setsid)\n\n stdout, stderr = self.cmdProcess.communicate()\n self.stdout = stdout.split('\\n')[:-1]\n self.stderr = stderr.split('\\n')[:-1]", "def _execute_command(self, cmd):\n LOG.info(\"Executing: %s\" % cmd)\n status, stdout, stderr = self.client.execute(cmd)\n if status:\n raise RuntimeError(\"Failed executing command: \",\n cmd, stderr)\n return stdout", "def execute(\n cmd,\n showout=False,\n cwd=None,\n shell=\"/bin/bash\",\n timeout=600,\n asynchronous=False,\n env=None,\n replace_env=False,\n die=False,\n):\n return j.core.executors.run_local(\n cmd=cmd,\n hide=not showout,\n cwd=cwd,\n shell=shell,\n timeout=timeout,\n asynchronous=asynchronous,\n env=env or {},\n replace_env=replace_env,\n warn=not die,\n )", "def do_shell(self, line):\n os.system(line)", "def shell_cmd(self, cmd):\n cmd_ex = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n output = cmd_ex.communicate()[0]", "def do_shell(self, args):\n os.system(args)", "def do_shell(self, args):\n os.system(args)", "def ssh_call ( server, identity, cmd ) :\n print \"Running SSH command on server \" + server + \": \" + cmd\n return subprocess.call( [ \"ssh\",\n ssh_opt,\n \"-tt\",\n \"-i\",\n identity,\n \"ec2-user@\" + server,\n cmd ] )", "def shell(cmd):\n print('Running \"{}\"...'.format(cmd))\n subprocess.check_call(cmd, shell=True)", "def run(ceph_cluster, **kw):\n config = kw[\"config\"]\n\n build = config.get(\"build\", config.get(\"rhbuild\"))\n ceph_cluster.rhcs_version = build\n\n # Manage Ceph using ceph-admin orchestration\n command = config.pop(\"command\")\n log.info(\"Executing client %s\" % command)\n orch = Orch(cluster=ceph_cluster, **config)\n method = MAP_[command]\n method(orch, config)\n return 0", "def _exec_command_in_container(client, container, command):\n exec_id = client.exec_create(container, command)\n output = client.exec_start(exec_id).decode('utf-8')\n logger.info(output)\n return output", "def exec_cmd(command):\r\n global _verbose\r\n debug(\"Executing command: %s\" % command)\r\n if not _verbose:\r\n command = \"%s > /dev/null 2>&1\" % command\r\n resp = os.system(command)\r\n if resp != 0:\r\n exit(\"Command [%s] failed\" % command, resp)", "def run_cmd(cmd):\n cmdl = cmd.split(\" \")\n try:\n p = subprocess.Popen(cmdl, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so, se = p.communicate()\n except subprocess.CalledProcessError, e:\n sys.stderr.write(\"Error encountered in running '\" + cmd +\n \"'. 
Return status is '\" + str(e.returncode) + \"'\\n\")\n sys.exit(1)\n except:\n sys.stderr.write(\"Unknown error encountered in running 'qhost -j -xml'.\\n\")\n sys.exit(1)\n return so", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def shell(ws, command):\n full_cmd = [\"bash\", \"-l\"]\n if command:\n full_cmd.extend([\"-c\", command])\n\n remove_container = ws.docker_run()\n\n rc = ws.docker_exec(full_cmd)\n\n if remove_container:\n ws.docker_remove()\n\n sys.exit(rc)", "def shell(cmd):\n return G.DEVICE.shell(cmd)", "def run_cmd(self, cmd):\r\n if 'shell_id' in dir(self):\r\n #checking for the shell_id created in winrm object\r\n command_id = self.conn.run_command(self.shell_id, cmd)\r\n std_out, std_err, status_code = self.conn.get_command_output(\r\n self.shell_id, command_id)\r\n #runs the command and returns output,error,statuscode\r\n return std_out, std_err, status_code", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def _run_command(command):\n full_command = \"xcrun simctl %s\" % (command,)\n # Deliberately don't catch the exception - we want it to bubble up\n return subprocess.check_output(full_command, universal_newlines=True, shell=True)", "def run_hdfs_command(svc_name, command):\n full_command = 'docker run -e HDFS_SERVICE_NAME={} mesosphere/hdfs-client:2.6.4 /bin/bash -c \"/configure-hdfs.sh && {}\"'.format(svc_name, command)\n\n rc, output = shakedown.run_command_on_master(full_command)\n return rc, output", "def systemCommand(command):\n\n commStatus, commOut = commands.getstatusoutput(command)\n # If our command fails, abort entirely and notify CloudKick\n if commStatus != 0:\n sys.stderr.write('Error: Failure when executing the following ')\n sys.stderr.write(\"command: '%s'\\n\" % (command,))\n sys.stderr.write(\"Exit status: %d\\n\" % (commStatus,))\n sys.stderr.write(\"Output: %s\\n\\n\" % (commOut,))\n sys.stderr.write('status err System command failure: ')\n sys.stderr.write('%s\\n' % (command,))\n sys.exit(1)\n # If we get a 0 exit code, all is well. 
Return the data.\n else:\n return commOut", "def execute_cmd(client, server, msg):\n cmd = msg.strip().split(' ')[0]\n if cmd[0] == \".\":\n server.logger.info(\"BLACKLIST {} : {}\".format(client.ip, cmd))\n client.exit_status = 0\n return\n if cmd in SCRIPTED:\n server.logger.info(\"SCRIPTED CMD {} : {}\".format(client.ip, cmd))\n method = getattr(sys.modules[__name__], \"{}_cmd\".format(cmd))\n result = method(server, client, msg)\n elif cmd not in BLACK_LIST:\n server.logger.info(\"EXECUTING CMD {} : {}\".format(client.ip, cmd))\n response = client.run_in_container(msg)\n if \"exec failed\" not in response:\n if response == \"\\n\":\n return\n server.logger.info(\n \"RESPONSE {}: {}\".format(client.ip, response[:-1]))\n client.send(response)\n print(client.exit_status)\n else:\n not_found(client, server, cmd)", "def exec_(name, command, runas=None):\n # Construct argument list\n args = [salt.utils.data.decode(name)]\n args.extend(_normalize_args(command))\n\n # Execute command and return output\n return prlctl(\"exec\", args, runas=runas)", "def do_shell(self, command):\n proc = subprocess.Popen(command, stdout=self.stdout, shell=True)\n proc.communicate()", "def cmd(self, command):\n self.enode.get_shell('bash').send_command(command, matches=self.scapy_prompt)\n response = self.enode.get_shell('bash').get_response()\n return response", "def execute(cmd_string):\n pass", "def execute_frontend(self, cmd, verbose=True):\n return self.arangosh.run_command(cmd, verbose)", "def executeOnMaster(self, cmd):\n if self._hostnameResolves(self.getManagementEndpoint()):\n ssh = SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n self.getManagementEndpoint(),\n username = self.config.get('ACS', \"username\"),\n port = 2200,\n key_filename = os.path.expanduser(self.config.get('SSH', \"privatekey\")))\n session = ssh.get_transport().open_session()\n self.log.debug(\"Session opened on master.\")\n self.log.debug(\"Executing on master: \" + cmd)\n\n AgentRequestHandler(session)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n stdin.close()\n \n result = \"\"\n for line in stdout.read().splitlines():\n self.log.debug(line.decude(\"utf-8\"))\n result = result + line.decode(\"utf-8\") + \"\\n\"\n for line in stderr.read().splitlines():\n self.log.error(line.decode(\"utf-8\"))\n else:\n self.log.error(\"Endpoint \" + self.getManagementEndpoint() + \" does not exist, cannot SSH into it.\")\n result = \"Exception: No cluster is available at \" + self.getManagementEndpoint()\n ssh.close()\n return result", "def run(self):\n\n def command_thread():\n \"\"\" invoke subprocess to run the command \"\"\"\n\n if GlusterCommand.targetNode is not \"localhost\":\n self.cmd += \" --remote-host=%s\" % GlusterCommand.targetNode\n\n self.cmdProcess = subprocess.Popen(self.cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=self.env,\n preexec_fn=os.setsid)\n\n stdout, stderr = self.cmdProcess.communicate()\n self.stdout = stdout.split('\\n')[:-1]\n self.stderr = stderr.split('\\n')[:-1]\n\n # use cfg.CMD_TIMEOUT value, to wait till user specified timeout.\n self.timeout = 10\n\n thread = threading.Thread(target=command_thread)\n thread.start()\n\n thread.join(self.timeout)\n\n if thread.is_alive():\n if cfg.debug:\n print ('Gluster_Command. 
Response from glusterd has exceeded %d secs timeout, terminating the request'\n % self.timeout)\n os.killpg(self.cmdProcess.pid, signal.SIGTERM)\n self.rc = -1\n\n else:\n # the thread completed normally\n if '--xml' in self.cmd:\n # set the rc based on the xml return code\n xmldoc = ETree.fromstring(''.join(self.stdout))\n self.rc = int(xmldoc.find('.//opRet').text)\n else:\n # set the rc based on the shell return code\n self.rc = self.cmdProcess.returncode", "def raw_cluster_cmd_result(self, *args, **kwargs):\n kwargs['check_status'] = False\n proc = self.controller.run([os.path.join(BIN_PREFIX, \"ceph\")] + list(args), **kwargs)\n return proc.exitstatus", "def do_shell(self, line):\n subprocess.call(line, shell=True)", "def cmd(self, shellcmd, *args, **kwargs):\n _cmd = shellcmd.format(*args, **kwargs)\n os.system(_cmd)", "def runCommand(command):\n None", "def execute(self):\n self.logger.info(\"Executing '{0}' on {1}\".format(self.command, self.salt_target))\n return self.salt_client.cmd(*self.executor_args, full_return=True)", "def run_command(command: str) -> str:\n path_command = f\"PATH={shell_path()} {command}\"\n status, output = getstatusoutput(path_command)\n if status == 0:\n return output\n raise ShellError(status, output)", "def run(self,command):\n #--------------------------------------------------------------------------\n res = subprocess.run(command,stdout=subprocess.DEVNULL,stderr=subprocess.STDOUT).returncode\n return res", "def run(cmd):\n print(cmd)\n r = os.system(cmd)\n if r:\n print(\"ERROR: command returned {0}\".format(r))\n sys.exit(r)", "def cli(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster subcommand called from cli\")", "def run(cmd: str) -> None:\n subprocess.run(cmd, shell=True, check=True)", "def run_shell_cmd(cmd, workingdir):\n click.echo(f\"run a shell command on cwd={workingdir}. 
\\ncmd=\\\"{' '.join(cmd)}\\\"\")\n proc = subprocess.Popen(cmd, cwd=workingdir, stdout=subprocess.PIPE)\n for line in io.TextIOWrapper(proc.stdout, encoding=\"utf-8\"):\n print(line.rstrip())", "def cli(ctx, user, workdir, shell, hostname):\n cmd = [\"cat /etc/motd && %s\" % shell]\n execute(ctx, user, hostname, cmd, workdir, tty=True)", "def run(self, command):\r\n boto.log.debug('running:%s on %s' % (command, self.server.instance_id))\r\n status = 0\r\n try:\r\n t = self._ssh_client.exec_command(command)\r\n except paramiko.SSHException:\r\n status = 1\r\n std_out = t[1].read()\r\n std_err = t[2].read()\r\n t[0].close()\r\n t[1].close()\r\n t[2].close()\r\n boto.log.debug('stdout: %s' % std_out)\r\n boto.log.debug('stderr: %s' % std_err)\r\n return (status, std_out, std_err)", "def shell_command(split_cmd, cwd=HERE):\n print(colored(f'Kör \"{\" \".join(split_cmd)}\"', 'blue', attrs=['bold']))\n try:\n sp.run(split_cmd, cwd=cwd, check=True)\n return True\n except sp.CalledProcessError:\n return False", "def _execute(self, cmd):\r\n stdout, stderr, return_code = self._remote_client.run_remote_cmd(cmd)\r\n if return_code:\r\n raise exceptions.ArgusError(\r\n \"Command {command!r} failed with \"\r\n \"return code {return_code!r}\"\r\n .format(command=cmd,\r\n return_code=return_code))\r\n return stdout, stderr", "def _ssh(self, command, use_pwd=True, use_tty=False, forward_x=False, verbose=False):\n if use_pwd:\n cd_cmd = 'cd cluster_test_%d; ' % self.address[1]\n else:\n cd_cmd = ''\n ssh = ['ssh',\n '-o', 'UserKnownHostsFile=/dev/null',\n '-o', 'StrictHostKeyChecking=no',\n '-o', 'IdentitiesOnly=yes']\n if self.key_file:\n ssh.extend(['-i', self.key_file])\n if use_tty:\n ssh.extend(['-t'])\n \n if forward_x:\n ssh.extend(['-Y'])\n \n ssh.extend([self.user_name + '@' + self.address[0], cd_cmd + command])\n \n if verbose: print(\" \".join(ssh))\n \n # Check whether ssh runs successfully.\n if subprocess.call(ssh) == 0:\n return True\n else:\n return False", "def cat_cmd(server, client, line):\n if len(line.split(' ')) > 1 and line.split(' ')[1] == \"/proc/mounts\":\n path = os.path.dirname(os.path.realpath(__file__))\n path = path[:-7] # shaves off /engine\n with open(\"{}/fakefiles/proc%mounts\".format(path), \"r\") as f:\n response = f.read()\n client.exit_status = 0\n else:\n response = client.run_in_container(line)\n client.send(response)", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def run_command(command: str) -> str:\n full_command = f\"xcrun simctl {command}\"\n # Deliberately don't catch the exception - we want it to bubble up\n return subprocess.run(\n full_command,\n universal_newlines=True,\n shell=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout", "def do_exec(cs, args):\n opts = {}\n opts['command'] = zun_utils.parse_command(args.command)\n if args.interactive:\n opts['interactive'] = True\n opts['run'] = False\n response = cs.containers.execute(args.container, **opts)\n if args.interactive:\n exec_id = response['exec_id']\n url = response['proxy_url']\n websocketclient.do_exec(cs, url, args.container, exec_id, \"~\", 0.5)\n else:\n output = response['output']\n exit_code = response['exit_code']\n print(output)\n return exit_code", "def system(self,cmd):\n code = 'import os;f=os.popen(\"%s\");res = f.read(-1);f.close();' % cmd\n return self.exec_code(code,returns=['res'])", "def run_shell():\n shell = get_shell()\n if shell not in ['bash','tcsh']:\n raise ValueError, \"Unsupported shell (only 
works with bash and tcsh)\"\n os.execvp(shell,(shell,\"-l\"))", "def run_cmd(cmd):\n print 'running: %s' % cmd\n return subprocess.call(cmd.split(), env=os.environ, shell=False)", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def do_shell(command, context=None, **kwargs):\n logging.info(\"%s: executing %s\" % (context, command))\n\n child_env = {'CRANKD_CONTEXT': context}\n\n # We'll pull a subset of the available information in for shell scripts.\n # Anyone who needs more will probably want to write a Python handler\n # instead so they can reuse things like our logger & config info and avoid\n # ordeals like associative arrays in Bash\n for k in [ 'info', 'key' ]:\n if k in kwargs and kwargs[k]:\n child_env['CRANKD_%s' % k.upper()] = str(kwargs[k])\n\n if 'user_info' in kwargs:\n for k, v in kwargs['user_info'].items():\n child_env[create_env_name(k)] = str(v)\n\n try:\n rc = call(command, shell=True, env=child_env)\n if rc == 0:\n logging.debug(\"`%s` returned %d\" % (command, rc))\n elif rc < 0:\n logging.error(\"`%s` was terminated by signal %d\" % (command, -rc))\n else:\n logging.error(\"`%s` returned %d\" % (command, rc))\n except OSError, exc:\n logging.error(\"Got an exception when executing %s:\" % (command, exc))", "def execute_remote(ipaddr, command):\n\n\treturn execute(['ssh', '-f', '-t', '-oPasswordAuthentication=no',\n\t\t'-l', 'alt', ipaddr, command])", "def hadoop(self, command, *args, **kwargs):\n hadoop_cmd = \"-{}\".format(re.sub(\"^-*\", \"\", command))\n return self.exec(\"hadoop fs\", hadoop_cmd, *args, **kwargs)", "def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)", "def do_shell(self, line):\n print 'Running shell command:', line\n output = os.popen(line).read()\n print output\n self.last_output = output", "def execute_remote_cmd(ip, user, cmd, timeout=10, suppress_output=False):\n cmd = \"ssh -o StrictHostKeyChecking=no %s@%s \\\"%s\\\"\" % (user, ip, cmd)\n l.info(\"Executing remote command [%s] on ip[%s], user[%s]\", cmd, ip, user)\n pg_cmd = PySysCommand(cmd)\n pg_cmd.run(timeout=timeout)\n output = pg_cmd.stdout + pg_cmd.stderr\n if not suppress_output:\n l.info(\"Result: %s\", output)\n return output", "def run_example_cluster_cmd(example_module_name, example_argv):\n run_example_cluster(example_module_name, example_argv)", "def executeCommand(command):\n time.sleep(1)\n #return os.system(command)\n subprocess.Popen(command, shell=True)", "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):\n return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "async def run_command(self, cmd: str) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n result = None\n # get/create ssh connection to miner\n conn = await self.get_connection(\"root\", \"admin\")\n # send the command and store the result\n for i in range(3):\n try:\n result = await conn.run(cmd)\n except:\n if i == 3:\n self.add_to_output(f\"Unknown error when running the command {cmd}...\")\n return\n pass\n # let the user know the result of the command\n if result is not None:\n if result.stdout != \"\":\n self.add_to_output(result.stdout)\n if result.stderr != \"\":\n self.add_to_output(\"ERROR: \" + result.stderr)\n elif result.stderr != \"\":\n self.add_to_output(\"ERROR: \" + result.stderr)\n else:\n self.add_to_output(cmd)", "def _run(\n name,\n cmd,\n output=None,\n no_start=False,\n 
preserve_state=True,\n stdin=None,\n python_shell=True,\n output_loglevel=\"debug\",\n use_vt=False,\n path=None,\n ignore_retcode=False,\n chroot_fallback=None,\n keep_env=\"http_proxy,https_proxy,no_proxy\",\n):\n orig_state = state(name, path=path)\n try:\n if attachable(name, path=path):\n ret = __salt__[\"container_resource.run\"](\n name,\n cmd,\n path=path,\n container_type=__virtualname__,\n exec_driver=EXEC_DRIVER,\n output=output,\n no_start=no_start,\n stdin=stdin,\n python_shell=python_shell,\n output_loglevel=output_loglevel,\n ignore_retcode=ignore_retcode,\n use_vt=use_vt,\n keep_env=keep_env,\n )\n else:\n if not chroot_fallback:\n raise CommandExecutionError(f\"{name} is not attachable.\")\n rootfs = info(name, path=path).get(\"rootfs\")\n # Set context var to make cmd.run_chroot run cmd.run instead of\n # cmd.run_all.\n __context__[\"cmd.run_chroot.func\"] = __salt__[\"cmd.run\"]\n ret = __salt__[\"cmd.run_chroot\"](\n rootfs,\n cmd,\n stdin=stdin,\n python_shell=python_shell,\n output_loglevel=output_loglevel,\n ignore_retcode=ignore_retcode,\n )\n finally:\n # Make sure we honor preserve_state, even if there was an exception\n new_state = state(name, path=path)\n if preserve_state:\n if orig_state == \"stopped\" and new_state != \"stopped\":\n stop(name, path=path)\n elif orig_state == \"frozen\" and new_state != \"frozen\":\n freeze(name, start=True, path=path)\n\n if output in (None, \"all\"):\n return ret\n else:\n return ret[output]", "def proc_exec(cmd):\n\n return envoy.run(cmd)", "def run_command(self, command, max_attempts=1):\n if self.remote is True:\n self.ssh_command(command, max_attempts)\n else:\n os.system(command)", "def run_command(self, command, timeout=None, stdout=True):\n print('Running \"{}\"...'.format(command))\n output = self._shell.run_command(\n command, timeout=timeout, async_=False\n )\n if stdout:\n print(output)\n print(\"Done!\")\n return output", "def run_cmd(cmd):\n return check_output(cmd, shell=True).decode('utf-8')", "def run_command(shell_command, get_output):\n command_ran = subprocess.run(shell_command, capture_output=get_output)\n return command_ran", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def docker_exec(self, verbose=False):\n # command = '\\'/usr/bin/docker exec -it `/usr/bin/docker ps --filter \"name=ecs-{}*\" -q` bash \\''\n command = self.provider.get_docker_exec_sub_command()\n command = command.format(self.service.family)\n self.ssh(command, is_running=True, verbose=verbose)", "def run_cmd(context, exec_cmd, pty=True, hide=False, error_message=\"An unknown error has occurred!\"):\n print(f\"LOCAL - Running command {exec_cmd}\")\n result = context.run(exec_cmd, pty=pty, hide=hide, warn=True)\n if not result:\n print(f\"ERROR - {error_message}\\n{result.stdout if pty else result.stderr}\")\n raise invoke.exceptions.UnexpectedExit(result)\n\n return result", "def sys_exec(command):\n print('Running: {}'.format(command))\n return os.popen(command).read().rstrip()" ]
[ "0.682894", "0.6679226", "0.6656794", "0.66437536", "0.6632637", "0.6577042", "0.65581995", "0.65328985", "0.65173554", "0.65003866", "0.64953446", "0.6490907", "0.64615303", "0.64323294", "0.6411985", "0.63707674", "0.6368736", "0.63686305", "0.6345518", "0.631454", "0.6314375", "0.6282134", "0.6278847", "0.62581897", "0.6216123", "0.6201247", "0.6197205", "0.6197062", "0.6182353", "0.61761755", "0.6169814", "0.61689615", "0.61689615", "0.6166661", "0.6166559", "0.61455184", "0.6139774", "0.61176825", "0.6115738", "0.61150104", "0.61055595", "0.6102491", "0.60815305", "0.60655874", "0.60655874", "0.60655874", "0.60650814", "0.605797", "0.60523045", "0.6046314", "0.6030312", "0.6022324", "0.60194486", "0.6017009", "0.6011484", "0.599797", "0.5977871", "0.5976437", "0.59690094", "0.5963265", "0.59621173", "0.595285", "0.5952016", "0.5947605", "0.5930585", "0.5923416", "0.5920449", "0.59113944", "0.590465", "0.5904389", "0.588655", "0.5882522", "0.5878918", "0.5875218", "0.58741885", "0.58728516", "0.586572", "0.58652985", "0.5851413", "0.58468807", "0.5845965", "0.5834058", "0.5831136", "0.58234286", "0.5823115", "0.581302", "0.58119935", "0.58103365", "0.5805628", "0.58051157", "0.57977515", "0.5796124", "0.5778804", "0.57784367", "0.5771545", "0.57684904", "0.57647336", "0.5763647", "0.5760113", "0.57598746", "0.57584304" ]
0.0
-1
Gather overall cluster information
def get_ceph_info(handle, ceph_config, timeout):
    cluster = dict()
    cluster['status'] = ceph_mon_command(handle, 'status', timeout)
    cluster['version'] = shell_command('ceph -v') + b'\n'
    # ceph versions command was introduced in mimic
    version = cluster['version']
    version = str(version.decode('utf-8')).split(' ')[2].split(".")[0]
    if int(version) >= 13:
        cluster['versions'] = shell_command('ceph versions') + b'\n'
    fsid = handle.get_fsid() + '\n'
    cluster['fsid'] = str.encode(fsid)
    with open(ceph_config, 'r') as f:
        ceph_conf = f.read()
    cephconf = str(ceph_conf)
    cluster['ceph_conf'] = str.encode(cephconf)
    return cluster
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def _load_cluster(self):", "def _get_cluster_list(self):\n return self.__cluster_list", "def print_cluster_attributes(self, objects):\n print(\"\\n\")\n print((\"ClusterName\".ljust(35),\":\",objects.ClusterName.value()))\n print((\"Repository Disk\".ljust(35),\":\", \\\n objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value()))\n print(\"\\nNodes in the cluster :\\n-----------------------\")\n for Node in objects.Node.Node :\n print((\"HostName\".ljust(35),\":\",\\\n Node.HostName.value()))\n print((\"PartitionID\".ljust(35),\":\", \\\n Node.PartitionID.value()))\n print()", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def _get_cluster_components(self):\n print(\"Connecting to cluster...\")\n self.cluster.connect_to_cluster()\n print(\"Connected!\")\n print(\"Collecting information from the cluster...\")\n return self.cluster.get_components()", "def clusters(self):\n raise NotImplementedError", "def show_clusters(self):\n cluster_ids = [\n self.controller.cluster and self.controller.cluster['id']\n ]\n self.print_list(\n ('id', 'name', 'status'), self.controller.get_clusters(),\n lambda x: cluster_ids.index(x['id'])\n )", "def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def atlas_clusters():\n pass", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def cluster(self):\n assert False", "def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()", "def print_cluster_summary(algo, i):\n assert algo in ['DBSCAN', 'KMeans', 'DBSCAN_topics', 'KMeans_topics']\n \n cluster_df = apps_df.copy()\n cluster_df = cluster_df[cluster_df[algo] == i]\n print('Cluster {} consists out of {} apps.'.format(str(i), str(cluster_df.shape[0])))\n titles = list(cluster_df['title'])\n print('The apps are:\\n {}'.format('\\n\\t'.join(titles)))", "def get_cluster_dstructure(self, curs, mcl_id, splat_table, mcl_table):\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tcluster = self.get_basic_cluster_dstructure(curs, mcl_id, splat_table, mcl_table)\n\t\tif cluster:\t#not 
None\n\t\t\tcluster.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, cluster.vertex_set)\n\t\t\tcluster.go_no2information = self.get_information_of_go_functions(curs, cluster.go_no2association_genes, \\\n\t\t\t\tlen(cluster.vertex_set), no_of_total_genes)\n\t\t\tcluster.edge_cor_2d_list, cluster.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, cluster.edge_set)\n\t\t\t#graph = self.graph_from_node_edge_set(cluster.vertex_set, cluster.edge_set)\n\t\treturn cluster\n\t\t\n\t\t\"\"\"\n\t\tprint \"vertex_set\"\n\t\tprint cluster.vertex_set\n\t\tprint \"edge_set\"\n\t\tprint cluster.edge_set\n\t\trecurrence_list_2d = ['recurrence_array']+cluster.recurrence_array\n\t\trecurrence_list_2d_1 = ['recurrence_array_1']+cluster.recurrence_array\n\t\trecurrence_list_2d = [recurrence_list_2d, recurrence_list_2d_1]\n\t\tself.column_output('/tmp/yh/recurrence_array',recurrence_list_2d)\n\n\t\tprint cluster.splat_connectivity\n\t\tprint \"connectivity\"\n\t\tprint cluster.connectivity\n\t\tprint \"connectivity_original\"\n\t\tprint cluster.connectivity_original\n\t\tcor_list_2d = []\n\t\tsig_list_2d = []\n\t\tfor i in range(len(cluster.edge_set)):\n\t\t\tcor_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_cor_2d_list[i])\n\t\t\tsig_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_sig_2d_list[i])\n\t\tself.column_output('/tmp/yh/edge_cor_2d_list', cor_list_2d)\n\t\tself.column_output('/tmp/yh/edge_sig_2d_list', sig_list_2d)\n\n\t\tgo_no_list_2d = []\n\t\tfor go_no,information in cluster.go_no2information.iteritems():\n\t\t\tgo_no_list_2d.append(list(information)+[len(cluster.go_no2association_genes[go_no])])\n\t\t#self.column_output('/tmp/yh/go_no_list_2d', go_no_list_2d)\n\t\t\"\"\"", "def show_clusters() -> Dict[str, Cluster]:\n environment = EnvironmentProvider().environment\n return {key: value for key, value in environment.clusters.items()}", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def cluster_list():\n request_debug(r, logger)\n json_body = r.get_json(force=True, silent=True) or {}\n result = cluster_handler.list(filter_data=json_body)\n response_ok[\"data\"] = result\n return jsonify(response_ok), CODE_OK", "def cluster_info(self) -> ClusterInfoResult:\n if not self.connected:\n raise RuntimeError(\n \"Cluster is not connected, cannot get cluster info.\")\n cluster_info = None\n cluster_info = self._get_cluster_info()\n self._cluster_info = cluster_info\n return cluster_info", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def find_clusters():\n clusters = 
ecs_client.list_clusters()['clusterArns']\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n logging.debug(\"Retrieved %i clusters\" % (len(clusters)))\n for cluster in clusters:\n ratio = SequenceMatcher(\n lambda item:\n item == \" \",\n \"arn:aws:ecs:us-east-1*cluster/default\",\n cluster\n ).ratio()\n if ratio < 0.82:\n cluster_short = cluster.split(\"/\")[1]\n if args.cluster and cluster_short != args.cluster:\n continue\n ecs_data[cluster_short] = {}\n logging.debug(\"Cluster: %s\" % (cluster))\n instance_arns = ecs_client.list_container_instances(\n cluster=cluster\n )['containerInstanceArns']\n instances = ecs_client.describe_container_instances(\n cluster=cluster,\n containerInstances=instance_arns\n )['containerInstances']\n logging.debug(\"Retrieved %i cluster instances\" % (len(instances)))\n for instance in instances:\n ecs_data[cluster_short][instance['ec2InstanceId']] = {\n 'instance_id': instance['ec2InstanceId'],\n 'cluster': cluster_short,\n 'containers': []\n }\n logging.debug(\"\\tLooking for tasks in (%s): %s %s\" % (instance_data[instance['ec2InstanceId']]['name'], instance_data[instance['ec2InstanceId']]['id'], instance['containerInstanceArn']))\n tasks = ecs_client.list_tasks(\n cluster=cluster,\n containerInstance=instance['containerInstanceArn'],\n )['taskArns']\n logging.debug(\"Retrieved %i cluster tasks\" % (len(tasks)))\n for task in tasks:\n containers = ecs_client.describe_tasks(\n cluster=cluster,\n tasks=[task]\n )['tasks']\n for container in containers:\n if args.action != \"list\":\n if container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0] == args.task:\n if args.action == \"ssh\":\n if args.random:\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"sshing to %s\" % (instance['ec2InstanceId']))\n print('*** Initiating Host Interactive Session\\n')\n interactive().connect(instance_data[instance['ec2InstanceId']]['private_ip'],'')\n sys.exit(0)\n if args.action == \"enter\":\n if args.random:\n logging.debug(\"Recording host %s for random selection\" % (instance['ec2InstanceId']))\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"connect to %s -> %s\" % (instance['ec2InstanceId'],container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0]))\n print '*** Initiating Container Interactive Session\\n'\n interactive().docker_enter(args.user, instance_data[instance['ec2InstanceId']]['private_ip'],args.task)\n sys.exit(0)\n if args.action == \"list\":\n logging.debug(\"%s matched arg(%s): %s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.action, instance['ec2InstanceId']))\n ecs_data[cluster_short][instance['ec2InstanceId']]['containers'].append(container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0])\n # logging.info(\"%s:%s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.task))\n return True", "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "def __str__(self):\n return \"Clustering\"", "def __str__(self):\n return \"Cluster\"", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def print_cluster(self):\n print('Cluster', self.number)\n for pattern in self.patterns:\n pattern.print_pattern()", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n 
self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "async def getAll():\n return [cluster.export() for cluster in clusters.get_all()]", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def cluster_info(self, target_nodes: Optional[\"TargetNodesT\"] = None) -> ResponseT:\n return self.execute_command(\"CLUSTER INFO\", target_nodes=target_nodes)", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def prepare_statistics(self):\n\n # statistics of clustering files\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n\n percentage_stars = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[shorter_index]))\n percentage_starlets = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[longer_index]))\n\n head = \"{0:<25}{1:<20}{2:<20}\\n\".format(\"name\", \"number\", \"description\")\n rows = \"\"\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"stars No.\", self.stars_length, \"in file with less(or equal) clusters: file\" + str(shorter_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"starlets No.\", self.starlets_length, \"in file with more(or equal) clusters: file\" + str(longer_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"identical cluster No.\", self.similarity_dist[10], \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[shorter_index], \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[longer_index], \"in starlets \")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum No\", self.shared_spec_num, \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_stars, 
\"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_starlets, \"in starlets\")\n self.tables.append(('statistics of files', head, rows))\n\n # distribution of cluster size in stars\n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_star_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[shorter_index].keys()):\n value = self.cluster_size_dist[shorter_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.stars_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in stars', head, rows))\n \n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_starlet_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[longer_index].keys()):\n value = self.cluster_size_dist[longer_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.starlets_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in starlets', head, rows))\n\n # distribution of similarity\n head = \"{0:<20}{1:<20}{2:<20}{3:<20}\\n\".format(\"similarity score\", \"pairs of clusters\", \"percentage(stars)\", \"percentage(starlets)\")\n rows = \"\"\n for key in reversed(sorted(self.similarity_dist.keys())):\n value = self.similarity_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent_star, percent_starlet)\n self.tables.append(('distribution of similarity (identical = 10)', head, rows))\n\n # distribution of star divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_star), \"average\")\n for key in sorted(self.star_divide_factor_dist.keys()):\n value = self.star_divide_factor_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_star)\n self.tables.append(('distribution of star divide factors', head, rows))\n\n # distribution of starlet divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_starlet), \"average\")\n for key in sorted(self.starlet_divide_factor_dist.keys()):\n value = self.starlet_divide_factor_dist[key]\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_starlet)\n self.tables.append(('distribution of starlet divide factors', head, rows))", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")", "def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in 
self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index", "def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])", "def test_get_hyperflex_cluster_list(self):\n pass", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def get_clusters(self):\r\n\r\n return self.__clusters", "def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT 
count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df", "def Clusters(self):\n return", "def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )", "def assemble_bicluster_info_single( self, db, db_file, cursor, iteration, cluster, run2id, row2id, col2id ):\n\t\t#print cluster\n\t\trun_name = db_file.split(\"/\")[-2]\n\t \tw = (cluster,iteration)\n\t \tcursor.execute(\"SELECT residual FROM cluster_stats WHERE cluster = ? AND iteration = ?;\", w )\n\t \tresidual = cursor.fetchone()[0]\n\t \tcursor.execute(\"SELECT name FROM row_members JOIN row_names ON row_members.order_num = row_names.order_num WHERE row_members.cluster = ? AND row_members.iteration = ?;\", w )\n\t \trows = [ row2id.loc[ str(i[0]) ].row_id for i in cursor.fetchall() ]\n\t \tcursor.execute(\"SELECT name FROM column_members JOIN column_names ON column_members.order_num = column_names.order_num WHERE column_members.cluster = ? 
AND column_members.iteration = ?;\", w )\n\t \tcols = [ col2id.loc[ str(i[0]) ].col_id for i in cursor.fetchall() ]\n\t \n\t \td = {\n\t \t\"run_id\": run2id.loc[run_name].run_id,\n\t \t\"cluster\": cluster,\n\t \t\"rows\": rows,\n\t \t\"columns\": cols,\n\t \t\"residual\": residual,\n\t \t}\n\n\t \treturn d", "def get_clusters(cluster_path): #{{{\n print 'loading cluster info'\n indicesToParticle = pickle.load(open(cluster_path+\"/verticesToParticle.p\",\"rb\"))\n indicesOnCluster = pickle.load(open(cluster_path+\"/verticesOnCell.p\",\"rb\"))\n maxIndices = pickle.load(open(cluster_path+\"/maxVertices.p\",\"rb\"))\n print 'done'\n\n return indicesToParticle, indicesOnCluster, maxIndices #}}}", "def _init_cluster(self):\n self._Init_Cluster()", "def clusters(self, *args, **kwargs):\n\n result, name = is_file(kwargs.get('value')[0])\n if result:\n jdata = load_file(name)\n dump = False\n else:\n url = self.base.format('file/clusters')\n if by_id:\n self.params['query'] = 'cluster:{0}'.format(kwargs.get('value')[0])\n else:\n self.params['date'] = name\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n\n if kwargs.get('return_raw'):\n return jdata\n\n if _check_error(jdata):\n return\n\n simple_list = (\n 'size_top200',\n 'num_clusters',\n )\n\n self.simple_print(jdata, simple_list, indent='\\n\\t')\n for key in simple_list:\n if jdata.get(key):\n self.print_key(key, indent='\\n\\t')\n print('\\n\\t', jdata.get(key))\n\n if jdata.get('clusters'):\n plist = [[]]\n for line in jdata['clusters']:\n plist.append(\n [line['label'], line['avg_positives'], line['id'], line['size']])\n\n pretty_print_special(\n plist,\n ['Label', 'AV Detections', 'Id', 'Size'],\n [40, 15, 80, 8],\n ['l', 'c', 'l', 'c'],\n kwargs.get('email_template')\n )\n\n if dump:\n jsondump(jdata, 'clusters_{0}'.format(name))", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def all_cluster_summary_fn(self):\n return op.join(self.combined_dir, 'all.cluster_summary.json')", "def get_cluster_conf(self):\n # Load cluster configuration file\n fpath = Path(self.cmd_opts.get(CLI_CMDOPT.DCOS_CLUSTERCFGPATH))\n\n # Unblock irrelevant local operations\n if str(fpath) == 'NOP':\n self.cluster_conf_nop = True\n LOG.info(f'{self.msg_src}: cluster_conf: NOP')\n return {}\n\n if not fpath.is_absolute():\n if self.inst_storage.cfg_dpath.exists():\n fpath = self.inst_storage.cfg_dpath.joinpath(fpath)\n else:\n 
fpath = Path('.').resolve().joinpath(fpath)\n\n cluster_conf = cr_utl.rc_load_ini(\n fpath, emheading='Cluster setup descriptor'\n )\n\n # CLI options take precedence, if any.\n # list(tuple('ipaddr', 'port'))\n cli_master_priv_ipaddrs = [\n ipaddr.partition(':')[::2] for ipaddr in\n self.cmd_opts.get(CLI_CMDOPT.MASTER_PRIVIPADDR, '').split(' ') if\n ipaddr != ''\n ]\n mnode_sects = [\n sect for sect in cluster_conf if sect.startswith('master-node')\n ]\n # iterator(tuple('ipaddr', 'port'), str)\n change_map = zip(cli_master_priv_ipaddrs, mnode_sects)\n for item in change_map:\n if item[0][0]:\n cluster_conf[item[1]]['privateipaddr'] = item[0][0]\n if item[0][1]:\n try:\n port = int(item[0][1])\n except (ValueError, TypeError):\n port = cm_const.ZK_CLIENTPORT_DFT\n port = (port if 0 < port < 65536 else\n cm_const.ZK_CLIENTPORT_DFT)\n cluster_conf[item[1]]['zookeeperclientport'] = port\n\n # Add extra 'master-node' sections, if CLI provides extra arguments\n extra_cli_items = cli_master_priv_ipaddrs[len(mnode_sects):]\n for n, item in enumerate(extra_cli_items):\n if item[0]:\n # TODO: Implement collision tolerance for section names.\n cluster_conf[f'master-node-extra{n}'] = {}\n cluster_conf[f'master-node-extra{n}']['privateipaddr'] = (\n item[0]\n )\n if item[1]:\n try:\n port = int(item[1])\n except (ValueError, TypeError):\n port = cm_const.ZK_CLIENTPORT_DFT\n port = (port if 0 < port < 65536 else\n cm_const.ZK_CLIENTPORT_DFT)\n cluster_conf[f'master-node-extra{n}'][\n 'zookeeperclientport'\n ] = port\n # DC/OS storage distribution parameters\n cli_dstor_url = self.cmd_opts.get(CLI_CMDOPT.DSTOR_URL)\n cli_dstor_pkgrepo_path = self.cmd_opts.get(\n CLI_CMDOPT.DSTOR_PKGREPOPATH\n )\n cli_dstor_pkglist_path = self.cmd_opts.get(\n CLI_CMDOPT.DSTOR_PKGLISTPATH\n )\n cli_dstor_dcoscfg_path = self.cmd_opts.get(\n CLI_CMDOPT.DSTOR_DCOSCFGPATH\n )\n if not cluster_conf.get('distribution-storage'):\n cluster_conf['distribution-storage'] = {}\n\n if cli_dstor_url:\n cluster_conf['distribution-storage']['rooturl'] = cli_dstor_url\n if cli_dstor_pkgrepo_path:\n cluster_conf['distribution-storage']['pkgrepopath'] = (\n cli_dstor_pkgrepo_path\n )\n if cli_dstor_pkglist_path:\n cluster_conf['distribution-storage']['pkglistpath'] = (\n cli_dstor_pkglist_path\n )\n if cli_dstor_dcoscfg_path:\n cluster_conf['distribution-storage']['dcoscfgpath'] = (\n cli_dstor_dcoscfg_path\n )\n\n # Local parameters of DC/OS node\n cli_local_priv_ipaddr = self.cmd_opts.get(CLI_CMDOPT.LOCAL_PRIVIPADDR)\n if not cluster_conf.get('local'):\n cluster_conf['local'] = {}\n\n if cli_local_priv_ipaddr:\n cluster_conf['local']['privateipaddr'] = cli_local_priv_ipaddr\n\n return cluster_conf", "def cluster():\n try:\n cf = sys.argv[1]\n except IndexError:\n raise ConfigError('Please provide a configuration file.')\n\n with open(cf, 'r') as stream:\n data = load(stream, Loader=Loader)\n dbname = (data['setup'])['database name']\n dbusr = (data['setup'])['database user']\n \n try:\n conn = psycopg2.connect(host='localhost', database=dbname, user=dbusr)\n except:\n raise ConfigError('Could not connect to database.')\n \n try:\n cur = conn.cursor()\n cur.execute(\n 'CLUSTER detected_source_q3c_ang2ipix_idx ON detected_source;')\n cur.execute('ANALYZE detected_source;')\n cur.execute('CLUSTER assoc_source_q3c_ang2ipix_idx ON assoc_source;')\n cur.execute('ANALYZE assoc_source;')\n cur.close()\n print('\\ndetected_source and assoc_source tables successfully '\n 'clustered and analyzed.')\n except:\n raise ConfigError('Tables could not be 
clustered.')", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def list_cluster(self, ip, x_api_session):\n log.log_debug(\"cluster object list is started\")\n list_object = ListModule.ListModule()\n object_list = list_object.listing(\"uom\", ip,\n self.root, self.content_type,\n \"Cluster\", x_api_session)\n log.log_debug(\"cluster object list is returned\")\n return object_list", "def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters", "def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n 
print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in 
total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")", "def print_cluster(self, cluster, value):\n total = 0\n ham = 0\n spam = 0\n for message in cluster:\n if self.spamorham[self.ids[message]] == 'ham':\n ham += 1\n elif self.spamorham[self.ids[message]] == 'spam':\n spam += 1\n else:\n print(\"ERROR!\")\n total += 1\n\n print(\"Total number of messages in the {0} cluster: {1}\\n\"\n \"Percentage of SPAM messages in the {2} cluster: {3}\\n\"\n \"Percentage of HAM messages in the {4} cluster: {5}\".format(value, total, value,\n str((float(spam) / total) * 100), value,\n str((float(ham) / total) * 100)))", "def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data", "def info(self):\n\t\ttry:\n\t\t\tprint(\"DBSCAN was successfully achieved by building {} cluster(s) and by identifying {} elements as noise.\\nIf you are not happy with the result, do not forget to normalise the data before applying DBSCAN. 
As this algorithm is using the p-distance, it is really sensible to the data distribution.\".format(self.n_cluster, self.n_noise))\n\t\texcept:\n\t\t\tprint(\"Please use the 'fit' method to start the algorithm.\")", "def __init__(self):\n self._predefined_cluster_topics()\n self._gatherSEs()", "def cluster(self, verbose=0, sum_ess=False):\n ## if sum_ess and self.linkage.__name__ != \"ward_link\":\n ## raise ValueError(\n ## \"Summing for method other than Ward makes no sense...\")\n clusters = copy.copy(self._dist_matrix)\n #clusters = self._dist_matrix\n summed_ess = 0.0\n\n while len(clusters) > max(self._num_clusters, 1):\n if verbose >= 1:\n print('k=%s' % len(clusters))\n if verbose == 2:\n print(clusters)\n\n best, i, j = self.smallest_distance(clusters)\n # In Ward (1963) ess is summed at each iteration\n # in R's hclust and Python's hcluster and some text books it is not.\n # Here it is optional...\n if sum_ess:\n summed_ess += best\n else:\n summed_ess = best\n clusters = self.update_distmatrix(i, j, clusters)\n self._dendrogram.merge(i,j)\n self._dendrogram[i].distance = summed_ess\n indices = numpy.arange(clusters.shape[0])\n indices = indices[indices!=j]\n clusters = clusters.take(indices, axis=0).take(indices, axis=1)", "def resume_cluster(self):\n self.log.info(\"Loading info from the IaaS\")\n if not isfile(self.save_file):\n self.log.info(\"No existing created cluster\")\n saved_nodes = []\n else:\n saved_cluster = loads(open(self.save_file, 'r').read())\n saved_nodes = saved_cluster['clients']\n\n in_nodes = Node.get_all_nodes(check_active=True)\n for n in in_nodes:\n if n.name not in saved_nodes:\n if \"orchestrator\" in n.name:\n global orchestrator\n orchestrator = n\n self.log.debug('Found orchestrator %s' % n.name)\n continue\n else:\n self.all_nodes.append(n)\n #sort nodes by name\n self.all_nodes.sort(key=lambda x: x.name)", "def enumerate_clusterings(self):\n\n # Initialize an empty list of clusterings. Each element of the list\n # is a dictionary mapping NOEs to the signatures they are clustered to\n # in a solution. Each clustering is initialize with all uniquely\n # clusterable NOEs as keys mapping to their unique clusters\n\n clusterings = []\n\n while True:\n\n # Run the solver and get a solution back\n\n solution = self.solve()\n\n # If UNSAT, then flush aux clauses from the formula and return\n # all the clusterings we found so far\n\n if not solution:\n self.flush()\n return clusterings\n\n # Iterate over the clustering variables set to true by in the\n # discovered solution. 
Forbid this clustering from reoccuring and\n # add it to the list of found clusterings\n\n clause = []\n clustering = {}\n for node in self.clustering_variables.keys():\n if len(node.clusters) == 1:\n clustering[node] = list(node.clusters)[0]\n\n for vtype, node, cluster in solution:\n if vtype == Formula.CST_VAR:\n clustering[node] = cluster\n clause.append(-self.clustering_variables[node][cluster])\n\n self.add_clause(clause)\n clusterings.append(clustering)", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def describe_cluster_no_status_response():\n return {\n \"cluster\": {\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def cluster_health(self, host):\n\n h = self.call_to_cluster(host, '/_cluster/health')\n\n data = {\n 'number_of_nodes': h['number_of_nodes'],\n 'unassigned_shards': h['unassigned_shards'],\n 'timed_out': h['timed_out'],\n 'active_primary_shards': h['active_primary_shards'],\n 'relocating_shards': h['relocating_shards'],\n 'active_shards': h['active_shards'],\n 'initializing_shards': h['initializing_shards'],\n 'number_of_data_nodes': h['number_of_data_nodes']\n }\n\n return data", "def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 
'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data", "def get_all_clusters(self) -> Dict[str, List[str]]:\n result = {}\n for c_id in set(self._clusters.values()):\n result[c_id] = self.get_cluster_by_id(c_id)\n return result", "def test_get_hyperflex_cluster_profile_list(self):\n pass", "def _predefined_cluster_topics(self):\n\n self.clusters = ['Doesnt fit',\n 'Weight changes', \n 'Mood and behavioral changes', \n 'Vision changes',\n 'Headaches',\n 'Body aches and pain',\n 'Memory and concentration issues',\n 'Menstrual changes',\n 'Sleep issues and drowsiness',\n 'Balance, coordination, and muscle control',\n 'Dizziness and fainting',\n 'Stomach issues',\n 'Intestinal issues',\n 'Skin issues',\n 'Dry mouth and changes in taste',\n 'Blood sugar changes',\n 'Hair loss and abnormal growth',\n 'Changes in libido and sexual performance',\n 'Changes in energy',\n 'Sweating and temperature control issues',\n 'Eye itching or sensitivity changes',\n 'Blood pressure and heart rate changes',\n 'Changes in appetite',\n 'Urinary changes',\n 'Kidney issues',\n 'Hearing issues',\n 'Respiratory issues and coughing',\n 'Salivary issues',\n 'Breast growth and swelling (all genders)',\n 'Dental issues']", "def list_ecs_clusters():\n clusters = ECS_MANAGER.list_ecs_clusters()\n\n print(str_sep)\n\n if clusters:\n print(\"Listing clusters ARNs available in {}\"\n .format(SESSION.region_name.upper()))\n print(str_sep)\n for arn in clusters['clusterArns']:\n print(arn)\n\n print(str_sep)", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def extract_clusters(self, dictionary=None, autorenaming_option=True):\n cluster_list = self.__dendrogram._extract_clusters_by_color()\n return cluster_list if autorenaming_option is False else self.__autorename_clusters(cluster_list, dictionary, 5)", "def data_fetch(self, curs, splat_table, mcl_table, crs_no=0, output_fname=None):\n\t\tgene_no2gene_id = get_gene_no2gene_id(curs)\t#08-31-05\n\t\toutf = open(output_fname, 'w')\t#08-31-05\n\t\toutf.write(\"r:=[\")\t#08-31-05\n\t\t\n\t\tmcl_id2cluster_dstructure = {}\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tsys.stderr.write(\"Getting the basic information for all clusters...\\n\")\n\t\tcurs.execute(\"DECLARE crs%s CURSOR FOR select m.mcl_id, m.vertex_set, m.connectivity, 0,\\\n\t\t\tm.recurrence_array, s.edge_set, s.connectivity, m.cooccurrent_cluster_id from %s m, %s s where \\\n\t\t\tm.splat_id=s.splat_id\"\\\n\t\t\t%(crs_no, mcl_table, splat_table))\t#06-20-05\tconnectivity_original faked to be 0\n\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\trows = curs.fetchall()\n\t\twhile rows:\n\t\t\tfor row in rows:\n\t\t\t\tunit = cluster_dstructure()\n\t\t\t\tunit.cluster_id = row[0]\n\t\t\t\tvertex_set = row[1][1:-1].split(',')\n\t\t\t\tunit.vertex_set = map(int, vertex_set)\n\t\t\t\tunit.connectivity = 
row[2]\n\t\t\t\tunit.connectivity_original = row[3]\n\t\t\t\trecurrence_array = row[4][1:-1].split(',')\n\t\t\t\tunit.recurrence_array = map(float, recurrence_array)\n\t\t\t\tunit.edge_set = parse_splat_table_edge_set(row[5])\n\t\t\t\tunit.splat_connectivity = row[6]\n\t\t\t\tunit.cooccurrent_cluster_id = row[7]\n\t\t\t\tunit.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, unit.vertex_set)\n\t\t\t\tunit.go_no2information = self.get_information_of_go_functions(curs, \\\n\t\t\t\t\tunit.go_no2association_genes, len(unit.vertex_set), no_of_total_genes, p_value_cut_off=0.05)\t#jasmine wants to cut some go-nos.\n\t\t\t\tunit.edge_cor_2d_list, unit.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, unit.edge_set)\n\t\t\t\t\n\t\t\t\tstr_tmp = self.return_string_form_of_cluster_dstructure(unit, gene_no2gene_id)\t#08-31-05\n\t\t\t\toutf.write(\"%s,\"%str_tmp)\n\t\t\t\t#mcl_id2cluster_dstructure[unit.cluster_id] = unit\n\t\t\t\t\"\"\"\n\t\t\t\torder_1st_id, order_2nd_id = map(int, unit.cooccurrent_cluster_id.split('.'))\n\t\t\t\tif order_1st_id not in self.order_1st_id2all_clusters:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id] = {}\n\t\t\t\tif order_2nd_id not in self.order_1st_id2all_clusters[order_1st_id]:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id] = []\n\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id].append(unit)\n\t\t\t\t\"\"\"\n\t\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\t\trows = curs.fetchall()\n\t\toutf.write(\"[]]:\")\t#08-31-05, 09-01-05 add the last blank []\n\t\tdel outf\n\t\tsys.stderr.write(\"Done.\\n\")\n\t\treturn mcl_id2cluster_dstructure", "def clustering(self):\n ret_concepts = []\n clusters = []\n for word in self.words:\n clusters.append(WordCluster(None, word))\n while len(clusters) > 1:\n maxi = -1\n maxj = -1\n max = -1\n m = -1\n for i in range(len(clusters)):\n for j in range(len(clusters)):\n if i == j:\n continue\n # print(\"%d cluster compare with %d cluster\" % (i, j))\n # 1: join 21: i absorb j 22: j absorb i 3: collapse\n # l1: join L(Tm) value l21: A absorb B L(Tm)value\n l1, newtags = self.__calculate_ltm(clusters[i], clusters[j], 1)\n if l1 > max:\n m = 1\n maxi = i\n maxj = j\n max = l1\n print(\"max L(Tm) for clustering in current loop: %lf\" % max)\n if max < ClusterAlgorithm.P_threshold:\n return\n Tm = clusters[maxi].join(clusters[maxj])\n Tm_concepts = self.__select_concepts(self.__getword(Tm))\n for tmp_concept in Tm_concepts.items():\n ret_concepts.append(tmp_concept)\n rm1 = clusters[maxi]\n rm2 = clusters[maxj]\n clusters.remove(rm1)\n clusters.remove(rm2)\n if Tm is not None:\n print(\"merged cluster's words:\")\n print(self.__getword(Tm))\n return ret_concepts", "def clusters(self):\n\t\tif self._record is None:\n\t\t return []\n\t\tclusters = [i for i in self._record.features if i.type == 'cluster']\n\t\treturn clusters", "def compute_cluster_metrics_helper(self, event_index: int, timestamp: datetime, cp_count: int,\n cluster: Cluster, cluster_type: str) -> None:\n self.cluster_metrics.append([event_index, timestamp, cp_count, cluster.id, cluster.centroid[0],\n cluster.centroid[1], cluster.radius, cluster.weight, cluster_type])", "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def get_clusters(self):\n return 
self._clusters", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }", "def print_all_tasks(self):\n cluster_root = gdb.parse_and_eval('uKernelModule::globalClusters.root')\n if cluster_root is 0x0:\n print('uKernelModule::globalClusters list is null')\n return\n\n curr = cluster_root\n print('{:>20}{:>18}'.format('Cluster Name', 'Address'))\n\n while True:\n addr = str(curr['cluster_'].reference_value())[1:]\n print(\n ('{:>20}{:>18}'.format(curr['cluster_']['name'].string(),\n addr))\n )\n\n self.print_tasks_by_cluster_address(addr)\n curr = curr['next'].cast(uClusterDL_ptr_type)\n if curr == cluster_root:\n break", "def describe_cluster_creating_response():\n return {\n \"cluster\": {\n \"status\": \"CREATING\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {},\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in 
self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask 
arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def test_list_cluster_network(self):\n pass", "def clustering_metrics(clusts, node_assn, node_pred):\n pred_vox = cluster_to_voxel_label(clusts, node_pred)\n true_vox = cluster_to_voxel_label(clusts, node_assn)\n ari = ARI(pred_vox, true_vox)\n ami = AMI(pred_vox, true_vox)\n sbd = SBD(pred_vox, true_vox)\n pur, eff = purity_efficiency(pred_vox, true_vox)\n return ari, ami, sbd, pur, eff", "def get_db_afvalcluster_info():\n db_df = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_rel_nodes_poi\n \"\"\")\n db_df['woning'] = db_df['bk_afv_rel_nodes_poi'].str.split('~')\n db_df['cluster_x'] = db_df['woning'].apply(lambda x: x[0]).astype('float')\\\n .round(0).astype('int')\n db_df['cluster_y'] = db_df['woning'].apply(lambda x: x[1]).astype('float')\\\n .round(0).astype('int')\n db_df['type'] = db_df['woning'].apply(lambda x: x[2])\n db_df['bag'] = db_df['woning'].apply(lambda x: x[3])\n db_df = db_df.drop('woning', axis=1)\n return db_df", "def clusters(self):\n return self._clusters", "def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. 
\n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist", "def cluster_data(data_loc, num_clusters, base_destination, vectorizer):\n cluster_df = __title_cluster_df(data_loc, num_clusters, vectorizer)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vectorizer, f)\n cluster_stats = {}\n for i in range(num_clusters):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_stats[i] = titles.shape[0]\n cluster_data = __get_data_with_titles(data_loc, titles)\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(i))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(cluster_stats[cluster]) + '\\n')", "def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info", "def mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def list_clusters(ctx, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n clusters = ctx.obj.groups[project.id].clusters.get()\n pprint(clusters.data)", "def 
internal_global_clustering(self, node_list):\n clustering = self.local_clustering()\n internal_clustering = clustering[node_list].mean()\n return internal_clustering", "def get_stats(self):\n stats = \\\n 'cluster: %s\\ncount = %d, size = %d, minvar = %f, avg_dist = %s\\n'\\\n % (self.name, self.count, self.size, self.minvar, self.avg_dist)\n return stats" ]
[ "0.7719436", "0.74022055", "0.73409766", "0.7059982", "0.6924038", "0.6866448", "0.67935354", "0.67794836", "0.6743119", "0.6643425", "0.66368085", "0.6614722", "0.6586277", "0.6576589", "0.6573444", "0.6546255", "0.65434664", "0.651949", "0.6503765", "0.64962125", "0.64861023", "0.6473851", "0.644249", "0.6422214", "0.6421132", "0.64067817", "0.63698894", "0.6357483", "0.6357483", "0.6352547", "0.6347965", "0.6335867", "0.63318866", "0.63267493", "0.6319974", "0.6310271", "0.6297092", "0.62964505", "0.6286517", "0.62813085", "0.6267302", "0.62563777", "0.6249643", "0.6247876", "0.6215452", "0.62037325", "0.6199444", "0.6187629", "0.61758417", "0.617139", "0.6144406", "0.61427", "0.6120621", "0.61069083", "0.6106224", "0.6103406", "0.60986954", "0.6094939", "0.6088205", "0.6084236", "0.606831", "0.606524", "0.60458577", "0.60404384", "0.603467", "0.60310555", "0.60284096", "0.60215145", "0.6007479", "0.60047436", "0.6002092", "0.5998586", "0.5996607", "0.59915364", "0.5990169", "0.59900165", "0.5988037", "0.5980183", "0.59613174", "0.595498", "0.5950596", "0.5948217", "0.5946014", "0.59361655", "0.59316385", "0.59139556", "0.5913656", "0.59062105", "0.5895951", "0.58959293", "0.589504", "0.58880186", "0.5886539", "0.5875011", "0.5874194", "0.5871971", "0.5864952", "0.5855684", "0.58535737", "0.5851195", "0.5840453" ]
0.0
-1
Gather cluster health information
def get_health_info(handle, timeout):
    health = dict()
    health['stat']   = ceph_mon_command(handle, 'health' , timeout)
    # TODO command not known with ceph_mon_command
    #health['detail'] = ceph_mon_command(handle, 'health detail', timeout)
    health['detail'] = shell_command('ceph health detail') + b'\n'
    health['df']     = ceph_mon_command(handle, 'df'     , timeout)
    health['report'] = ceph_mon_command(handle, 'report' , timeout)
    return health
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_health(self, host):\n\n h = self.call_to_cluster(host, '/_cluster/health')\n\n data = {\n 'number_of_nodes': h['number_of_nodes'],\n 'unassigned_shards': h['unassigned_shards'],\n 'timed_out': h['timed_out'],\n 'active_primary_shards': h['active_primary_shards'],\n 'relocating_shards': h['relocating_shards'],\n 'active_shards': h['active_shards'],\n 'initializing_shards': h['initializing_shards'],\n 'number_of_data_nodes': h['number_of_data_nodes']\n }\n\n return data", "def _retrieve_health_data(self):\n return self._client.request('_cluster/health', query={'level': 'shards'}).data", "def health_check(system=''):\n if not system:\n system = os.uname()[1]\n\n print 'Checking system: %s' % (system)\n c = hcvcs.VCS(server=system)\n if not c.status:\n print ' Error: Problem communicating with cluster. Moving on.'\n return\n\n # 0. Status information\n t1 = time.localtime(int(c.info['ClusterTime']))\n print ' Cluster \"%s\" was last updated %s (%s)' % (c.info['ClusterName'], time.strftime('%F %T', t1), c.info['ClusterTime'])\n # VCSFeatures == 'NONE' means non-global. WACPort is the port which a global cluster connect to.\n print ' VCSFeatures: %s, WACPort: %s' % (c.info['VCSFeatures'], c.info['WACPort'])\n\n # 1. General cluster status health\n c_info = c.status[system]\n if c.info['ReadOnly'] != '1':\n print ' Warn: Cluster is Writable. (haconf -dump -makero)'\n if c_info['frozen']:\n print ' Warn: system %s is frozen.' % system\n if c_info['state'] != 'RUNNING':\n print ' Warn: system %s state is \"%s\".' % (system, c_info['state'])\n\n attr_list = std_cluser_attr\n for k, v in attr_list.iteritems():\n if c.info[k] != v:\n print ' Warn: Expecting cluster \"%s\" value \"%s\" to be \"%s\": Currently \"%s\".' % (system, k, v, c.info[k])\n \n # 2. Service group health\n for group in c.groups:\n g_state = c_info[group]\n #print ' Checking group: %s - \"%s\" on \"%s\"' % (group, g_state['state'], system)\n if not g_state['probed']:\n print ' Warn: group \"%s\" is not probed on system \"%s\".' % (group, system)\n if g_state['autodisabled']:\n print ' Warn: group \"%s\" is currently autodisabled.' % (group)\n \n g_list = c.group_display(group) #, c.group_display(group, system)\n\n g_info = hcvcs.quad2dict(g_list)\n # Check values that should be set. Some attributes are different for parallel vs. failover groups.\n if g_info.get('Parallel', '0') == '1':\n attr_list = parallel_group_attr\n else:\n attr_list = failover_group_attr\n for k, v in attr_list.iteritems():\n try:\n if g_info[k] != v:\n print ' Warn: Expecting group %s \"%s\" to be \"%s\": Currently \"%s\".' % (group, k, v, g_info[k])\n except (KeyError), e:\n pass\n\n # Is the group configured to run on all systems?\n syslist = g_info.get('SystemList', '').split('\\t')\n group_nodes = set([ syslist[i] for i in range(len(syslist)) if not i % 2 ])\n cluster_nodes = set(c.status.keys())\n group_nodes_off = cluster_nodes.difference(group_nodes)\n if group_nodes_off:\n print ' Warn: group %s is not configured to run on cluster nodes: %s' % (group, ', '.join(group_nodes_off))\n \n # 3. Attributes on a group\n for resource in [ x[0] for x in c.resource_list(group) if x[1] == system ]:\n r_list = c.resource_display(resource, system)\n r_info = hcvcs.quad2dict(r_list)\n attr_list = std_resource_attr\n for k, v in attr_list.iteritems():\n try:\n if r_info[k] != v:\n print ' Warn: Resource \"%s\", in group \"%s\", attr \"%s\" should be \"%s\": Currently \"%s\".' 
% (resource, group, k, v, r_info[k])\n except (KeyError), e:\n pass", "def test_healthcheck_galera_cluster(host):\n\n sql_query = (\"show status where Variable_name like 'wsrep_clu%'\"\n \"or Variable_name like 'wsrep_local_state%';\")\n mysql_cmd = 'mysql -h localhost -e \"{0}\"'.format(sql_query)\n\n cmd = \"{} {}\".format(galera_container, mysql_cmd)\n\n output = host.run(cmd)\n verify_items = ['wsrep_cluster_conf_id',\n 'wsrep_cluster_size',\n 'wsrep_cluster_state_uuid',\n 'wsrep_cluster_status',\n 'wsrep_local_state_uuid']\n\n for item in verify_items:\n assert item in output.stdout", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data", "def health():\n return jsonify(hostname=hostname, uptime=uptime(), \\\n cpu_percent=int(cpu_percent(interval=None, percpu=False)))", "def status(**kwargs): # noqa\n try:\n cluster_health = ClusterHealth()\n return ClusterHealthSchema().dump(cluster_health)\n except Exception as e:\n logging.error(traceback.format_exc())\n return jsonify({\"message\": str(e)}), 500", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def describe_cluster_no_status_response():\n return {\n \"cluster\": {\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def get_health(self):\n return {\n 'api_name': 'BrightHive Master Client Index API',\n 'current_time': str(datetime.utcnow()),\n 'current_api_version': '1.0.0',\n 'api_status': 'OK'\n }, 200", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node 
List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def stat_cartridge_health(self):\n raise NotImplementedError", "def get_host_stats(self, refresh=False):", "def get_cluster_status(boto3_client, cluster_identifier):\n return boto3_client.describe_clusters(\n ClusterIdentifier=cluster_identifier\n )", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def get_health(self):\n return {'status': 'ok'}", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def get_cluster_usage_info(cluster_id, kind, namespace_id=None, pods_list=None):\n if pods_list is None:\n pods_list = []\n else:\n logger.info('pod list not none')\n if pods_list == 'no_pod_resource':\n return {'cpu': 0,\n 'memory': 0}\n else:\n logger.info('resources no 0')\n # node usage stats if needed\n if kind == 'nodes':\n cpu_usage_info = client.CustomObjectsApi().list_cluster_custom_object('metrics.k8s.io', 'v1beta1', kind)\n cpu_usage_in_cores = sum([int(''.join(filter(\n str.isdigit, str(cpu_usage_item['usage']['cpu'].encode(\n 'utf-8'))))) for cpu_usage_item in cpu_usage_info['items']])\n cpu_usage_in_percentage = round(cpu_usage_in_cores / 10000000, 0)\n memory_usage = sum([unit_conversion(int(''.join(filter(\n str.isdigit, str(memory_usage_item['usage']['memory'].encode(\n 'utf-8'))))), ''.join(filter(str.isalpha, str(memory_usage_item['usage']['memory'].encode('utf-8')))))\n for memory_usage_item in cpu_usage_info['items']])\n # pods usage stats\n elif kind == 'pods':\n if namespace_id:\n cpu_usage_info = client.CustomObjectsApi().list_namespaced_custom_object('metrics.k8s.io', 'v1beta1',\n namespace_id, kind)\n else:\n cpu_usage_info = 
client.CustomObjectsApi().list_cluster_custom_object('metrics.k8s.io', 'v1beta1', kind)\n if len(pods_list) != 0:\n cpu_usage_in_cores = round(unit_conversion(sum([int(''.join(filter(\n str.isdigit, str(cpu_usage_item['containers'][0]['usage']['cpu'].encode(\n 'utf-8'))))) for cpu_usage_item in cpu_usage_info['items'] if cpu_usage_item['metadata']['name']\n in pods_list]), 'n'), 2)\n memory_usage = round(sum([unit_conversion(int(''.join(filter(\n str.isdigit, str(memory_usage_item['containers'][0]['usage']['memory'].encode(\n 'utf-8'))))),\n ''.join(\n filter(str.isalpha, str(memory_usage_item['containers'][0]['usage']['memory'].encode('utf-8')))))\n for memory_usage_item in cpu_usage_info['items'] if memory_usage_item['metadata']['name']\n in pods_list]), 2)\n else:\n cpu_usage_in_cores = round(unit_conversion(sum([int(''.join(filter(\n str.isdigit, str(cpu_usage_item['containers'][0]['usage']['cpu'].encode(\n 'utf-8'))))) for cpu_usage_item in cpu_usage_info['items']]), 'n'), 2)\n memory_usage = round(sum([unit_conversion(int(''.join(filter(\n str.isdigit, str(memory_usage_item['containers'][0]['usage']['memory'].encode(\n 'utf-8'))))),\n ''.join(filter(str.isalpha, str(memory_usage_item['containers'][0]['usage']['memory'].encode('utf-8')))))\n for memory_usage_item in cpu_usage_info['items']]), 2)\n return {'cpu': cpu_usage_in_cores,\n 'memory': memory_usage}", "def test_get_hyperflex_health_list(self):\n pass", "def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }", "def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def home(request):\n ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)\n\n cresp, cluster_health = ceph.health(body='json')\n sresp, cluster_status = ceph.status(body='json')\n\n # Monitors\n all_mons = cluster_status['output']['monmap']['mons']\n up_mons = cluster_status['output']['health']['timechecks']['mons']\n total_mon_count = len(all_mons)\n mons_ok = 0\n mons_warn = 0\n mons_crit = 0\n\n for mon in up_mons:\n if mon['health'] == \"HEALTH_OK\":\n mons_ok += 1\n else:\n mons_warn += 1\n\n mons_crit = total_mon_count - (mons_ok + mons_warn)\n\n # Activity\n pgmap = cluster_status['output']['pgmap']\n activities = {}\n if 'read_bytes_sec' in pgmap:\n activities['Read'] = filesize.naturalsize(pgmap.get('read_bytes_sec'))\n if 'write_bytes_sec' in pgmap:\n activities['Write'] = filesize.naturalsize(pgmap.get('write_bytes_sec'))\n if 'op_per_sec' in pgmap:\n activities['Ops'] = pgmap.get('op_per_sec')\n if 'recovering_objects_per_sec' in pgmap:\n activities['Recovering Objects'] = pgmap.get('recovering_objects_per_sec')\n if 'recovering_bytes_per_sec' in pgmap:\n activities['Recovery Speed'] = filesize.naturalsize(pgmap.get('recovering_bytes_per_sec'))\n if 'recovering_keys_per_sec' in pgmap:\n activities['Recovering Keys'] = pgmap.get('recovering_keys_per_sec')\n\n # Get a rough estimate of cluster free space. 
Is this accurate ?\n presp, pg_stat = ceph.pg_stat(body='json')\n bytes_total = cluster_status['output']['pgmap']['bytes_total']\n bytes_used = cluster_status['output']['pgmap']['bytes_used']\n\n data_avail, data_scale = filesize.naturalsize(bytes_total).split()\n scale = filesize.suffixes['decimal'].index(data_scale)+1\n data_used = round(float(bytes_used)/pow(1024, scale), 1)\n\n # pgs\n pg_statuses = cluster_status['output']['pgmap']\n\n pg_ok = 0\n pg_warn = 0\n pg_crit = 0\n\n # pg states\n pg_warn_status = re.compile(\"(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)\")\n pg_crit_status = re.compile(\"(down|inconsistent|incomplete|stale|peering)\")\n\n for state in pg_statuses['pgs_by_state']:\n if state['state_name'] == \"active+clean\":\n pg_ok = pg_ok + state['count']\n\n elif pg_warn_status.search(state['state_name']):\n pg_warn = pg_warn + state['count']\n\n elif pg_crit_status.search(state['state_name']):\n pg_crit = pg_crit + state['count']\n\n # pg statuses\n pg_states = dict()\n\n for state in pg_statuses['pgs_by_state']:\n pg_states[state['state_name']] = state['count']\n\n # osds\n dresp, osd_dump = ceph.osd_dump(body='json')\n osd_state = osd_dump['output']['osds']\n\n osds_ok = 0\n osds_warn = 0\n osds_crit = 0\n\n # Possible states are: exists, up, autoout, new, ???\n osd_up = re.compile(\"(?=.*exists)(?=.*up)\")\n osd_down = re.compile(\"(?=.*exists)(?=.*autoout)\")\n\n for osd_status in osd_state:\n if osd_up.search(str(osd_status['state'])):\n osds_ok += 1\n elif osd_down.search(str(osd_status['state'])):\n osds_warn += 1\n else:\n osds_crit += 1\n\n return render_to_response('dashboard.html', locals())", "def describe_cluster_creating_response():\n return {\n \"cluster\": {\n \"status\": \"CREATING\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {},\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif 
match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "async def check_health():\n return {\"healthy\": True}", "def handle_cluster_status(self, request):\n \"\"\"\n @api {get} /cluster/status Get cluster status\n @apiName GetClusterStatus\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiSuccess {Object} nodes Nodes in the cluster.\n @apiSuccess {Object} nodes.node Node.\n @apiSuccess {String[]} nodes.node.pools Pools in which the node is registered.\n @apiSuccess {String} nodes.node.address IP address of the node.\n @apiSuccess {String} leader Leader node.\n\n @apiSuccessExample {json} Example response:\n {\n \"nodes\": {\n \"node1\": {\n \"pools\": [\"pool1\", \"pool2\"],\n \"address\": \"127.0.0.1:32001\"\n },\n \"node2\": {\n \"pools\": [\"pool1\"],\n \"address\": \"127.0.0.1:32002\"\n },\n \"node3\": {\n \"pools\": [\"pool2\"],\n \"address\": \"127.0.0.1:32003\"\n },\n },\n \"leader\": \"node1\"\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n status = {\n 'nodes': self.cluster.nodes,\n 'leader': self.cluster.leader\n }\n\n return HTTPReply(body = json.dumps(status), headers = headers)", "def get_host_stats(self, refresh=False):\n return self.host_status", "def cluster_info(self, target_nodes: Optional[\"TargetNodesT\"] = None) -> ResponseT:\n return self.execute_command(\"CLUSTER INFO\", target_nodes=target_nodes)", "def mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def get_host_stats(self):\n status, data, errors, messages = self._make_get_request(CraftyAPIRoutes.HOST_STATS)\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def get_host_stats(self):\n status, data, errors, messages = self._make_get_request(CraftyAPIRoutes.SERVER_STATS)\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def get_system_health(self):\n HealthTotal_mo = self.moDir.lookupByDn('topology/health')\n return HealthTotal_mo.cur", "def get_health(self):\n return self.health", "def test_get_hyperflex_cluster_list(self):\n pass", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def test_health_get(self):\n pass", "def health():\n return jsonify({\n 'status': 'UP',\n 'dependencies': {\n 'predixpy': predix.version,\n 'python': sys.version,\n }\n })", "def get_stats(self, host, keyname):\n\n h = self.call_to_cluster(host, '/_nodes/stats')\n\n node_name = h['nodes'].keys()[0]\n stats = h['nodes'][node_name]['indices'][keyname]\n\n return stats", "def get_stats(self):\n stats 
= \\\n 'cluster: %s\\ncount = %d, size = %d, minvar = %f, avg_dist = %s\\n'\\\n % (self.name, self.count, self.size, self.minvar, self.avg_dist)\n return stats", "def show_health(self):\n print(self.name + \"'s health is \", str(self.health))", "def server_agent_statistics(ctx):\n data = ctx.obj.get_agent_statistics()\n output_json_data(data)", "def Gethealth(self):\n return self.health", "def cluster_list():\n request_debug(r, logger)\n json_body = r.get_json(force=True, silent=True) or {}\n result = cluster_handler.list(filter_data=json_body)\n response_ok[\"data\"] = result\n return jsonify(response_ok), CODE_OK", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def get_health(self):\n return self.bot_client.send_command(_Command.GetHealth)", "def cluster_info(self) -> ClusterInfoResult:\n if not self.connected:\n raise RuntimeError(\n \"Cluster is not connected, cannot get cluster info.\")\n cluster_info = None\n cluster_info = self._get_cluster_info()\n self._cluster_info = cluster_info\n return cluster_info", "def _get_cluster_components(self):\n print(\"Connecting to cluster...\")\n self.cluster.connect_to_cluster()\n print(\"Connected!\")\n print(\"Collecting information from the cluster...\")\n return self.cluster.get_components()", "def get_health_check(self):\n return util.create_response(output=\"OK\")", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def get_host_stats(self, refresh=False):\n stats = []\n for nodename in self._drv_nodes:\n host_status = self.host_status_base.copy()\n host_status['hypervisor_hostname'] = nodename\n host_status['host_hostname'] = nodename\n host_status['host_name_label'] = nodename\n host_status['hypervisor_type'] = self.name\n host_status['vcpus'] = drv_conf.max_vcpus\n host_status['memory_mb'] = drv_conf.max_memory_mb\n host_status['local_gb'] = drv_conf.max_disk_gb\n stats.append(host_status)\n if len(stats) == 0:\n raise exception.NovaException(\"Azure Driver has no node\")\n elif len(stats) == 1:\n return stats[0]\n else:\n return stats", "def get_health(self):\n return self.__health", "def cluster_ha_get(self, desired_attributes=None):\n return self.request( \"cluster-ha-get\", {\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterHaInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterHaInfo, False ],\n } )", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def health_check():\n return dict(api_status='OK')", "def clear_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Clearing cluster statistics\"\n return ret\n\n __salt__[\"trafficserver.clear_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Cleared cluster statistics\"\n return ret", "def health(self):\n return self._health", "def health(self):\n return self._health", "def _healthcheck():\n return '', 200", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def query_controller_cluster_upgrade_status(self):\n self.response = self.request('GET', self.cluster_status_endpoint, \"\")\n self.log.debug(self.response.status)\n response = self.response.read()\n status_schema = ControllerUpgradeSchema()\n status_schema.set_data(response, self.accept_type)\n return status_schema", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def bdev_nvme_get_controller_health_info(client, name):\n params = {}\n params['name'] = name\n return client.call('bdev_nvme_get_controller_health_info', params)", "def get_alerts(node: CephAdmin) -> dict:\n cmd = \"ceph health detail\"\n all_alerts = {}\n out, err = node.shell([cmd])\n regex = r\"(\\(MUTED[\\w\\s,-]*\\))?\\s*\\[\\w{3}\\]\\s([\\w_]*):\"\n alerts = re.findall(regex, out)\n all_alerts[\"active_alerts\"] = [alert[1] for alert in alerts if not alert[0]]\n all_alerts[\"muted_alerts\"] = [alert[1] for alert in alerts if alert[0]]\n return all_alerts", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: 
%s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def pcp_health_check_stats(self, nid):\n\n\t\tif self.PCPConnectionStatus() != ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\n\t\tnode_id = str(nid)\n\n\t\tself._PCPWrite('H'.encode(), 1)\n\t\twsize = self.int_to_bytes(len(node_id) + 1 + 4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tself._PCPWrite(node_id.encode() + NULL, len(node_id) + 1)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"H\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('H')", "def cassandra_histograms(mycluster=RING_1_dev__allnodes):\n cassandra_nodetool(mycluster,cmd=\"cfhistograms\")", "def ping_cluster():\n response = requests.get('{}/v1/status/peers'.format(common.URL))\n response.raise_for_status()\n\n # Wait for all 3 agents to join the cluster\n if len(response.json()) == 3:\n return True\n\n return False", "def healthcheck(self):\n while True:\n time.sleep(NAMENODE_HEALTH_CHECK_INTERVAL)\n self.check_datanodes()", "def _load_cluster(self):", "def cassandra_ring_status(mycluster=RING_1_dev__allnodes):\n cassandra_nodetool(mycluster)", "def host_info(vm_hostname):\n with _get_vm(vm_hostname) as vm:\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n info = vm.info()\n\n # Disconnect fabric now to avoid messages after the table\n disconnect_all()\n\n categories = (\n ('General', (\n 'hypervisor',\n 'status',\n )),\n ('Network', (\n 'intern_ip',\n 'mac_address',\n )),\n ('Resources', (\n 'num_cpu',\n 'max_cpus',\n 'memory',\n 'memory_free',\n 'max_mem',\n 'disk',\n 'disk_size_gib',\n 'disk_free_gib',\n )),\n # Anything else will appear in this section\n ('Other', None),\n )\n\n def _progress_bar(free_key, capacity_key, result_key, unit):\n \"\"\"Helper to show nice progress bars.\"\"\"\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n 
color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )\n\n _progress_bar('memory_free', 'memory', 'memory', 'MiB')\n _progress_bar('disk_free_gib', 'disk_size_gib', 'disk', 'GiB')\n\n max_key_len = max(len(k) for k in info.keys())\n for category, keys in categories:\n # Handle 'Other' section by defaulting to all keys\n keys = list(keys or info.keys())\n\n # Any info available for the category?\n if not any(k in info for k in keys):\n continue\n\n print('')\n print(white(category, bold=True))\n for k in keys:\n if k not in info:\n continue\n\n # Properly re-indent multiline values\n value = str(info.pop(k))\n value = ('\\n' + ' ' * (max_key_len + 3)).join(\n value.splitlines()\n )\n print('{} : {}'.format(k.ljust(max_key_len), value))", "async def health(self) -> Health:\n response = await self._http_requests.get(build_url(Paths.HEALTH))\n return Health(**response.json())", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def test_get_hyperflex_cluster_profile_list(self):\n pass", "def get_health_state(self):\n\n doc = self.client.enumerate(uris.CIM_ComputerSystem)\n\n health_state = doc.find(\n './/s:Body/wsen:EnumerateResponse/wsman:Items/wsinst:CIM_HostComputerSystem/wsinst:HealthState', wsman.NS_MAP_COMPUTER_SYSTEM)\n return constants._get_health_state(health_state.text)", "def zero_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Zeroing cluster statistics\"\n return ret\n\n __salt__[\"trafficserver.zero_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Zeroed cluster statistics\"\n return ret", "def get_health(self):\n return self.__healthy", "def health_check():\n # TODO: implement any other checking logic.\n return '', 200", "def stats_get(self, host):\n\n s = self.get_stats(host, 'get')\n\n data = {\n 'missing_total': s['missing_total'],\n 'exists_total': s['exists_total'],\n 'current': s['current'],\n 'total': s['total']\n }\n\n return data", "def test_cluster_status(self):\n application = zaza.model.get_application(\"ovn-central\")\n sb_status, nb_status = self._cluster_status_action()\n\n # Verify that cluster status includes \"unit_map\" field with correct\n # type\n for status in (nb_status, sb_status):\n self.assertIn(\"unit_map\", status)\n self.assertIsInstance(status[\"unit_map\"], dict)\n\n # Verify that units and their Server IDs are properly paired\n expected_mapping = {}\n for unit in application.units:\n unit_name = unit.entity_id\n nb_status_cmd = self.NB_CMD.format(\"cluster/status OVN_Northbound\")\n sb_status_cmd = self.SB_CMD.format(\"cluster/status OVN_Southbound\")\n nb_cluster_status = zaza.model.run_on_unit(unit_name,\n nb_status_cmd)\n sb_cluster_status = zaza.model.run_on_unit(unit_name,\n sb_status_cmd)\n nb_id = nb_cluster_status[\"Stdout\"].splitlines()[0]\n sb_id = sb_cluster_status[\"Stdout\"].splitlines()[0]\n expected_mapping[unit_name] = {\"sb_id\": sb_id, \"nb_id\": nb_id}\n\n for unit_name, unit_data in expected_mapping.items():\n sb_id = unit_data[\"sb_id\"]\n nb_id = unit_data[\"nb_id\"]\n self.assertEqual(sb_status[\"unit_map\"][unit_name], sb_id)\n self.assertEqual(nb_status[\"unit_map\"][unit_name], nb_id)", "def health_check():\n app.logger.info(\"Health Check!\")\n 
return Response(\"All Good!\", status=200)", "def healthcare():", "def clients(self):\n\n try:\n req = requests.get(self.root_url + \"/clients\")\n except requests.exceptions.ConnectionError as e:\n req = None\n print(str(e), file=sys.stderr)\n except Exception as e:\n print(\"Unknown error making a request to the Sensu API\", file=sys.stderr)\n print(str(e), file=sys.stderr)\n\n if req and req.status_code == 200:\n dat = req.json()\n for host in dat:\n self.metrics.append(('sensu_status', host['status'], {'host': host['name'], 'dc': host['dc']}))", "def check_all_hosts (self, repo_version_id, version_name):\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query1 = \"SELECT chm.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}';\".format(self.cluster_name)\n else:\n query1 = \"SELECT h.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}' JOIN hosts h ON chm.host_id = h.host_id;\".format(self.cluster_name)\n\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query2 = \"SELECT hv.host_name, hv.state FROM host_version hv WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n else:\n #query2 = \"SELECT hv.state,h.host_name FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n query2 = \"SELECT hv.state,h.host_name, hs.health_status,hs.agent_version,(h.total_mem/1024/1024) as total_mem_gb,(hs.available_mem/1024/1024) as available_mem_gb FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id JOIN hoststate hs ON h.host_id = hs.host_id WHERE hv.repo_version_id = {0} order by h.host_name;\".format(repo_version_id)\n # All cluster hosts\n host_names = set()\n self.cursor.execute(query1)\n rows = self.cursor.fetchall()\n if self.options.verbose:\n Logger.debug(query1 + \"\\n\")\n if rows and len(rows) > 0:\n host_names = set([row[0] for row in rows if len(row) == 1])\n Logger.debug(\"Hosts: {0}\".format(\", \".join(host_names)))\n\n host_name_to_state = {} # keys should be a subset of host_names\n hosts_with_repo_version_state_not_in_current = set()\n self.cursor.execute(query2 + \"\\n\")\n rows = self.cursor.fetchall()\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST(S) STATE\\t\")\n Logger.info(\"******************************************************************************************************************************************************\\n\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n Logger.info(\"State\\t\\tHostname\\t\\t\\t\\tHealth\\t\\tAgentVersion\\tTotalMemory\\tAvailableMemory\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n\n if rows and len(rows) > 0:\n for row in range(len(rows)):\n data = json.loads(rows[row][2])\n data1 = json.loads(rows[row][3])\n Logger.info(\"{0}\\t\\t{1}\\t\\t{2}\\t\\t{3}\\t\\t{4}\\t\\t{5}\".format(rows[row][0], rows[row][1], data[\"healthStatus\"], data1[\"version\"], rows[row][4], rows[row][5]))\n print (\"\\n\")\n Logger.debug(query2)\n if rows and len(rows) > 0:\n for row in rows:\n if len(row) == 6:\n host_name = row[1]\n state = row[0]\n host_name_to_state[host_name] = state\n if 
state.upper() != \"CURRENT\":\n hosts_with_repo_version_state_not_in_current.add(host_name)\n host_names_with_version = set(host_name_to_state.keys())\n host_names_without_version = host_names - host_names_with_version\n # Logger.info(\"\\t\\tHost(s) state Summary\")\n if len(host_names) > 0:\n if len(host_names_without_version) > 0:\n Logger.error(\"{0} host(s) do not have a Host Version for Repo Version {1}.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(host_names_without_version), version_name, \", \".join(host_names_without_version)))\n\n if len(hosts_with_repo_version_state_not_in_current) > 0:\n Logger.error(\"{0} host(s) have a Host Version for Repo Version {1} but the state is not CURRENT.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(hosts_with_repo_version_state_not_in_current), version_name, \", \".join(hosts_with_repo_version_state_not_in_current)))\n\n if len(host_names_without_version) == 0 and len(hosts_with_repo_version_state_not_in_current) == 0:\n Logger.info(\"Found {0} host(s) in the cluster, and all have a Host Version of CURRENT for \" \\\n \"Repo Version {1}. Things look good.\\n\".format(len(host_names), version_name))\n else:\n Logger.error(\"Make sure that all of these hosts are heartbeating, that they have the packages installed, the\\n\" \\\n \"hdp-select symlinks are correct, and that the services on these hosts have been restarated.\\n\")\n pass", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def test_eks_cluster_exists(self) -> None:\n cluster = self.eks.describe_cluster(name='andrew-jarombek-eks-v2')\n\n cluster_name = cluster.get('cluster').get('name')\n kubernetes_version = cluster.get('cluster').get('version')\n platform_version = cluster.get('cluster').get('platformVersion')\n cluster_status = cluster.get('cluster').get('status')\n\n self.assertEqual('andrew-jarombek-eks-v2', cluster_name)\n self.assertEqual('1.24', kubernetes_version)\n self.assertEqual('eks.6', platform_version)\n self.assertEqual('ACTIVE', cluster_status)", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", 
"def getAllHosts(cluster):\n nics = []\n hosts = rhevGet(\"/api/hosts\")\n doc = libxml2.parseDoc(hosts)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/hosts/host[cluster[@id='\" + getClusterData(cluster ,\"id\") + \"']]\")\n for i in res:\n #hrefs.append(i.prop(\"href\"))\n nic = rhevGet(i.prop(\"href\")+\"/nics\")\n nicdoc = libxml2.parseDoc(nic)\n ctxt = nicdoc.xpathNewContext()\n res = ctxt.xpathEval(\"/host_nics/host_nic/name[text() = '%s']/parent::*\" %rhev_settings.NIC)\n for i in res:\n nics.append(i.prop(\"href\"))\n return nics", "def wait_for_status(es, expected_cluster_status):\n logger.info(\"Wait for cluster status [%s]\" % expected_cluster_status)\n start = time.perf_counter()\n reached_cluster_status, relocating_shards = _do_wait(es, expected_cluster_status)\n stop = time.perf_counter()\n logger.info(\"Cluster reached status [%s] within [%.1f] sec.\" % (reached_cluster_status, (stop - start)))\n logger.info(\"Cluster health: [%s]\" % str(es.cluster.health()))\n logger.info(\"Shards:\\n%s\" % es.cat.shards(v=True))", "def _cluster_status_action(self):\n yaml_load_err = \"Status of '{}' could not be loaded as yaml:\\n{}\"\n status_raw = zaza.model.run_action_on_leader(\"ovn-central\",\n \"cluster-status\")\n status_data = status_raw.data[\"results\"]\n # Verify expected items in the action result\n self.assertIn(\"ovnnb\", status_data)\n self.assertIn(\"ovnsb\", status_data)\n\n try:\n nb_status = yaml.safe_load(status_data[\"ovnnb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"northbound-cluster\",\n status_data[\"ovnnb\"]))\n try:\n sb_status = yaml.safe_load(status_data[\"ovnsb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"southbound-cluster\",\n status_data[\"ovnsb\"]))\n\n return sb_status, nb_status", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def healthcheck():\n return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)", "def _get_cluster_list(self):\n return self.__cluster_list", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True" ]
[ "0.8006035", "0.7863848", "0.71975887", "0.700192", "0.6814766", "0.6722469", "0.6707405", "0.66621304", "0.665207", "0.65768224", "0.65568644", "0.6537222", "0.6509707", "0.6436703", "0.6426991", "0.64085305", "0.6380572", "0.6379265", "0.63764757", "0.63066256", "0.62769216", "0.62467647", "0.6242099", "0.6232904", "0.62226737", "0.62080866", "0.61709005", "0.61469674", "0.6091008", "0.6071856", "0.60663503", "0.6053172", "0.6039821", "0.6031175", "0.6022107", "0.6018224", "0.6009011", "0.6008812", "0.5979333", "0.5974597", "0.59552884", "0.59518814", "0.5950001", "0.5947167", "0.5914719", "0.5911969", "0.590732", "0.58991927", "0.5890537", "0.58782494", "0.58738637", "0.5873628", "0.5865204", "0.58626115", "0.58620363", "0.58454484", "0.5815644", "0.5808334", "0.5803715", "0.5796589", "0.5796589", "0.5791782", "0.57904845", "0.5779132", "0.5775535", "0.5768255", "0.5768169", "0.5763362", "0.5750226", "0.5745079", "0.5737011", "0.57363397", "0.5734748", "0.5730456", "0.5719763", "0.5716459", "0.56983304", "0.5685268", "0.56835884", "0.5678973", "0.5672199", "0.56663734", "0.56610554", "0.5659992", "0.56580174", "0.5657504", "0.5650515", "0.56477743", "0.5645738", "0.56396943", "0.5634956", "0.5631537", "0.56239706", "0.5619376", "0.5617159", "0.56096214", "0.5608052", "0.5607787", "0.5605818", "0.5605096" ]
0.68823427
4
Gather ceph monitor information
def get_monitor_info(handle, timeout):
    mon_info = dict()
    mon_info['stat']     = ceph_mon_command(handle, 'mon stat'    , timeout)
    mon_info['dump']     = ceph_mon_command(handle, 'mon dump'    , timeout)
    mon_info['map']      = ceph_mon_command(handle, 'mon getmap'  , timeout)
    mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout)
    return mon_info
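Illustrative only, not part of the dataset record: a minimal sketch of how the `get_monitor_info` routine above might be exercised, assuming `handle` is a connected `rados.Rados` cluster handle and `ceph_mon_command` is a thin wrapper around `Rados.mon_command()`, as neighbouring snippets such as `get_ceph_info` suggest. Both the wrapper body and the config path are assumptions, not data from this record.

```python
# Hypothetical usage sketch (not part of the dataset row). Assumes python3-rados
# is installed and get_monitor_info() from the record above is in scope.
import json
import rados

def ceph_mon_command(handle, command, timeout):
    # Assumed wrapper: issue one monitor command and return its output buffer.
    ret, outbuf, outs = handle.mon_command(
        json.dumps({'prefix': command}), b'', timeout=timeout)
    return outbuf

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')  # assumed config path
cluster.connect()
try:
    mon_info = get_monitor_info(cluster, timeout=30)
    print(mon_info['stat'].decode(errors='replace'))
finally:
    cluster.shutdown()
```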
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_monitor_data(self):\n json = await self._api_call(\"app/monitors/%s/overview\" % self.sense_monitor_id)\n if \"monitor_overview\" in json and \"monitor\" in json[\"monitor_overview\"]:\n self._monitor = json[\"monitor_overview\"][\"monitor\"]\n return self._monitor", "def monitor(self):", "def get_host_stats(self, refresh=False):", "def get_monitor_details():\n monitor_id = paranoid_clean(request.args.get('id'))\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n monitor = monitors.find_one({'hashed': monitor_id}, {'_id': 0})\n if not monitor:\n return jsonify({'success': False, 'error': 'Monitor was not found.'})\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n link = monitor['metadata']['rss_link']\n articles = list(articles.find({'feed_source': link}, {'_id': 0}))\n for idx, item in enumerate(articles):\n articles[idx]['title'] = html.unescape(item['title'])\n articles[idx]['date'] = item['collected'][:10]\n articles.sort(key=lambda x: x['collected'], reverse=True)\n return jsonify({'success': True, 'monitor': monitor, 'articles': articles})", "def monitor(self, **kwargs):\n self.show_info(monitor=True, **kwargs)", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def monitor(frist_invoke=2):\n sdiskio = psutil.disk_io_counters()\n # sleep some time\n\n value_dic = {\n 'iostats': {\n 'io.disks_read': sdiskio.read_bytes/(1024*1024),\n 'io.disks_write': sdiskio.write_bytes/(1024*1024),\n 'io.disks_read_count': sdiskio.read_count/(1024 * 1024),\n 'io.disks_write_count': sdiskio.write_count/(1024 * 1024),\n 'io.disks_read_time': sdiskio.read_time/1000,\n 'io.disks_write_time': sdiskio.write_time/1000,\n 'io.disks_busy_time': sdiskio.write_time/1000,\n }\n }\n\n return value_dic", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n 
proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def get_health_info(handle, timeout):\n health = dict()\n\n health['stat'] = ceph_mon_command(handle, 'health' , timeout)\n # TODO command not known with ceph_mon_command\n #health['detail'] = ceph_mon_command(handle, 'health detail', timeout)\n health['detail'] = shell_command('ceph health detail') + b'\\n'\n health['df'] = ceph_mon_command(handle, 'df' , timeout)\n health['report'] = ceph_mon_command(handle, 'report' , timeout)\n\n return health", "def monitor(self):\n logging.debug(\"monitor entered\")\n # monitor machines...\n # first, get a list of machine IDs\n res = progress_table(self.machines)\n return res", "def monitor(self):\n\t\tresponse = self._request(\"/demovibes/ajax/monitor/{}/\".format(self.next_event))\n\t\tif not response:\n\t\t\treturn None\n\t\t\n\t\tdata = response.read()\n\t\treturn self.parse_monitor(data)", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def poll_host(self, server, obj, name):\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats", "def monitor(self, rms):\n pass", "def watch():\n\n try:\n headers = ('CONTAINER ID', 'NAME', 'CPU %', 'MEM USAGE / LIMIT',\n 'MEM %', 'NET I/O', 'BLOCK I/O', 'PIDS')\n column_width = 20\n for element in headers:\n print(element.ljust(column_width)),\n print('')\n\n for container in CLIENT.containers.list():\n column_width = 20\n stats = container.stats(stream=False)\n\n # Block I/O stats\n blkio = stats.get('blkio_stats').get('io_service_bytes_recursive')\n # in case blkio is empty --> IndexError: list index out of range\n if not blkio:\n blkio_read = '0'\n blkio_write = '0'\n else:\n blkio_read = size(blkio[0].get('value'), system=si)\n blkio_write = size(blkio[1].get('value'), system=si)\n\n # Network stats\n rx_stats = size(stats.get('networks').get('eth0').get('rx_bytes'), system=si)\n tx_stats = size(stats.get('networks').get('eth0').get('tx_bytes'), system=si)\n\n # Memory stats\n mem = stats.get('memory_stats')\n mem_usage = mem.get('stats').get('active_anon')\n mem_limit = mem.get('limit')\n mem_percent = (\"%.2f\"%((mem_usage / mem_limit)*100))\n\n # CPU stats\n # this is taken directly from docker CLIENT:\n # https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/CLIENT/stats.go#L309\n cpu_percent = 0.0\n cpu = stats.get('cpu_stats')\n pre_cpu = stats.get('precpu_stats')\n cpu_total = cpu.get('cpu_usage').get('total_usage')\n pre_cpu_total = pre_cpu.get('cpu_usage').get('total_usage')\n cpu_count = cpu.get('online_cpus')\n\n cpu_delta = cpu_total - pre_cpu_total\n system_delta = cpu.get('system_cpu_usage') - pre_cpu.get('system_cpu_usage')\n\n if system_delta > 0.0 and cpu_delta > 0.0:\n cpu_percent = (\"%.2f\"%(cpu_delta / system_delta * 100.0 * cpu_count))\n\n # container attributes\n attrs = [(str(container.short_id), str(container.name), str(cpu_percent),\n str(size((mem_usage), system=si) + \" / \" + size((mem_limit), system=si)),\n str(mem_percent), str(rx_stats + \" / \" + tx_stats),\n str(blkio_read + \" / \" + 
blkio_write),\n str(stats.get('pids_stats').get('current')))]\n\n for row in attrs:\n for element in row:\n print(element.ljust(column_width)),\n print('')\n\n except (docker.errors.NotFound, KeyError, AttributeError):\n print('No such container or container not running!')", "def getCurrentMetrics(self):\n self.notifyPut('Obtaining Current Display Metrics')\n try:\n data = []\n data = win32api.EnumDisplayMonitors(None, None)\n screens = {}\n scrNum = 0\n for screen in data:\n screens[scrNum] = screen[2]\n scrNum += 1\n return screens \n except Exception, e:\n self.logQ.put('{0} - Unable to capture current metrics'.format(e))", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def get_monitor_string(self):\n\n return self.reporter.get_overview_string(self.info)", "def get_monitor_info_a(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoA, h_monitor)", "def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info", "def monitoring_group(ctx):\n pass", "def get_monitor_info_w(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoW, h_monitor)", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. 
See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "async def start_monitor(self):\n self._logger.info(\"Starting monitor...\")\n org1_admin = self.fabric_client.get_user(org_name='org1.example.com', name='Admin')\n\n self._logger.info(\"Starting monitor...\")\n cmd = \"/home/martijn/go/bin/go run \" \\\n \"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/fabric-cli.go event listenblock \" \\\n \"--cid mychannel --peer localhost:8001 \" \\\n \"--config /home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/config.yaml\"\n out_file = open(\"transactions.txt\", \"w\")\n my_env = os.environ.copy()\n my_env[\"GOPATH\"] = \"/home/martijn/gocode\"\n self.monitor_process = subprocess.Popen(cmd.split(\" \"), env=my_env, stdout=out_file,\n cwd=\"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/\")\n\n async def get_latest_block_num():\n self._logger.info(\"Getting latest block nr...\")\n response = await self.fabric_client.query_info(\n requestor=org1_admin,\n channel_name='mychannel',\n peers=['peer0.org1.example.com'],\n decode=True\n )\n print(response)\n\n latest_block = response.height\n if latest_block > self.latest_block_num:\n self._logger.info(\"Updating to block nr %d\", latest_block)\n old_latest_block_num = self.latest_block_num\n self.latest_block_num = latest_block\n confirm_time = int(round(time.time() * 1000))\n for confirmed_block_num in range(old_latest_block_num + 1, latest_block + 1):\n self.block_confirm_times[confirmed_block_num] = confirm_time\n\n self.monitor_lc = run_task(get_latest_block_num, interval=0.1)", "def start_monitor(self, collector):\n pass", "def getMonitor(self) -> ghidra.util.task.TaskMonitor:\n ...", "def getMonitors(self):\n return [self.monitor]", "def parse_monitor(self):\n return DEFAULT_MONITOR", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def home(request):\n ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)\n\n cresp, cluster_health = ceph.health(body='json')\n sresp, cluster_status = ceph.status(body='json')\n\n # Monitors\n all_mons = cluster_status['output']['monmap']['mons']\n up_mons = cluster_status['output']['health']['timechecks']['mons']\n total_mon_count = len(all_mons)\n mons_ok = 0\n mons_warn = 0\n mons_crit = 0\n\n for mon in up_mons:\n if mon['health'] == \"HEALTH_OK\":\n mons_ok += 1\n else:\n mons_warn += 1\n\n mons_crit = total_mon_count - (mons_ok + mons_warn)\n\n # Activity\n pgmap = cluster_status['output']['pgmap']\n activities = {}\n if 'read_bytes_sec' in pgmap:\n activities['Read'] = filesize.naturalsize(pgmap.get('read_bytes_sec'))\n if 'write_bytes_sec' in pgmap:\n activities['Write'] = filesize.naturalsize(pgmap.get('write_bytes_sec'))\n if 'op_per_sec' in pgmap:\n activities['Ops'] = pgmap.get('op_per_sec')\n if 'recovering_objects_per_sec' in pgmap:\n activities['Recovering Objects'] = pgmap.get('recovering_objects_per_sec')\n if 'recovering_bytes_per_sec' in pgmap:\n activities['Recovery Speed'] = filesize.naturalsize(pgmap.get('recovering_bytes_per_sec'))\n if 'recovering_keys_per_sec' in pgmap:\n activities['Recovering Keys'] = pgmap.get('recovering_keys_per_sec')\n\n # Get a rough estimate of cluster free 
space. Is this accurate ?\n presp, pg_stat = ceph.pg_stat(body='json')\n bytes_total = cluster_status['output']['pgmap']['bytes_total']\n bytes_used = cluster_status['output']['pgmap']['bytes_used']\n\n data_avail, data_scale = filesize.naturalsize(bytes_total).split()\n scale = filesize.suffixes['decimal'].index(data_scale)+1\n data_used = round(float(bytes_used)/pow(1024, scale), 1)\n\n # pgs\n pg_statuses = cluster_status['output']['pgmap']\n\n pg_ok = 0\n pg_warn = 0\n pg_crit = 0\n\n # pg states\n pg_warn_status = re.compile(\"(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)\")\n pg_crit_status = re.compile(\"(down|inconsistent|incomplete|stale|peering)\")\n\n for state in pg_statuses['pgs_by_state']:\n if state['state_name'] == \"active+clean\":\n pg_ok = pg_ok + state['count']\n\n elif pg_warn_status.search(state['state_name']):\n pg_warn = pg_warn + state['count']\n\n elif pg_crit_status.search(state['state_name']):\n pg_crit = pg_crit + state['count']\n\n # pg statuses\n pg_states = dict()\n\n for state in pg_statuses['pgs_by_state']:\n pg_states[state['state_name']] = state['count']\n\n # osds\n dresp, osd_dump = ceph.osd_dump(body='json')\n osd_state = osd_dump['output']['osds']\n\n osds_ok = 0\n osds_warn = 0\n osds_crit = 0\n\n # Possible states are: exists, up, autoout, new, ???\n osd_up = re.compile(\"(?=.*exists)(?=.*up)\")\n osd_down = re.compile(\"(?=.*exists)(?=.*autoout)\")\n\n for osd_status in osd_state:\n if osd_up.search(str(osd_status['state'])):\n osds_ok += 1\n elif osd_down.search(str(osd_status['state'])):\n osds_warn += 1\n else:\n osds_crit += 1\n\n return render_to_response('dashboard.html', locals())", "def get_alerts(node: CephAdmin) -> dict:\n cmd = \"ceph health detail\"\n all_alerts = {}\n out, err = node.shell([cmd])\n regex = r\"(\\(MUTED[\\w\\s,-]*\\))?\\s*\\[\\w{3}\\]\\s([\\w_]*):\"\n alerts = re.findall(regex, out)\n all_alerts[\"active_alerts\"] = [alert[1] for alert in alerts if not alert[0]]\n all_alerts[\"muted_alerts\"] = [alert[1] for alert in alerts if alert[0]]\n return all_alerts", "def printMonitorSelf(daemon, name, ad):\n \n out = []\n\n t = ad['MonitorSelfTime']\n out.append('{0} {1} status at {2}:'.format(daemon, name,\n datetime.datetime.fromtimestamp(t)))\n out.append('\\n')\n\n if 'MonitorSelfAge' in ad:\n uptime = datetime.timedelta(seconds = ad['MonitorSelfAge'])\n out.append('Uptime: {0}'.format(uptime))\n out.append('\\n')\n if 'MonitorSelfSysCpuTime' in ad:\n sys_cpu_time = datetime.timedelta(seconds = ad['MonitorSelfSysCpuTime'])\n out.append('SysCpuTime: {0}'.format(sys_cpu_time))\n out.append(' '*4)\n if 'MonitorSelfUserCpuTime' in ad:\n user_cpu_time = datetime.timedelta(seconds=ad['MonitorSelfUserCpuTime'])\n out.append('UserCpuTime: {0}'.format(user_cpu_time))\n out.append('\\n')\n if 'MonitorSelfImageSize' in ad:\n memory = formatMemorySize(ad['MonitorSelfImageSize'])\n out.append('Memory: {0}'.format(memory))\n out.append(' '*4)\n if 'MonitorSelfResidentSetSize' in ad:\n rss = formatMemorySize(ad['MonitorSelfResidentSetSize'])\n out.append('RSS: {0}'.format(rss))\n out.append('\\n')\n out.append('\\n')\n\n sys.stdout.write(''.join(out))\n\n lines = out.count('\\n')\n \n return lines", "def fetch_status():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((GEARMAND_HOST, GEARMAND_PORT))\n log_verbose('Connected to Gearmand at %s:%s' % (GEARMAND_HOST, GEARMAND_PORT))\n except socket.error, e:\n collectd.error('gearmand_info plugin: Error connecting 
to %s:%d - %r'\n % (GEARMAND_HOST, GEARMAND_PORT, e))\n return None\n fp = s.makefile('r')\n log_verbose('Sending info command')\n s.sendall('status\\r\\n')\n\n status = {}\n while True:\n data = fp.readline().strip()\n log_verbose('Received data: %r' % data)\n if not data or data == '.':\n break\n function, total, running, available_workers = data.split('\\t')\n status[function] = {\n 'total': total,\n 'running': running,\n 'available_workers': available_workers}\n\n s.close()\n return status", "def all_info(stdscr, jetson, key):\n # Screen size\n height, width = stdscr.getmaxyx()\n line_counter = 1\n # Plot Status CPU\n line_counter = plot_CPUs(stdscr, line_counter, jetson.stats['CPU'], width)\n # Plot MTS\n if 'MTS' in jetson.stats:\n line_counter += 1\n stdscr.addstr(line_counter, 0, \"MTS \", curses.color_pair(5))\n MTS_FG = {'name': 'FG',\n 'value': int(jetson.stats['MTS']['fg']),\n }\n linear_percent_gauge(stdscr, MTS_FG, width // 2 - 2,\n offset=line_counter, start=4, color_name=5)\n MTS_BG = {'name': 'BG',\n 'value': int(jetson.stats['MTS']['bg']),\n }\n linear_percent_gauge(stdscr, MTS_BG, width // 2 - 2,\n offset=line_counter, start=2 + width // 2, color_name=5)\n # RAM linear gauge info\n ram_status = jetson.stats['RAM']['RAM']\n lfb_status = jetson.stats['RAM']['lfb']\n RAM_VALUE = {'name': \"Mem\",\n 'value': int(ram_status['used'][-1] / float(ram_status['total']) * 100.0),\n 'label': \"(lfb \" + str(lfb_status['nblock']) + \"x\" + str(lfb_status['size']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(ram_status['used'][-1] / 1000.0, ram_status['total'] / 1000.0),\n }\n line_counter += 1\n linear_percent_gauge(stdscr, RAM_VALUE, width, offset=line_counter)\n # EMC linear gauge info\n if 'EMC' in jetson.stats:\n line_counter += 1\n linear_percent_gauge(stdscr, make_gauge_from_percent(jetson.stats['EMC']), width, offset=line_counter)\n # IRAM linear gauge info\n iram_status = jetson.stats['IRAM']\n if iram_status:\n line_counter += 1\n IRAM_VALUE = {'name': \"Imm\",\n 'value': int(iram_status['used'][-1] / float(iram_status['total']) * 100.0),\n 'label': \"(lfb \" + str(iram_status['size']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(iram_status['used'][-1] / 1000.0,\n iram_status['total'] / 1000.0),\n }\n linear_percent_gauge(stdscr, IRAM_VALUE, width, offset=line_counter)\n # SWAP linear gauge info\n swap_status = jetson.stats['SWAP']\n if swap_status:\n SWAP_VALUE = {'name': \"Swp\",\n 'value': int(swap_status['used'][-1] / float(swap_status['total']) * 100.0),\n 'label': \"(cached \" + str(swap_status['cached']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(swap_status['used'][-1] / 1000.0,\n swap_status['total'] / 1000.0),\n }\n else:\n SWAP_VALUE = {'name': \"Swp\"}\n line_counter += 1\n linear_percent_gauge(stdscr, SWAP_VALUE, width, offset=line_counter)\n # GPU linear gauge info\n line_counter += 1\n if 'GR3D' in jetson.stats:\n linear_percent_gauge(stdscr, make_gauge_from_percent(jetson.stats['GR3D']), width, offset=line_counter + 1)\n line_counter += 2\n # Status disk\n disk_status = jetson.disk\n DISK_STATUS = {'name': \"Dsk\",\n 'value': int(float(disk_status['used']) / float(disk_status['total']) * 100.0),\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(disk_status['used'], disk_status['total']),\n }\n linear_percent_gauge(stdscr, DISK_STATUS, width, offset=line_counter, type_bar=\"#\", color_name=3)\n # Last part of information\n split = 1.0\n split += 1.0 if jetson.stats['temperatures'] else 0.0\n split += 1.0 if 
jetson.stats['voltages'] else 0.0\n column_width = int(float(width - 4) / split)\n line_counter += 1\n # List of all mini menu\n mini_menu = [compact_info, plot_temperatures, plot_voltages]\n # Evaluate column width\n column_width = int(float(width) / len(mini_menu))\n for idx, mini in enumerate(mini_menu):\n # Run mini page\n mini(stdscr, idx * column_width, line_counter, column_width, jetson)", "def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')", "def getmondim():\n import globaldef\n import pickle\n import subprocess\n import tempfile\n\n width=[]\n height=[]\n offwidth=[]\n offheight=[]\n\n mondict={}\n\n xrandroutput = subprocess.check_output(['xrandr']).decode(\"utf-8\")\n\n for line in xrandroutput.splitlines():\n # print(type(line))\n if not line != line.replace(' connected ', ''):\n continue\n temp = ' '.join(line.split(' ')[2: ]) #remove \"eDP1 connected \", \"HDMI1 connected \" etc.\n temp = temp.replace('primary ', '')\n dimensions = temp.split(' ')[0]\n try:\n width.append(int(dimensions.split('x')[0]))\n except:\n continue\n temp = dimensions.split('x')[1]\n height.append(int(temp.split('+')[0]))\n offwidth.append(int(temp.split('+')[1]))\n offheight.append(int(temp.split('+')[2]))\n\n for monnum in range(1, len(width) + 1):\n minoffheight = min(offheight)\n minoffheightindex = []\n for i in range(0, len(width)):\n if offheight[i] == minoffheight:\n minoffheightindex.append(i)\n minoffwidth = min([offwidth[i] for i in minoffheightindex])\n for j in minoffheightindex:\n if offwidth[j] == minoffwidth:\n mondict[monnum] = {'width': width[j],'height': height[j],'offwidth': offwidth[j],'offheight': offheight[j]}\n \n width.pop(j)\n height.pop(j)\n offwidth.pop(j)\n offheight.pop(j)\n break\n\n from globaldef import monitorsfile\n with open(monitorsfile, 'wb') as handle:\n pickle.dump(mondict, handle, protocol = 2) #change to protocol 3 when change other script to python3", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def read(self):\n\n self.log.debug('Beginning read callback')\n info = self.poll()\n\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n\n # report information for all vCenter servers\n for vcenter, data in info.items():\n # report datastore information\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n\n # report datacenter information\n for dc_name, dc_data in data['datacenter'].items():\n # extract any cluster and host information for later processing\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n\n # report cluster information\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n 
self.dispatch(dc_name, o_type, c_name, value)\n\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n\n # report host information\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n\n time.sleep(self.sleep_time)", "def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)\n\n return device_info", "def __initializeMonitor( self ):\n if self.__moduleProperties[ 'standalone' ]:\n self.monitor = gMonitor\n else:\n self.monitor = MonitoringClient()\n self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )\n self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )\n self.monitor.initialize()\n self.monitor.registerActivity( 'CPU', \"CPU Usage\", 'Framework', \"CPU,%\", self.monitor.OP_MEAN, 600 )\n self.monitor.registerActivity( 'MEM', \"Memory Usage\", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )\n # Component monitor\n for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):\n self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )\n self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )\n self.monitor.setComponentExtraParam( 'cycles', 0 )\n self.monitor.disable()\n self.__monitorLastStatsUpdate = time.time()", "def monitored(self):\n return self.monitor", "def _get_physical_monitors_from_hmonitor(hmonitor: wintypes.HMONITOR) -> list:\n class _PhysicalMonitorStructure(ctypes.Structure):\n \"\"\"\n PHYSICAL_MONITOR Structure.\n https://msdn.microsoft.com/en-us/library/vs/alm/dd692967(v=vs.85).aspx\n typedef struct _PHYSICAL_MONITOR {\n HANDLE hPhysicalMonitor;\n WCHAR szPhysicalMonitorDescription[PHYSICAL_MONITOR_DESCRIPTION_SIZE];\n } PHYSICAL_MONITOR, *LPPHYSICAL_MONITOR;\n\n PHYSICAL_MONITOR_DESCRIPTION_SIZE = 128\n \"\"\"\n _fields_ = [\n (\"hPhysicalMonitor\", wintypes.HANDLE),\n (\"szPhysicalMonitorDescription\", wintypes.WCHAR * 128)\n ]\n\n # Retrieves the number of physical monitors\n phy_monitor_number = wintypes.DWORD()\n api_call_get_number = ctypes.windll.Dxva2.GetNumberOfPhysicalMonitorsFromHMONITOR\n if not api_call_get_number(hmonitor, ctypes.byref(phy_monitor_number)):\n _LOGGER.error(ctypes.WinError())\n return []\n \n # Retrieves the physical monitors\n api_call_get_monitor = ctypes.windll.Dxva2.GetPhysicalMonitorsFromHMONITOR\n # create array\n phy_monitor_array = (_PhysicalMonitorStructure * phy_monitor_number.value)()\n if not api_call_get_monitor(hmonitor, phy_monitor_number, phy_monitor_array):\n _LOGGER.error(ctypes.WinError())\n return []\n \n return list(phy_monitor_array)", "def _transform_health_monitor(monitor):\n return {\n 'id': monitor.id,\n 'type': monitor.type,\n 'delay': monitor.delay,\n 'timeout': monitor.timeout,\n 'max_retries': monitor.max_retries,\n 'http_method': monitor.http_method,\n 'url_path': monitor.url_path,\n 'expected_codes': '|'.join(\n _expand_expected_codes(monitor.expected_codes)),\n 'admin_state_up': monitor.admin_state_up,\n }", "def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)", "def get_measurements(self):\n metrics = {}\n for key in self.fields.keys():\n metrics[key] = []\n # What's in output:\n # proc_pid date virt res shrd cpu mem power gpus_power\n while not self.queue.empty():\n data = self.queue.get().strip().split()\n for field in self.fields:\n tp = self.fields[field]['type']\n idx = self.fields[field]['index']\n count = self.fields[field]['count']\n if count == -1:\n 
metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))\n elif count == 0:\n metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])\n else:\n metrics[field].append([\n ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)\n ])\n return metrics", "def get_host_stats(self, refresh=False):\n return self.host_status", "def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()", "def description(self):\n return '%s and a monitor' % self.component.description()", "def show():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE')\n port_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PORT')\n port_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PORT_BUFFER_DROP)\n rif_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'RIF')\n queue_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE_WATERMARK')\n pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK')\n pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP)\n buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK)\n acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL)\n tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL')\n trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP')\n route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE')\n\n header = (\"Type\", \"Interval (in ms)\", \"Status\")\n data = []\n if queue_info:\n data.append([\"QUEUE_STAT\", queue_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), queue_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_info:\n data.append([\"PORT_STAT\", port_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), port_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_drop_info:\n data.append([PORT_BUFFER_DROP, port_drop_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), port_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if rif_info:\n data.append([\"RIF_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if queue_wm_info:\n data.append([\"QUEUE_WATERMARK_STAT\", queue_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), queue_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_wm_info:\n data.append([\"PG_WATERMARK_STAT\", pg_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), pg_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_drop_info:\n data.append(['PG_DROP_STAT', pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), pg_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if buffer_pool_wm_info:\n data.append([\"BUFFER_POOL_WATERMARK_STAT\", buffer_pool_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), buffer_pool_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if acl_info:\n data.append([ACL, pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), acl_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if tunnel_info:\n data.append([\"TUNNEL_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if trap_info:\n data.append([\"FLOW_CNT_TRAP_STAT\", trap_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), 
trap_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if route_info:\n data.append([\"FLOW_CNT_ROUTE_STAT\", route_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC),\n route_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n\n click.echo(tabulate(data, headers=header, tablefmt=\"simple\", missingval=\"\"))", "def get_common_monitors(monitor):\n monitor_content_loss = MonitorSeries(\n 'content loss', monitor, interval=20)\n monitor_gen_loss = MonitorSeries(\n 'generator loss', monitor, interval=20)\n monitor_warp_loss = MonitorSeries(\n 'warp loss', monitor, interval=20)\n monitor_lr = MonitorSeries(\n 'learning rate', monitor, interval=20)\n monitor_time = MonitorTimeElapsed(\n \"training time per iteration\", monitor, interval=20)\n Monitor_common = collections.namedtuple('Monitor_common',\n ['monitor_content_loss', 'monitor_gen_loss', 'monitor_warp_loss',\n 'monitor_lr', 'monitor_time'])\n return Monitor_common(monitor_content_loss, monitor_gen_loss, monitor_warp_loss, monitor_lr, monitor_time)", "def get_host_stats(self, refresh=False):\n stats = []\n for nodename in self._drv_nodes:\n host_status = self.host_status_base.copy()\n host_status['hypervisor_hostname'] = nodename\n host_status['host_hostname'] = nodename\n host_status['host_name_label'] = nodename\n host_status['hypervisor_type'] = self.name\n host_status['vcpus'] = drv_conf.max_vcpus\n host_status['memory_mb'] = drv_conf.max_memory_mb\n host_status['local_gb'] = drv_conf.max_disk_gb\n stats.append(host_status)\n if len(stats) == 0:\n raise exception.NovaException(\"Azure Driver has no node\")\n elif len(stats) == 1:\n return stats[0]\n else:\n return stats", "def get_health_monitor(self):\n return self.manager.get_health_monitor(self)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def read():\n for host in _hosts:\n remaining = ssl_valid_time_remaining(host)\n remaining = remaining.total_seconds()\n remaining = int(remaining)\n\n collectd.info(\n 'tls-cert-monitor(host=%s): Reading data (data=%d)' %\n (host, remaining))\n\n val = collectd.Values(type='gauge', type_instance=host)\n\n val.plugin = 'tls-cert-monitor'\n val.dispatch(values=[remaining])", "def getMonitoringHosts(self):\r\n return self.monitoringClients.values()", "async def main(self) -> None:\n async with ClientSession() as session:\n tasks = [\n asyncio.create_task(self.monitor(session, params)) for params in self._monitor_list\n ]\n await asyncio.gather(*tasks)", "def host_info(vm_hostname):\n with _get_vm(vm_hostname) as vm:\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n info = vm.info()\n\n # Disconnect fabric now to avoid messages after the table\n disconnect_all()\n\n categories = (\n ('General', (\n 'hypervisor',\n 'status',\n )),\n ('Network', (\n 'intern_ip',\n 'mac_address',\n )),\n ('Resources', (\n 'num_cpu',\n 'max_cpus',\n 'memory',\n 'memory_free',\n 'max_mem',\n 'disk',\n 'disk_size_gib',\n 'disk_free_gib',\n )),\n # Anything else will appear in this section\n ('Other', None),\n )\n\n def 
_progress_bar(free_key, capacity_key, result_key, unit):\n \"\"\"Helper to show nice progress bars.\"\"\"\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )\n\n _progress_bar('memory_free', 'memory', 'memory', 'MiB')\n _progress_bar('disk_free_gib', 'disk_size_gib', 'disk', 'GiB')\n\n max_key_len = max(len(k) for k in info.keys())\n for category, keys in categories:\n # Handle 'Other' section by defaulting to all keys\n keys = list(keys or info.keys())\n\n # Any info available for the category?\n if not any(k in info for k in keys):\n continue\n\n print('')\n print(white(category, bold=True))\n for k in keys:\n if k not in info:\n continue\n\n # Properly re-indent multiline values\n value = str(info.pop(k))\n value = ('\\n' + ' ' * (max_key_len + 3)).join(\n value.splitlines()\n )\n print('{} : {}'.format(k.ljust(max_key_len), value))", "def collect_compute_info(self, ctxt, host_id, host_info):\n cctxt = self.client.prepare(server=DEFAULT_SERVER, timeout=RPC_TIMEOUT)\n cctxt.cast(ctxt, \"collect_compute_info\", host_id=host_id, host_info=host_info)", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def test_list_healthmonitors_sort(self):\r\n resources = \"health_monitors\"\r\n cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def get_switch_details_from_mgmt(self, using):\n ret_output = {}\n #Get the console mgmt handle\n console = self.connect_mgmt_ip(using)\n console.sendline('terminal length 0')\n console.expect(SWITCH_PROMPT)\n console.sendline('show inventory | 
xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['inv'] = console.before\n console.sendline('show system uptime | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['uptime'] = console.before\n console.sendline('show accounting log | grep \"configure\" | last 1')\n console.expect(SWITCH_PROMPT,120)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['idletime'] = console.before\n console.sendline('terminal length 15')\n console.expect(SWITCH_PROMPT)\n console.sendline('show clock | last 1')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['clock'] = console.before\n console.close()\n return ret_output", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def generate_json(vmware_monitor):\n\n global server\n\n if not server.is_connected():\n try:\n server.connect(VCENTER_SERVER, VCENTER_USERNAME, VCENTER_PASSWORD)\n except Exception as error:\n vmware_monitor.json = output_message(\"Error connecting to vCenter server\" + error.message)\n return\n\n if server.is_connected():\n # First we want to grab a list of all the hosts so we can determine the clock speed of the CPUs\n # API: HostSystem - https://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.HostSystem.html\n\n if len(VMwareVM.all_vms) == 0:\n query = [\n \"name\",\n \"hardware.cpuInfo.hz\"\n ]\n try:\n # Fast way of getting Host properties\n host_props = server._retrieve_properties_traversal(property_names=query, obj_type=\"HostSystem\")\n except Exception as error:\n vmware_monitor.json = output_message(\"Error retrieving VMware Host data\" + error.message)\n return\n else:\n for prop_set in host_props:\n host_mor = prop_set.Obj # managed_object_reference\n host_hz = 0\n host_name = \"Error\"\n # The properties aren't always returned in the order you expect, so we have to match them up\n for i in range(len(prop_set.PropSet)):\n if prop_set.PropSet[i].Name == \"name\":\n host_name = prop_set.PropSet[i].Val\n elif prop_set.PropSet[i].Name == \"hardware.cpuInfo.hz\":\n host_hz = prop_set.PropSet[i].Val\n # Create host object so we can find Mhz later (object is stored in class array all_hosts)\n VMwareHost(host_mor, host_name, host_hz)\n\n\n # API: VirtualMachine - https://pubs.vmware.com/vsphere-50/index.jsp#com.vmware.wssdk.apiref.doc_50/vim.VirtualMachine.html\n # summary.overallStatus: general \"health\" value: gray, green, red, yellow\n # summary.quickStats.overallCpuUsage: Amount of CPU actually granted to the VM in Mhz\n # summary.quickStats.staticCpuEntitlement: Max CPU possible for the VM in Mhz\n # summary.quickStats.guestMemoryUsage: Active memory usage of the VM in MB.\n # summary.quickStats.staticMemoryEntitlement: Max CPU possible for the VM in MB\n # config.hardware.numCPU: Number of virtual CPUs present in this virtual machine.\n # 
runtime.host: The host that is responsible for running a virtual machine.\n\n query = [\n \"name\",\n \"summary.overallStatus\",\n \"summary.quickStats.overallCpuUsage\",\n \"config.hardware.numCPU\", # This number is vCPU\n \"runtime.host\"\n ]\n\n\n try:\n # Fast way of getting VM properties\n props = server._retrieve_properties_traversal(property_names=query, obj_type=\"VirtualMachine\")\n except Exception as error:\n vmware_monitor.json = output_message(\"Error retrieving VMware VM data\" + error.message)\n return\n else:\n\n for prop_set in props:\n mor = prop_set.Obj # managed_object_reference\n vm_name = \"Error\"\n vm_status = 0\n vm_cpu = 0\n vm_cpu_count = 0\n vm_host_mor = None\n\n # The properties aren't always returned in the order you expect, so we have to match them up\n for i in range(len(prop_set.PropSet)):\n if prop_set.PropSet[i].Name == \"name\":\n vm_name = prop_set.PropSet[i].Val\n elif prop_set.PropSet[i].Name == \"summary.overallStatus\":\n vm_status = prop_set.PropSet[i].Val\n elif prop_set.PropSet[i].Name == \"summary.quickStats.overallCpuUsage\":\n vm_cpu = prop_set.PropSet[i].Val\n elif prop_set.PropSet[i].Name == \"config.hardware.numCPU\":\n vm_cpu_count = prop_set.PropSet[i].Val\n elif prop_set.PropSet[i].Name == \"runtime.host\":\n vm_host_mor = prop_set.PropSet[i].Val\n\n # Check to see if this VM is in our list or create one if not found\n vm = VMwareVM.find_by_name(mor, vm_name)\n if vm_status == \"green\":\n vm.heartbeat_status = 1\n elif vm_status == \"yellow\":\n vm.heartbeat_status = 2\n elif vm_status == \"red\":\n vm.heartbeat_status = 3\n else:\n vm.heartbeat_status = 0\n\n # Store the cpu data in the object\n if len(vm.cpu_datapoints) == 0:\n vm.cpu_datapoints = [vm_cpu]\n else:\n vm.cpu_datapoints.append(vm_cpu)\n # If we already have the max number of datapoints in our list, delete the oldest item\n if len(vm.cpu_datapoints) >= MAX_DATAPOINTS:\n del(vm.cpu_datapoints[0])\n\n vm.host_cpu_mhz = VMwareHost.get_mhz_by_host(vm_host_mor) # Get the host hz per CPU\n vm.cpu_count = vm_cpu_count\n # Update ranking value of this VM to determine if we should show it\n vm.update_relative_weight()\n\n # Once we have finished updating our VM data, grab the top MAX_VM_RESULTS and output the JSON\n # Sort by relative weight\n VMwareVM.all_vms.sort(key=operator.attrgetter('relative_weight'), reverse=True)\n\n vms = []\n for i in range(MAX_VM_RESULTS):\n vms.append({\n \"name\": VMwareVM.all_vms[i].name,\n \"status\": VMwareVM.all_vms[i].heartbeat_status,\n \"cpu\": VMwareVM.all_vms[i].cpu_datapoints,\n \"cpu_count\": VMwareVM.all_vms[i].cpu_count,\n \"host_cpu_mhz\": VMwareVM.all_vms[i].host_cpu_mhz,\n })\n\n vmware_monitor.json = json.dumps({\"vms\": vms})\n\n if __debug__:\n print vmware_monitor.json", "def get_monitor_output(name: Optional[pulumi.Input[str]] = None,\n partition: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMonitorResult]:\n ...", "def update(self):\n stats = self._thread.stats()\n if self._var_id == CONTAINER_MONITOR_STATUS:\n self._state = stats.get('status', None)\n elif self._var_id == CONTAINER_MONITOR_MEMORY_USAGE:\n self._state = stats.get('memory_usage', None)\n elif self._var_id == CONTAINER_MONITOR_CPU_PERCENTAGE:\n self._state = stats.get('cpu_percent', None)\n if 'cpu' in stats:\n self._attributes[ATTR_ONLINE_CPUS] = stats['cpu'].get('online_cpus', None)\n elif self._var_id == CONTAINER_MONITOR_MEMORY_PERCENTAGE:\n self._state = stats.get('memory_percent', None)\n # 
Network\n elif self._var_id == CONTAINER_MONITOR_NETWORK_UP:\n self._state = round(stats.get('network_up', None) / 1024.0, PRECISION)\n elif self._var_id == CONTAINER_MONITOR_NETWORK_DOWN:\n self._state = round(stats.get('network_down', None) / 1024.0, PRECISION)", "async def _async_start_monitor(self) -> None:\n if not sys.platform.startswith(\"linux\"):\n return\n info = await system_info.async_get_system_info(self.hass)\n if info.get(\"docker\"):\n return\n\n from pyudev import ( # pylint: disable=import-outside-toplevel\n Context,\n Monitor,\n MonitorObserver,\n )\n\n try:\n context = Context()\n except (ImportError, OSError):\n return\n\n monitor = Monitor.from_netlink(context)\n try:\n monitor.filter_by(subsystem=\"tty\")\n except ValueError as ex: # this fails on WSL\n _LOGGER.debug(\n \"Unable to setup pyudev filtering; This is expected on WSL: %s\", ex\n )\n return\n observer = MonitorObserver(\n monitor, callback=self._device_discovered, name=\"usb-observer\"\n )\n observer.start()\n\n def _stop_observer(event: Event) -> None:\n observer.stop()\n\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_observer)\n self.observer_active = True", "def monitor(self) -> HwMonitor:\n return self._montior", "def info(self, handle):\n\n # Each process group gathers their output\n\n groupstr = \"\"\n procstr = \"\"\n\n gcomm = self._comm.comm_group\n wcomm = self._comm.comm_world\n rcomm = self._comm.comm_rank\n\n if wcomm.rank == 0:\n handle.write(\"Data distributed over {} processes in {} groups\\n\".format(self._comm.world_size, self._comm.ngroups))\n\n for ob in self.obs:\n id = ob['id']\n tod = ob['tod']\n base = ob['baselines']\n nse = ob['noise']\n intrvl = ob['intervals']\n\n if gcomm.rank == 0:\n groupstr = \"observation {}:\\n\".format(id)\n groupstr = \"{} {} total samples, {} detectors\\n\".format(groupstr, tod.total_samples, len(tod.detectors))\n if intrvl is not None:\n groupstr = \"{} {} intervals:\\n\".format(groupstr, len(intrvl))\n for it in intrvl:\n groupstr = \"{} {} --> {} ({} --> {})\\n\".format(groupstr, it.first, it.last, it.start, it.stop)\n\n # rank zero of the group will print general information,\n # and each process will get its statistics.\n\n nsamp = tod.local_samples[1]\n dets = tod.local_dets\n\n procstr = \" proc {}\\n\".format(gcomm.rank)\n my_chunks = 1\n if tod.local_chunks is not None:\n my_chunks = tod.local_chunks[1]\n procstr = \"{} sample range {} --> {} in {} chunks:\\n\".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)\n \n if tod.local_chunks is not None:\n chkoff = tod.local_samples[0]\n for chk in range(tod.local_chunks[1]):\n abschk = tod.local_chunks[0] + chk\n chkstart = chkoff\n chkstop = chkstart + tod.total_chunks[abschk] - 1\n procstr = \"{} {} --> {}\\n\".format(procstr, chkstart, chkstop)\n chkoff += tod.total_chunks[abschk]\n\n if nsamp > 0:\n \n stamps = tod.read_times(local_start=0, n=nsamp)\n\n procstr = \"{} timestamps {} --> {}\\n\".format(procstr, stamps[0], stamps[-1])\n\n for dt in dets:\n procstr = \"{} det {}:\\n\".format(procstr, dt)\n\n pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)\n\n procstr = \"{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\\n\".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])\n\n data = tod.read(detector=dt, local_start=0, n=nsamp)\n flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)\n procstr = \"{} {:.3e} ({}) --> {:.3e} 
({})\\n\".format(procstr, data[0], flags[0], data[-1], flags[-1])\n good = np.where((flags | common) == 0)[0]\n procstr = \"{} {} good samples\\n\".format(procstr, len(good))\n min = np.min(data[good])\n max = np.max(data[good])\n mean = np.mean(data[good])\n rms = np.std(data[good])\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n for cname in tod.cache.keys():\n procstr = \"{} cache {}:\\n\".format(procstr, cname)\n ref = tod.cache.reference(cname)\n min = np.min(ref)\n max = np.max(ref)\n mean = np.mean(ref)\n rms = np.std(ref)\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n recvstr = \"\"\n if gcomm.rank == 0:\n groupstr = \"{}{}\".format(groupstr, procstr)\n for p in range(1, gcomm.size):\n if gcomm.rank == 0:\n recvstr = gcomm.recv(source=p, tag=p)\n groupstr = \"{}{}\".format(groupstr, recvstr)\n elif p == gcomm.rank:\n gcomm.send(procstr, dest=0, tag=p)\n gcomm.barrier()\n\n # the world rank 0 process collects output from all groups and\n # writes to the handle\n\n recvgrp = \"\"\n if wcomm.rank == 0:\n handle.write(groupstr)\n for g in range(1, self._comm.ngroups):\n if wcomm.rank == 0:\n recvgrp = rcomm.recv(source=g, tag=g)\n handle.write(recvgrp)\n elif g == self._comm.group:\n if gcomm.rank == 0:\n rcomm.send(groupstr, dest=0, tag=g)\n wcomm.barrier()\n\n return", "def monitorlist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.MONITOR_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.monitor_point()\n while (self.symbol.type == self.scanner.NAME):\n self.monitor_point()\n\n # Check right curly bracket ends monitors block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.END_ID):\n # Error: missing '}'\n # Stopping Symbols: END' KEYWORD\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Bad name terminated monitors incorrectly\n # Error: Invalid name\n # Stopping Symbols: END' KEYWORD\n self.error(self.NAME_STRING, [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Error: Curly needed after 'MONITOR'\n # Stopping Symbols: END' KEYWORD\n self.error(self.NO_CURLY_MONITOR, [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Error: 'MONITOR' keyword required\n # Stopping Symbols: END' KEYWORD\n self.error(self.NEED_MONITOR_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.END_ID])", "def gather_metric(self):\n result = self._shell.run(self.KERNEL_COMMAND).stdout\n response = {self.KERNEL_RELEASE: result}\n return response", "def get_status(self):\n dstat_lines = cat(self.PROC_DISKSTATS)\n pl = dict()\n for line in dstat_lines:\n if line:\n p = partition()\n (p.M, p.m, p.dev,\n p.r, p.r_merged, p.r_sectors, p.r_ms,\n p.w, p.w_merged, p.w_sectors, p.w_ms,\n p.io_progress, p.io_ms, p.ms_weighted) = line.strip().split()\n\n if p.dev in self.partitions:\n pl[p.dev] = p\n\n return pl", "def show_health_monitor(self, health_monitor, **_params):\r\n return self.get(self.health_monitor_path % (health_monitor),\r\n params=_params)", "def get_info():\n\n global DISKINFO\n DISKINFO = {}\n\n #Run diskutil list to get disk names.\n runcmd = subprocess.Popen(\"diskutil list -plist\", stdout=subprocess.PIPE, 
stderr=subprocess.STDOUT, shell=True)\n\n #Get the output.\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n global PLIST\n\n PLIST = plistlib.loads(stdout)\n\n #Find the disks.\n for disk in PLIST[\"AllDisks\"]:\n #Run diskutil info to get disk info.\n runcmd = subprocess.Popen(\"diskutil info -plist \"+disk, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n PLIST = plistlib.loads(stdout)\n\n #Check if the disk is a partition.\n disk_is_partition = is_partition(disk)\n\n if not disk_is_partition:\n #These are devices.\n get_device_info(disk)\n\n else:\n #These are Partitions. Fix for disks w/ more than 9 partitions.\n host_disk = \"/dev/\"+disk.split(\"s\")[0]+\"s\"+disk.split(\"s\")[1]\n get_partition_info(disk, host_disk)\n\n #Check we found some disks.\n if not DISKINFO:\n raise RuntimeError(\"No Disks found!\")", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%s\\n\\n%s' % ( version, loadavg, )", "def getDisplayInfo(self):\n displayInfo = {}\n logicalDisplayRE = re.compile(\".*DisplayViewport\\{valid=true, .*orientation=(?P<orientation>\\d+),\"\n \" .*deviceWidth=(?P<width>\\d+), deviceHeight=(?P<height>\\d+).*\")\n dumpsys_display_result = self.shell(\"dumpsys display\")\n if dumpsys_display_result is not None:\n for line in dumpsys_display_result.splitlines():\n m = logicalDisplayRE.search(line, 0)\n if m:\n for prop in ['width', 'height', 'orientation']:\n displayInfo[prop] = int(m.group(prop))\n\n if 'width' not in displayInfo or 'height' not in displayInfo:\n physicalDisplayRE = re.compile('Physical size: (?P<width>\\d+)x(?P<height>\\d+)')\n m = physicalDisplayRE.search(self.shell('wm size'))\n if m:\n for prop in ['width', 'height']:\n displayInfo[prop] = int(m.group(prop))\n\n if 'width' not in displayInfo or 'height' not in displayInfo:\n # This could also be mSystem or mOverscanScreen\n phyDispRE = re.compile('\\s*mUnrestrictedScreen=\\((?P<x>\\d+),(?P<y>\\d+)\\) (?P<width>\\d+)x(?P<height>\\d+)')\n # This is known to work on older versions (i.e. 
API 10) where mrestrictedScreen is not available\n dispWHRE = re.compile('\\s*DisplayWidth=(?P<width>\\d+) *DisplayHeight=(?P<height>\\d+)')\n for line in self.shell('dumpsys window').splitlines():\n m = phyDispRE.search(line, 0)\n if not m:\n m = dispWHRE.search(line, 0)\n if m:\n for prop in ['width', 'height']:\n displayInfo[prop] = int(m.group(prop))\n\n if 'orientation' not in displayInfo:\n surfaceOrientationRE = re.compile(\"SurfaceOrientation:\\s+(\\d+)\")\n output = self.shell(\"dumpsys input\")\n m = surfaceOrientationRE.search(output)\n if m:\n displayInfo['orientation'] = int(m.group(1))\n\n BASE_DPI = 160.0\n density = None\n floatRE = re.compile(r\"[-+]?\\d*\\.\\d+|\\d+\")\n d = self.get_property('ro.sf.lcd_density')\n if floatRE.match(d):\n density = float(d)\n else:\n d = self.get_property('qemu.sf.lcd_density')\n if floatRE.match(d):\n density = float(d)\n else:\n physicalDensityRE = re.compile('Physical density: (?P<density>[\\d.]+)', re.MULTILINE)\n m = physicalDensityRE.search(self.shell('wm density'))\n if m:\n density = float(m.group('density'))\n if density is not None:\n displayInfo['density'] = density\n\n displayInfoKeys = {'width', 'height', 'orientation', 'density'}\n if not displayInfoKeys.issuperset(displayInfo):\n self.logger.warning(\"getDisplayInfo failed to get: %s\" % displayInfoKeys)\n\n return displayInfo", "def monitors_read_name(self) -> str:\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1), ctypes.c_int32(0)))\n return result.value.decode('ascii')", "def monitor(self):\n import curses\n import inspect\n\n stdscr = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.cbreak()\n width_split = curses.COLS//3-1\n win_done = curses.newwin(curses.LINES-1, width_split, 0, 0)\n win_running = curses.newwin(curses.LINES-1, width_split,\n 0, width_split+1)\n win_pending = curses.newwin(curses.LINES-1, width_split,\n 0, 2*width_split+1)\n stdscr.addstr(curses.LINES-1, 0,\n 'Monitoring started. 
Press Ctrl+C to stop.')\n stdscr.refresh()\n win_done.addstr(0, 0, 'DONE')\n win_pending.addstr(0, 0, 'PENDING')\n while True:\n try:\n win_done.addstr(1, 0,\n f'{len(self.done)} jobs done')\n list_done = list(self.done)[:curses.LINES-3]\n for idx, fut in enumerate(list_done, start=2):\n fmt_str = f'{id(fut):x} {fut._state}'\n win_done.addstr(idx, 0, fmt_str)\n win_done.refresh()\n\n win_running.clear()\n win_running.addstr(0, 0, 'RUNNING')\n win_running.addstr(1, 0,\n f'{self.running.qsize()} jobs running')\n list_running = list(self.running.items())[:curses.LINES-3]\n for idx, (fut, coro) in enumerate(list_running, start=2):\n coro_state = inspect.getcoroutinestate(coro)\n fmt_str = f'{id(fut):x} {coro_state}'\n win_running.addstr(idx, 0, fmt_str)\n win_running.refresh()\n\n win_pending.clrtoeol()\n win_pending.addstr(1, 0,\n f'{self.pending.qsize()} jobs pending')\n win_pending.refresh()\n time.sleep(.1)\n except KeyboardInterrupt:\n break\n\n curses.nocbreak()\n curses.echo()\n curses.endwin()", "def __repr__(self) -> str:\n return \"<Twilio.Monitor.V1>\"", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def setGlideClientMonitorInfo(self, monitorInfo):\n\n if len(monitorInfo) == 17:\n self.adParams['GlideClientMonitorJobsIdle'] = monitorInfo[0]\n self.adParams['GlideClientMonitorJobsIdleMatching'] = monitorInfo[1]\n self.adParams['GlideClientMonitorJobsIdleEffective'] = monitorInfo[2]\n self.adParams['GlideClientMonitorJobsIdleOld'] = monitorInfo[3]\n self.adParams['GlideClientMonitorJobsIdleUnique'] = monitorInfo[4]\n self.adParams['GlideClientMonitorJobsRunning'] = monitorInfo[5]\n self.adParams['GlideClientMonitorJobsRunningHere'] = monitorInfo[6]\n self.adParams['GlideClientMonitorJobsRunningMax'] = monitorInfo[7]\n self.adParams['GlideClientMonitorGlideinsTotal'] = monitorInfo[8]\n self.adParams['GlideClientMonitorGlideinsIdle'] = monitorInfo[9]\n self.adParams['GlideClientMonitorGlideinsRunning'] = monitorInfo[10]\n self.adParams['GlideClientMonitorGlideinsFailed'] = monitorInfo[11]\n self.adParams['GlideClientMonitorGlideinsTotalCores'] = monitorInfo[12]\n self.adParams['GlideClientMonitorGlideinsIdleCores'] = monitorInfo[13]\n self.adParams['GlideClientMonitorGlideinsRunningCores'] = monitorInfo[14]\n self.adParams['GlideClientMonitorGlideinsRequestIdle'] = monitorInfo[15]\n self.adParams['GlideClientMonitorGlideinsRequestMaxRun'] = monitorInfo[16]\n else:\n raise RuntimeError('Glide client monitoring structure changed. 
Resource ad may have incorrect GlideClientMonitor values')", "def get_ceph_srv_info():\n services = []\n for name, pid in get_ceph_pids():\n process = psutil.Process(pid)\n services.append(CEPHSrvInfo(name, pid, process.get_cpu_percent(),\\\n process.memory_info().rss))\n return services", "async def get_discovered_device_data(self):\n json = self._api_call(\"monitors/%s/devices\" % self.sense_monitor_id)\n return await json", "def start_monitoring(self):\n pass", "def start_monitor():\n monitor_enabled = config_json[env]['MONITOR_ENABLED']\n monitor_trigger_interval_s = int( config_json[env]['MONITOR_TRIGGER_INTERVAL_S'] )\n\n # IF SCHEDULE IS ENABLED IN CONFIG:\n if monitor_enabled == \"1\":\n\n print(\"\\nSpace Weather Service Monitor: ENABLED (running every %s seconds)\" % monitor_trigger_interval_s)\n\n # RUN INITIAL CHECK SPACE WEATHER\n processes.process_check_space_weather()\n\n # CREATE SCHEDULER W/ INTERVAL TRIGGER AND START\n scheduler = BackgroundScheduler()\n scheduler.add_job(\n func = processes.process_check_space_weather,\n trigger = IntervalTrigger( seconds = monitor_trigger_interval_s ),\n id = 'check_space_weather',\n name = 'Checking Space Weather Every 30 Seconds')\n scheduler.start()\n atexit.register( lambda: scheduler.shutdown() )\n else:\n print(\"\\nSpace Weather Service Monitor: DISABLED\")", "def collect_compute_info(self, ctxt, host_id, host_info):\n logger.info(\"Info of \" + host_id + \" :\" + str(host_info))\n self._compute_node_info.add_node_info(host_id, host_info)", "def stats(self):\n\n res = self.read_block(REG_STATS, 9)\n\n ret = {\n \"completed_cycles\": (res[1] << 8) + (res[0] << 0),\n \"last_boot\": {\n \"retries\": res[2],\n \"duration\": (res[6] << 24) + (res[5] << 16) + (res[4] << 8) + (res[3] << 0)\n },\n \"forced_shutdowns\": (res[8] << 8) + (res[7] << 0)\n }\n\n return ret", "def ls(cls):\n for vm in cls._vm_agents_for_host():\n with vm:\n running = vm.qemu.process_exists()\n\n if running:\n vm_mem = vm.qemu.proc().memory_full_info()\n\n expected_size = (\n vm.cfg[\"memory\"] * 1024 * 1024\n + vm.qemu.vm_expected_overhead * 1024 * 1024\n )\n\n log.info(\n \"online\",\n machine=vm.name,\n cores=vm.cfg[\"cores\"],\n memory_booked=\"{:,.0f}\".format(vm.cfg[\"memory\"]),\n memory_pss=\"{:,.0f}\".format(vm_mem.pss / MiB),\n memory_swap=\"{:,.0f}\".format(vm_mem.swap / MiB),\n )\n else:\n log.info(\"offline\", machine=vm.name)", "def get(self, request, health_monitor_id):\n conn = get_sdk_connection(request)\n health_mon = conn.load_balancer.find_health_monitor(health_monitor_id)\n return _get_sdk_object_dict(health_mon)", "def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)", "def check_system_health(self):\n l_thread_list_letter = []\n l_thread_list_other = []\n for t in threading.enumerate():\n if t.name == 'MainThread':\n l_thread_list_letter.append('M')\n elif len(t.name) == 1:\n l_thread_list_letter.append(t.name)\n else:\n l_thread_list_other.append(t.name)\n l_thread_list_letter.sort()\n l_thread_list_other.sort()\n l_thread_list = 
'[{0}]-[{1}]'.format(''.join(l_thread_list_letter), '/'.join(l_thread_list_other))\n\n l_mem = psutil.virtual_memory()\n\n self.m_logger.info(('System Health Check - Available RAM: {0:.2f} Mb ({1:.2f} % usage) ' +\n 'Threads: {2}').format(\n l_mem.available / (1024 * 1024), l_mem.percent, l_thread_list))\n\n if l_mem.percent >= 75.0:\n self.m_logger.warning('System Health Check ALERT - Available RAM: {0:.2f} Mb ({1:.2f} % usage)'.format(\n l_mem.available / (1024 * 1024), l_mem.percent))\n\n # full system resource log every 5 minutes\n if self.m_hcCounter % 10 == 0:\n l_cpu = psutil.cpu_times()\n l_swap = psutil.swap_memory()\n l_diskRoot = psutil.disk_usage('/')\n l_net = psutil.net_io_counters()\n l_processCount = len(psutil.pids())\n\n # log message in TB_EC_MSG\n l_conn = psycopg2.connect(\n host=EcAppParam.gcm_dbServer,\n database=EcAppParam.gcm_dbDatabase,\n user=EcAppParam.gcm_dbUser,\n password=EcAppParam.gcm_dbPassword\n )\n l_cursor = l_conn.cursor()\n try:\n l_cursor.execute(\"\"\"\n insert into \"TB_EC_MSG\"(\n \"ST_TYPE\",\n \"ST_NAME\",\n \"ST_LEVEL\",\n \"ST_MODULE\",\n \"ST_FILENAME\",\n \"ST_FUNCTION\",\n \"N_LINE\",\n \"TX_MSG\"\n )\n values(%s, %s, %s, %s, %s, %s, %s, %s);\n \"\"\", (\n 'HLTH',\n 'xxx',\n 'XXX',\n 'ec_app_core',\n './ec_app_core.py',\n 'check_system_health',\n 0,\n 'MEM: {0}/CPU: {1}/SWAP: {2}/DISK(root): {3}/NET: {4}/PROCESSES: {5}'.format(\n l_mem, l_cpu, l_swap, l_diskRoot, l_net, l_processCount\n )\n ))\n l_conn.commit()\n except Exception as e:\n EcMailer.send_mail('TB_EC_MSG insert failure: {0}-{1}'.format(\n type(e).__name__,\n repr(e)\n ), 'Sent from EcAppCore::check_system_health')\n raise\n\n l_cursor.close()\n l_conn.close()\n\n self.m_hcCounter += 1", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if 
('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def describe_collect(self):\n logger.info(\"describe_collect()\")\n d = dict(\n source = \"elapsed time, s\",\n dtype = \"number\",\n shape = (1,)\n )\n return {\n self.name: {\n \"x\": d\n }\n }", "def load_monitoring_capabilities():\n config = ConfigParser.ConfigParser()\n for metric_name, metric_infos in METRICS.iteritems():\n if 'monitoring' in metric_infos.keys():\n monitoring_sys = str(metric_infos['monitoring'])\n config.read('configs/' + monitoring_sys + '.cfg')\n collector_api = config.get(monitoring_sys, 'collector_api')\n\n monitoring_records = DB.monitoring.find({'name': monitoring_sys})\n if monitoring_records.count() > 0:\n\n mon_record = monitoring_records[0]\n try:\n mon_metrics = mon_record['metrics']\n if metric_name not in mon_metrics:\n mon_metrics.append(metric_name)\n mon_record['metrics'] = mon_metrics\n\n DB.monitoring.update({'name': monitoring_sys},\n mon_record, upsert=True)\n except KeyError:\n print monitoring_sys + \\\n ' record malformed or insert to DB failed.'\n else:\n mon_record = {'name': monitoring_sys,\n 'metrics': [metric_name],\n 'api': collector_api}\n DB.monitoring.insert(mon_record)", "def monitoring():\n\n logging.info(\"!!! Engine start !!! {}\".format(time.strftime(\"%d.%m.%y %H:%M\")))\n\n try_connect = 0\n initialization()\n while True:\n try:\n for vrt, host in host_vrt.items():\n answer = subprocess.call(['ping', '-c', '3', vrt])\n if answer != 0:\n collection()\n time.sleep(15)\n try_connect += 1\n logging.info(\"!!! 
Try firs reconnection {} !!!\".format(time.strftime(\"%d.%m.%y %H:%M\")))\n if try_connect == 2:\n vrt_unreachable.append(vrt)\n with app.app_context():\n alert()\n if try_connect >= 3:\n for vm, cmd in host_cmd_vmid.items():\n if vm == vrt:\n ssh_cli(SSHClient(host, port, user, password), cmd)\n try_connect = 0\n successful_autostart()\n\n\n else:\n continue\n\n except TimeoutError:\n print('Connection timed out')\n logging.info(\"SSH Connection time out {}\".format(time.strftime(\"%d.%m.%y %H:%M\")))\n\n except paramiko.ssh_exception.NoValidConnectionsError:\n print('NoValidConnectionsError')\n bad_autostart()", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def get_curr(dev):\n\n with open('/proc/diskstats') as diskstatsfd:\n all_stats = diskstatsfd.read().rstrip().split(\"\\n\")\n stats = [x for x in all_stats if dev in x][0].split()\n\n curr = {}\n curr['reads'] = int(stats[3])\n curr['read_ms'] = int(stats[6])\n curr['writes'] = int(stats[7])\n curr['write_ms'] = int(stats[10])\n\n return curr", "def main(self):\n debug(\"Using %s\" % (self.PROC_DISKSTATS))\n\n initial = self.get_status()\n time.sleep(self.interval)\n final = self.get_status()\n\n # Get bytes/sec\n for d in self.partitions:\n r_diff = ((final[d].r_sectors - initial[d].r_sectors) * self.sector_size) / self.interval\n w_diff = ((final[d].w_sectors - initial[d].w_sectors) * self.sector_size) / self.interval\n final[d].r_rate = r_diff\n final[d].w_rate = w_diff\n \n # Status string\n msg = \" \".join([ \"%s (r: %d KB/s, w: %d KB/s)\" % (i.dev, i.r_rate / 1024, i.w_rate / 1024) for i in sorted(final.values(), key=lambda x:x.dev) ])\n performance = \" \".join([ \"'%s read'=%d '%s write'=%d\" % (i.dev, i.r_rate, i.dev, i.w_rate) for i in sorted(final.values(), key=lambda x:x.dev) ])\n\n return (EX_OK, msg, performance)" ]
[ "0.66455966", "0.6420232", "0.64127403", "0.6299886", "0.6260533", "0.6227388", "0.6212769", "0.6193962", "0.6172191", "0.6142712", "0.60478276", "0.6043625", "0.60359365", "0.5971578", "0.5969627", "0.5957598", "0.5948469", "0.5928224", "0.5919851", "0.5902216", "0.58823013", "0.5860545", "0.5846942", "0.58260006", "0.5782762", "0.57789516", "0.5762519", "0.5750942", "0.57418644", "0.5724609", "0.5687578", "0.56679344", "0.56668234", "0.56581503", "0.56445026", "0.5638736", "0.5614906", "0.56146646", "0.56139296", "0.5613476", "0.5604229", "0.56009513", "0.55911326", "0.55894715", "0.5588092", "0.5562841", "0.55427235", "0.5528466", "0.55072206", "0.55017745", "0.54903984", "0.5483779", "0.54761267", "0.5451727", "0.5449024", "0.544344", "0.5418599", "0.5418093", "0.54098374", "0.54073626", "0.5380212", "0.5376778", "0.536204", "0.5352824", "0.5332763", "0.53050846", "0.53029954", "0.52953607", "0.5291903", "0.5286392", "0.528527", "0.5280024", "0.52783126", "0.5272299", "0.5255248", "0.52535975", "0.52445096", "0.5242058", "0.52386993", "0.52386034", "0.5238403", "0.5234982", "0.5233759", "0.5230497", "0.52198863", "0.5217105", "0.52042425", "0.5203881", "0.52001077", "0.51985526", "0.51927924", "0.51915526", "0.51902384", "0.518121", "0.51805735", "0.51780474", "0.5171218", "0.5162585", "0.5161949", "0.5159027" ]
0.7485646
0
Gather ceph device information
def get_device_info(handle, timeout):
    device_info = dict()
    device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)

    return device_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_info(self) -> Dict[str, Any]:\n via_device = 'meter_adapter'\n if self.toon.gas.is_smart:\n via_device = 'electricity'\n\n return {\n 'name': 'Gas Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'gas'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, via_device),\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._actuator.id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._actuator.id))},\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def device_info(self):\n return {\n \"name\": self._alias,\n \"model\": self._model,\n \"manufacturer\": \"TP-Link\",\n \"connections\": {(dr.CONNECTION_NETWORK_MAC, self._mac)},\n \"sw_version\": self._sysinfo[\"sw_ver\"],\n }", "def device():\n return G.DEVICE", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Electricity Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'electricity'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 'meter_adapter'),\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._uuid)},\n \"name\": self._device.device_data[self._uuid]['name'],\n \"manufacturer\": \"Nest Labs\",\n \"model\": self._device.device_data[self._uuid]['model'],\n }", "def device_info(self):\n if self._mac:\n mac = {(CONNECTION_NETWORK_MAC, self._mac)}\n else:\n mac = {}\n\n device_info = {\n ATTR_IDENTIFIERS: {(DOMAIN, self._item_id)},\n ATTR_NAME: self._name,\n ATTR_CONNECTIONS: mac,\n ATTR_MANUFACTURER: \"Google\",\n ATTR_MODEL: DEV_CLIENT_MODEL,\n \"via_device\": (DOMAIN, self._system_id),\n }\n\n return device_info", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def get_device_information(self):\n return self.mycam.devicemgmt.GetDeviceInformation()", "def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, 0),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, 0))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.system.product_type,\n \"sw_version\": self._data.wiserhub.system.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, str(self.coordinator.gios.station_id))},\n \"name\": DEFAULT_NAME,\n \"manufacturer\": MANUFACTURER,\n \"entry_type\": \"service\",\n }", "def 
print_device_info(device):\n assert(isinstance(device, Device))\n print(\" Device Name : %s\" % device.name)\n print(\" OS Type : %s\" % device.os_type)\n print(\" IP Address : %s\" % device.ip_addr)\n print(\" Interfaces : %s\" % \", \".join(device.iflist))", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.tesla_device.id())},\n \"name\": self.tesla_device.car_name(),\n \"manufacturer\": \"Tesla\",\n \"model\": self.tesla_device.car_type,\n \"sw_version\": self.tesla_device.car_version,\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.coordinator.data[\"deviceID\"])},\n \"name\": self.coordinator.data[\"deviceName\"],\n \"manufacturer\": self.coordinator.data[\"deviceManufacturer\"],\n \"model\": self.coordinator.data[\"deviceModel\"],\n \"sw_version\": self.coordinator.data[\"appVersionName\"],\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Somfy\",\n \"name\": self.name,\n \"model\": self.tahoma_device.widget,\n \"sw_version\": self.tahoma_device.type,\n }", "def get_device_info(ns, device, human_friendly):\n if device.NumberOfBlocks and device.BlockSize:\n size = size2str(device.NumberOfBlocks * device.BlockSize, human_friendly)\n else:\n size = 'N/A'\n\n fslabel = fs.get_device_format_label(ns, device)\n return (device.DeviceID,\n device.Name,\n device.ElementName,\n size,\n fslabel)", "def device_info(self) -> Dict[str, Any]:\n agreement = self.toon.agreement\n model = agreement.display_hardware_version.rpartition('/')[0]\n sw_version = agreement.display_software_version.rpartition('/')[-1]\n return {\n 'identifiers': {\n (DOMAIN, agreement.id),\n },\n 'name': 'Toon Display',\n 'manufacturer': 'Eneco',\n 'model': model,\n 'sw_version': sw_version,\n }", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self.config_entry.entry_id)},\n \"name\": NAME,\n \"model\": VERSION,\n \"manufacturer\": NAME,\n }", "def device_info(self) -> dict:\n return {\n \"connections\": {(DOMAIN, self._unique_id)},\n \"name\": self._host,\n \"manufacturer\": \"IMAP E-Mail\",\n \"sw_version\": VERSION,\n }", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.device_id)},\n \"name\": self.name,\n \"manufacturer\": self.manufacturer,\n \"model\": self._device.device_model,\n \"sw_version\": \"\",\n \"via_device\": (DOMAIN, self._controller_ip),\n }", "def device_info(self) -> dict[str, any]:\n device_information = {\n \"identifiers\": {(DOMAIN, self._dev_id)},\n \"name\": self._device_name,\n \"manufacturer\": self._manufacturer,\n \"model\": self._model,\n \"sw_version\": self._fw_version,\n }\n\n if self._dev_id != self._api.gateway_id:\n device_information[\"via_device\"] = (DOMAIN, self._api.gateway_id)\n else:\n device_information[\"name\"] = f\"Smile {self._api.smile_name}\"\n\n return device_information", "def device_info(self):\n return {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._mac)},\n \"default_name\": self._device_name,\n \"default_model\": 
self._device[\"device_model\"],\n \"via_device\": (DOMAIN, self._router.unique_id),\n }", "def device_info(self) -> Optional[Dict[str, Any]]:\n return {ATTR_NAME: self.name, \"identifiers\": {(DOMAIN, self._device.device_id)}}", "def device_information(self):\n return self._device_information", "def getDeviceHostDetails(self,device):\n dev_host_det = self.host.get_host_device_ver(device)\n build = dev_host_det['build']\n # redhat version\n os_ver = dev_host_det['version']\n kernel = dev_host_det['kernel']\n \n self.setBuild()\n #self.foundCardbuild = self.setBuild()\n\n str = \"Running '%s' tests on device '%s',build '%s' \\n host kernel '%s'\"%(self.testcaseStr,device,build,kernel) + \\\n \" os version '%s', machine '%s' \"%(os_ver,self.host.name)\n\n return str", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._device_id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._device_id))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.devices.get_by_id(self._device_id).model,\n \"sw_version\": self._device.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._room_id)},\n \"name\": self._room_name,\n \"manufacturer\": MANUFACTURER,\n \"model\": MODELS[self._module_type],\n }", "def get_device_info(self): # pylint: disable=no-self-use\r\n serial = get_serial_number()\r\n model = get_model()\r\n\r\n return {\r\n \"serial\": serial,\r\n \"model\": model,\r\n }", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"NAT\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} NAT\",\n }\n return info", "def bdev_nvme_get_discovery_info(client):\n return client.call('bdev_nvme_get_discovery_info')", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._device.unique_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Apple\",\n \"model\": self._device.device_model,\n }", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Boiler Module',\n 'manufacturer': 'Eneco',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'boiler_module'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id),\n }", "def get_gadget_info(gfname):\n partmass = pyg.readheader(gfname, 'massTable')[1]\n boxsize = pyg.readheader(gfname, 'boxsize')\n omegam = pyg.readheader(gfname, 'O0')\n omegal = pyg.readheader(gfname, 'Ol')\n h = pyg.readheader(gfname, 'h')\n npart = pyg.readheader(gfname, 'npartTotal')[1]\n return omegam, omegal, h, boxsize, partmass, npart", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = 
str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def device(self):\n return self._vars[0].device", "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, self._unique_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Yeelight\",\n \"model\": self._device.model,\n \"sw_version\": self._device.fw_version,\n }", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def get_device(self) -> str:\n pass", "def inspect_device(g) -> inspect_pb2.InspectionResults:\n\n roots = g.inspect_os()\n if len(roots) == 0:\n return inspect_pb2.InspectionResults(\n os_count=len(roots)\n\n )\n root = roots[0]\n mount_points = g.inspect_get_mountpoints(root)\n for dev, mp in sorted(mount_points.items(), key=lambda k: len(k[0])):\n try:\n g.mount_ro(mp, dev)\n except RuntimeError as msg:\n print('%s (ignored)' % msg, file=sys.stderr)\n fs = boot_inspect.system.filesystems.GuestFSFilesystem(g)\n operating_system = linux.Inspector(fs, _LINUX).inspect()\n if not operating_system:\n operating_system = windows.Inspector(g, root).inspect()\n if operating_system:\n operating_system.architecture = architecture.Inspector(g, root).inspect()\n\n g.umount_all()\n\n return inspect_pb2.InspectionResults(\n os_release=operating_system,\n os_count=1 if operating_system else 0,\n )", "async def get_device_data(self):\n pass", "def device_info(self):\r\n device_info = DeviceInfo(\r\n identifiers={(DOMAIN, self.location_name)},\r\n entry_type=DeviceEntryType.SERVICE,\r\n name=f\"{NAME}: {self.location_name}\",\r\n manufacturer=\"Met.no\",\r\n model=\"Nowcast\",\r\n configuration_url=\"https://www.met.no/en\",\r\n )\r\n return device_info", "def device_info(devid: int = 0) -> str: # pragma: no cover\n numdev = jax.device_count()\n if devid >= numdev:\n raise RuntimeError(f\"Requested information for device {devid} but only {numdev} present.\")\n dev = jax.devices()[devid]\n if dev.platform == \"cpu\":\n info = \"CPU\"\n else:\n info = f\"{dev.platform.upper()} ({dev.device_kind})\"\n return info", "def device_info(self) -> DeviceInfo:\n return self._device_info", "def get_device_info(disk):\n\n host_disk = \"/dev/\"+disk\n DISKINFO[host_disk] = {}\n DISKINFO[host_disk][\"Name\"] = host_disk\n DISKINFO[host_disk][\"Type\"] = \"Device\"\n DISKINFO[host_disk][\"HostDevice\"] = \"N/A\"\n DISKINFO[host_disk][\"Partitions\"] = []\n DISKINFO[host_disk][\"Vendor\"] = get_vendor(disk)\n DISKINFO[host_disk][\"Product\"] = get_product(disk)\n DISKINFO[host_disk][\"RawCapacity\"], DISKINFO[host_disk][\"Capacity\"] = get_capacity()\n DISKINFO[host_disk][\"Description\"] = get_description(disk)\n DISKINFO[host_disk][\"Flags\"] = get_capabilities(disk)\n DISKINFO[host_disk][\"Partitioning\"] = get_partitioning(disk)\n DISKINFO[host_disk][\"FileSystem\"] = \"N/A\"\n DISKINFO[host_disk][\"UUID\"] = \"N/A\"\n DISKINFO[host_disk][\"ID\"] = get_id(disk)\n DISKINFO[host_disk][\"BootRecord\"], DISKINFO[host_disk][\"BootRecordStrings\"] = get_boot_record(disk)\n\n return host_disk", "async def get_system_info(hass, include_components):\n\n gate_id = hass.states.get('sensor.ais_secure_android_id_dom').state\n info_object = {\n 'arch': platform.machine(),\n 'dev': 'dev' in 
current_version,\n 'docker': False,\n 'os_name': platform.system(),\n 'python_version': platform.python_version(),\n 'timezone': dt_util.DEFAULT_TIME_ZONE.zone,\n 'version': current_version,\n 'virtualenv': os.environ.get('VIRTUAL_ENV') is not None,\n 'hassio': hass.components.hassio.is_hassio(),\n 'gate_id': gate_id,\n }\n\n if include_components:\n info_object['components'] = list(hass.config.components)\n\n if platform.system() == 'Windows':\n info_object['os_version'] = platform.win32_ver()[0]\n elif platform.system() == 'Darwin':\n info_object['os_version'] = platform.mac_ver()[0]\n elif platform.system() == 'FreeBSD':\n info_object['os_version'] = platform.release()\n elif platform.system() == 'Linux':\n import distro\n linux_dist = await hass.async_add_job(\n distro.linux_distribution, False)\n info_object['distribution'] = linux_dist[0]\n info_object['os_version'] = linux_dist[1]\n info_object['docker'] = os.path.isfile('/.dockerenv')\n\n return info_object", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"Scripts\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} Scripts\",\n }\n return info", "def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )", "def device_info(self):\n\n return {\n 
\"identifiers\": {(DOMAIN, self._vin)}\n }", "def device_info(self):\n model = self.data.wiserSmart.getWiserDeviceInfo(self.appliance_id).get(\"modelId\")\n\n return {\n \"name\": self.appliance_name,\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": MANUFACTURER,\n \"model\": model,\n }", "def get_device(arn=None):\n pass", "def device_info(self) -> interface.DeviceInfo:\n return self._device_info", "def show_device_information(self):\n\n if self._json_format:\n print(json.dumps(self._devices, indent=4, separators=(',', ': ')))\n return\n\n if self._long_format:\n self.show_device_information_long()\n elif self._include_enclosures and self._number_enclosures:\n self.show_device_information_enclosures()\n else:\n self.show_device_information_only()", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"Queue\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} Queue\",\n }\n return info", "def show_device_information_long(self):\n\n for device in self._devices:\n print(\"\")\n if device['Device Type'].startswith(\"enclosu\"):\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n else:\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('Linux Device Name'):\n print(\"{0:>32}: {1}\".format(\"Linux Device Name\", device['Linux Device Name']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('Drive Capacity'):\n print(\"{0:>32}: {1}\".format(\"Drive Capacity\", device['Drive Capacity']))\n if device.get('Block Length'):\n print(\"{0:>32}: {1}\".format(\"Block Length\", device['Block Length']))\n if device.get('Power On Hours'):\n print(\"{0:>32}: {1}\".format(\"Power On Hours\", 
device['Power On Hours']))\n if device.get('Current Temperature'):\n print(\"{0:>32}: {1}\".format(\"Current Temperature\", device['Current Temperature']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n if device.get('Enclosure Device'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Device\", device['Enclosure Device']))\n if device.get('Enclosure Slot'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Slot\", device['Enclosure Slot']))\n if device.get('Slot Description'):\n print(\"{0:>32}: {1}\".format(\"Slot Desciption\", device['Slot Description']))\n\n if len(self._devices):\n print(\"\")", "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, self.unique_id)},\n manufacturer=\"Volumio\",\n model=self._info[\"hardware\"],\n name=self._name,\n sw_version=self._info[\"systemversion\"],\n )", "def getDeviceInfo():\n url = \"https://api.roblox.com/reference/deviceinfo\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def print_device_info(nodemap):\r\n\r\n #print('*** DEVICE INFORMATION ***\\n')\r\n\r\n try:\r\n result = True\r\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))\r\n\r\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\r\n features = node_device_information.GetFeatures()\r\n #for feature in features:\r\n #node_feature = PySpin.CValuePtr(feature)\r\n #print('%s: %s' % (node_feature.GetName(),\r\n #node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))\r\n\r\n else:\r\n print('Device control information not available.')\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error: %s' % ex)\r\n return False\r\n\r\n return result", "def get_detection_info(self):\n persistent_dict = self.props[\"persistent_identifiers\"]\n persistent_dict[\"model\"] = self._get_system_hardware()\n if persistent_dict[\"model\"] not in usb_config.CAMBRIONIX_PORT_MAP:\n raise errors.DeviceError(\n \"Model {} not supported. Supported models: {}\".format(\n persistent_dict[\"model\"],\n \",\".join(usb_config.CAMBRIONIX_PORT_MAP.keys())))\n persistent_dict[\"hub_port_name\"] = self.communication_address\n persistent_dict[\"console_port_name\"] = self.communication_address\n persistent_dict[\"total_ports\"] = self.total_ports\n persistent_dict[\n \"ftdi_serial_number\"] = usb_utils.get_serial_number_from_path(\n self.communication_address)\n\n # Cambrionix does not have a separate serial number from the one shown\n # in the /dev/serial/by-id/... 
name.\n persistent_dict[\"serial_number\"] = self.props[\"persistent_identifiers\"][\n \"ftdi_serial_number\"]\n\n self.props[\"options\"] = {}\n\n return persistent_dict, self.props[\"options\"]", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {\n (\n DOMAIN,\n self._api.information.serial,\n SynoSurveillanceStation.INFO_API_KEY,\n )\n },\n \"name\": \"Surveillance Station\",\n \"manufacturer\": \"Synology\",\n \"model\": self._api.information.model,\n \"sw_version\": self._version,\n \"via_device\": (DOMAIN, self._api.information.serial),\n }", "def print_device_info(nodemap):\n print(\"\\n*** DEVICE INFORMATION ***\\n\")\n\n try:\n result = True\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode(\"DeviceInformation\"))\n\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\n features = node_device_information.GetFeatures()\n for feature in features:\n node_feature = PySpin.CValuePtr(feature)\n print(\"%s: %s\" % (node_feature.GetName(),\n node_feature.ToString() if PySpin.IsReadable(node_feature) else \"Node not readable\"))\n\n else:\n print(\"Device control information not available.\")\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n return False\n\n return result", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Boiler',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'boiler'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 'boiler_module'),\n }", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def retrieve_device_info(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Solar Panels',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'solar'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 
'meter_adapter'),\n }", "def attributes(self, device_id=0):\n\t\t\treturn cuda.Device(device_id).get_attributes()", "def get_dmg_info(self):\n dmg = self.get_dmg_command()\n return get_dmg_network_information(dmg.network_scan())", "def _get_device(node):\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n\n # Save the device information\n node[\"devices\"] = {}\n node[\"devices\"][\"dpdk_devices\"] = vpp.get_dpdk_devices()\n node[\"devices\"][\"kernel_devices\"] = vpp.get_kernel_devices()\n node[\"devices\"][\"other_devices\"] = vpp.get_other_devices()\n node[\"devices\"][\"linkup_devices\"] = vpp.get_link_up_devices()", "async def identify(self):\n await self.send({\n \"op\": 2,\n \"d\" : {\n \"token\" : self.client.token,\n \"properties\": {\n \"$os\" : platform,\n \"$browser\": \"SpeedCord\",\n \"$device\" : \"SpeedCord\"\n },\n \"intents\" : self.client.intents,\n \"shard\" : (self.id, self.client.shard_count)\n }\n })", "def ceph_enabled(self):", "def show_dev_info(dev):\n if dev is None:\n raise ValueError('- Invalid device')\n show_dev_descriptor(dev)\n return 0", "def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')", "def device_info(dev, testbed_obj, showcmd='show version', save_to_json=False, logstdout=True):\n\n device = testbed_obj.devices[dev]\n device.connect(log_stdout=logstdout)\n response = device.parse(showcmd)\n print(f\"Response from {dev} is of type {type(response)} and length {len(response)}\")\n print(f\"RAW response: \\n{response}\\n\")\n print(f\"FORMATTED response:\\n{json.dumps(response, indent=4)}\")\n print(response.keys())\n\n if save_to_json:\n json_filename = f\"{dev}.json\"\n with open(json_filename, 'w', encoding='utf-8') as f:\n json.dump(response, f, ensure_ascii=False, indent=4)\n print(f\"\\nFILE SAVED: Saved Response to JSON file {json_filename}\")\n\n return device, response", "def device(self):\n return self.share.device", "def device_info(self) -> NUTDeviceInfo:\n return self._device_info or NUTDeviceInfo()", "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, self.coordinator.config_entry.entry_id)},\n name=self.coordinator.config_entry.title,\n manufacturer=\"Prusa\",\n configuration_url=self.coordinator.api.host,\n )", "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, self._data.controller.mac)},\n configuration_url=(\n f\"https://{self._entry.data[CONF_IP_ADDRESS]}:\"\n f\"{self._entry.data[CONF_PORT]}\"\n ),\n connections={(dr.CONNECTION_NETWORK_MAC, self._data.controller.mac)},\n name=self._data.controller.name.capitalize(),\n manufacturer=\"RainMachine\",\n model=(\n f\"Version {self._version_coordinator.data['hwVer']} \"\n f\"(API: {self._version_coordinator.data['apiVer']})\"\n ),\n sw_version=self._version_coordinator.data[\"swVer\"],\n )", "def load_device():", "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n configuration_url=f\"http://{self.coordinator.host}\",\n connections={(CONNECTION_NETWORK_MAC, self.coordinator.mac)},\n identifiers={(DOMAIN, self.coordinator.unique_id)},\n manufacturer=\"AVM\",\n model=self.coordinator.model,\n name=self._device_name,\n sw_version=self.coordinator.current_firmware,\n )", "def get_device_properties(device):\n results = devices.show(device)\n return jsonify(results)", "def device(self) -> str:\n return self._device", "def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = 
re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]", "def get_device(self):\n raise NotImplementedError()", "def __init__(self):\n self._device_info = None", "def print_device_info(device_info):\n assert(isinstance(device_info, tuple))\n assert(len(device_info) == 4)\n print(\" Device Name : %s\" % device_info[0])\n print(\" OS Type : %s\" % device_info[1])\n print(\" IP Address : %s\" % device_info[2])\n print(\" Interfaces : %s\" % device_info[3])", "def hdu_info(self):\n return self._hdusinfo", "def get_device_map():\n ret = []\n vlist = subprocess.check_output(['ceph-volume', 'lvm', 'list',\n '--format=json'])\n for osd_id, data in json.loads(vlist.decode('utf8')).items():\n osd_id = normalize_osd_id(osd_id)\n for elem in data:\n for device in elem['devices']:\n ret.append({'id': osd_id, 'path': device})\n return ret", "async def async_update_device_info(self) -> None:\n data = await self._async_request(\"get\", \"device\")\n self._device_info = cast(Dict[str, Any], data)", "def get_os_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_OS_INFO)", "def GetGPU():\n return option['device_id']", "def get_cmdb_data(device_type):\n pass", "def test_get_info(self):\n self.addCleanup(self.sdkapi.guest_delete, self.userid)\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid, self.image_name)\n\n # get info in shutdown state\n info_off = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_off['power_state'], 'off')\n self.assertEquals(info_off['mem_kb'], 0)\n self.assertEquals(info_off['cpu_time_us'], 0)\n\n # get info in active state\n self.sdkapi.guest_start(self.userid)\n self.assertTrue(self.sdkutils.wait_until_guest_in_power_state(\n self.userid, 'on'))\n time.sleep(1)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)\n\n # get info in paused state\n self.sdkapi.guest_pause(self.userid)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)", "def get_info(self, charger):\n data = {\n \"device_id\": self.uuid,\n \"cmd\": \"get_info\",\n \"token\": charger.token(),\n \"account_token\": self.api_token\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.post(\"{}/box_api_secure\".format(self.BASE_URL),\n data=json.dumps(data),\n headers=headers)\n response_json = response.json()\n return response_json", "def device(self):\n return self._device", "def device(self):\n hw = self.hw()\n if hw: return hw.device()" ]
[ "0.6951333", "0.6848414", "0.6711916", "0.6700383", "0.667151", "0.66525185", "0.6542051", "0.6531767", "0.6522371", "0.6514975", "0.64998937", "0.64957225", "0.6470811", "0.6451061", "0.6438751", "0.6416084", "0.64145595", "0.6406177", "0.63985395", "0.63964", "0.63928473", "0.63734853", "0.63632107", "0.63554406", "0.63509446", "0.6329412", "0.6324491", "0.6317867", "0.63084006", "0.6302769", "0.6280191", "0.6258136", "0.62369365", "0.6228133", "0.62167454", "0.62027335", "0.61848366", "0.6155498", "0.61466146", "0.6135541", "0.6102778", "0.6099176", "0.6099176", "0.60942703", "0.6066692", "0.6065092", "0.606332", "0.604526", "0.604238", "0.6019758", "0.6004032", "0.59858084", "0.59836787", "0.5969735", "0.59616864", "0.5961548", "0.5942899", "0.5919822", "0.5913994", "0.59113616", "0.5886729", "0.5880508", "0.58774924", "0.5855629", "0.58474416", "0.5836899", "0.5830365", "0.5817301", "0.58117193", "0.5805722", "0.5789466", "0.5767759", "0.5766544", "0.5758519", "0.57571816", "0.5753536", "0.57410514", "0.57205933", "0.5712251", "0.5711968", "0.56992036", "0.5698207", "0.5697165", "0.5687142", "0.56836647", "0.56804305", "0.5668906", "0.5650099", "0.5638649", "0.5624789", "0.56210893", "0.5616557", "0.56050885", "0.5602616", "0.56021786", "0.558938", "0.55881745", "0.55875903", "0.55871695", "0.558699" ]
0.6898668
1
Gather ceph manager information
def get_manager_info(handle, timeout):
    mgr_info = dict()
    mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)
    mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump', timeout)
    mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata', timeout)
    return mgr_info
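For context, a minimal usage sketch of the function above. It assumes `handle` is a connected `rados.Rados` client and that the undefined `ceph_mon_command` helper wraps `Rados.mon_command()` roughly as shown; both the helper and the connection details are assumptions for illustration, not part of the dataset record.

import json
import rados

def ceph_mon_command(handle, command, timeout):
    # Assumed stand-in for the helper used by get_manager_info():
    # issue a monitor command and return its raw output buffer.
    cmd = json.dumps({'prefix': command, 'format': 'plain'})
    _ret, outbuf, _outs = handle.mon_command(cmd, b'', timeout=timeout)
    return outbuf

# Connect to the cluster (path to ceph.conf is an assumption) and gather
# the manager information, then print the size of each section.
cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()
try:
    mgr_info = get_manager_info(cluster, timeout=30)
    for section, payload in mgr_info.items():
        print(section, len(payload), 'bytes')
finally:
    cluster.shutdown()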
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def get_monitor_info(handle, timeout):\n mon_info = dict()\n mon_info['stat'] = ceph_mon_command(handle, 'mon stat' , timeout)\n mon_info['dump'] = ceph_mon_command(handle, 'mon dump' , timeout)\n mon_info['map'] = ceph_mon_command(handle, 'mon getmap' , timeout)\n mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout)\n return mon_info", "def manage_info():", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")", "def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)\n\n return device_info", "def get_ceph_srv_info():\n services = []\n for name, pid in get_ceph_pids():\n process = psutil.Process(pid)\n services.append(CEPHSrvInfo(name, pid, process.get_cpu_percent(),\\\n process.memory_info().rss))\n return services", "def manager():\n pass", "def info(client):\n\n return client.get_info()", "def _get_conf():\n 
configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def get_health_info(handle, timeout):\n health = dict()\n\n health['stat'] = ceph_mon_command(handle, 'health' , timeout)\n # TODO command not known with ceph_mon_command\n #health['detail'] = ceph_mon_command(handle, 'health detail', timeout)\n health['detail'] = shell_command('ceph health detail') + b'\\n'\n health['df'] = ceph_mon_command(handle, 'df' , timeout)\n health['report'] = ceph_mon_command(handle, 'report' , timeout)\n\n return health", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def fp_meta(self):\n for server in self.machines:\n s = self.machines[server]\n print \"%s: %s (%s)\" % (s.id, s.adminPass, s)", "def getInfo():", "def all_details(self):\n self.which_owner()\n self.which_security()\n self.zabbix_details()\n\n for service, value in self.service_discovery.items():\n self.details[\"services\"][service] = self.which_service(service, **value)", "def summary(self):\n\n result = dict()\n\n result[\"control_manager\"] = self._control_manager.summary()\n result[\"data_logger\"] = self._db_manager.summary()\n result[\"alarm_manager\"] = self._alarm_manager.summary()\n result[\"machine_manager\"] = self._machine_manager.summary()\n result[\"function_manager\"] = self._function_manager.summary()\n\n return result", "def rpc_info():", "def get_snmp_information(self):\n\n snmp_output = self._send_command('/snmp print')\n snmp_community_output = self._send_command(\n '/snmp community print terse')\n\n snmp = parse_output(snmp_output)\n community_list = parse_terse_output(snmp_community_output)\n\n community = {}\n\n for item in community_list:\n community.setdefault(item.get('name'), {\n 'acl': item.get('addresses'),\n 'mode': u'rw' if item.get('write-access') == 'yes' else u'ro'\n })\n\n return {\n 'contact': snmp.get('contact'),\n 'location': snmp.get('location'),\n 'community': community,\n 'chassis_id': ''\n }", "def retrieve(self):\n\t\timport shelve\n\t\timport sys\n\t\timport glob\n\n\t\td = shelve.open(\".storedmanager\")\n\t\tif not d.has_key(\"storedmanager\"):\n\t\t\t# Check if already is done the file\n\t\t\tif len(glob.glob(\"*.tar.gz\")) != 0:\n\t\t\t\tmessage = \"clustermanager.retrive: The job is already DONE!\"\n\t\t\telse:\n\t\t\t\tmessage = \"\\nclustermanager.retrieve: ERROR Not found the\" \\\n\t\t\t\t\t+\" class stored in .storedmanager file\"\n\t\t\tsys.exit(message)\n\n\t\tcopyself = 
d[\"storedmanager\"]\n\t\t\n\t\t# Putting the datamembers: FIXME: If you want all the datanames\n\t\t# do it with the __dict__ and __setattr__ methods\n\t\tself.nameID = copyself.nameID\n\t\tself.jobsid = copyself.jobsid\n\t\tself.jobidevt = copyself.jobidevt\n\t\tself.tasksID = copyself.tasksID\n\t\tself.outputfiles = copyself.outputfiles\n\t\tself.njobs = copyself.njobs\n\t\tself.basedir = copyself.basedir\n\t\tself.pkgpath = copyself.pkgpath\n\t\tself.libsdir = copyself.libsdir\n\t\tself.nevents = copyself.nevents\n\t\ttry:\n\t\t\tself.taskstatus = copyself.taskstatus\n\t\texcept AttributeError:\n\t\t\t# It means we have not yet done a previous harvest\n\t\t\tpass\n\t\t\n\t\td.close()", "def manager_status(self, msg):\n result = {\n 'success': 0,\n 'msg': 'Service Manager Status',\n 'result': {\n 'status': 'running',\n 'uname': platform.uname(),\n 'frontend_endpoint': self.frontend_endpoint,\n 'backend_endpoint': self.backend_endpoint,\n 'sink_endpoint': self.sink_endpoint,\n 'mgmt_endpoint': self.mgmt_endpoint,\n 'result_publisher_port': self.result_pub_port,\n }\n }\n\n return result", "def bdev_nvme_get_discovery_info(client):\n return client.call('bdev_nvme_get_discovery_info')", "def manager_config(self, manager):\n _, body = self.request('/v1.1/managers/configs/%s' % manager, 'GET')\n return body", "def get_manager_stats(self):\n try:\n names, quantities, types, passwords = zip(*[(manager.name,\n manager.transports_in_fleet, manager.fleet_type, manager.password)\n for manager in self.manager_agents.values()])\n except ValueError:\n names, quantities, types, passwords = [], [], [], []\n\n df = pd.DataFrame.from_dict(\n {\"password\": passwords, \"name\": names, \"transports_in_fleet\": quantities, \"fleet_type\": types})\n return df", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def check_manager_status(brief=True):\n ret = {\n \"manager\": {\n \"manager_id\": None,\n \"queues\": [],\n \"queue_len\": [],\n \"status\": \"stopped\",\n },\n \"workers\": [],\n \"fabrics\": [],\n \"total_queue_len\": 0,\n }\n seq = get_random_sequence()\n msg = eptMsg(MSG_TYPE.GET_MANAGER_STATUS, seq=seq, data={\"brief\": brief})\n #logger.debug(\"get manager status (seq:0x%x) brief:%r\", seq, brief)\n redis = get_redis()\n p = redis.pubsub(ignore_subscribe_messages=True)\n p.subscribe(MANAGER_CTRL_RESPONSE_CHANNEL)\n redis.publish(MANAGER_CTRL_CHANNEL, msg.jsonify())\n start_ts = time.time()\n timeout = AppStatus.MANAGER_STATUS_TIMEOUT \n try:\n if brief:\n timeout = AppStatus.MANAGER_STATUS_BRIEF_TIMEOUT\n while start_ts + timeout > time.time():\n data = p.get_message(timeout=1)\n if data is not None:\n channel = data[\"channel\"]\n if channel == MANAGER_CTRL_RESPONSE_CHANNEL:\n msg = eptMsg.parse(data[\"data\"]) \n if msg.msg_type == MSG_TYPE.MANAGER_STATUS:\n #logger.debug(\"received manager status (seq:0x%x)\", msg.seq)\n ret[\"manager\"] = msg.data[\"manager\"]\n ret[\"workers\"] = msg.data[\"workers\"]\n ret[\"fabrics\"] = msg.data[\"fabrics\"]\n ret[\"total_queue_len\"] = msg.data[\"total_queue_len\"]\n return ret\n except Exception as e:\n logger.debug(\"Traceback:\\n%s\", traceback.format_exc())\n logger.debug(\"error: %s\", e)\n finally:\n if redis is not None and hasattr(redis, \"connection_pool\"):\n redis.connection_pool.disconnect()\n\n logger.warn(\"no manager response within timeout(%s sec)\", timeout)\n return ret", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", 
resource_name=\"cluster-info\")", "async def botinfo(self, ctx):\n\n dev = await self.bot.fetch_user(170506717140877312)\n\n start = perf_counter()\n status_msg = await ctx.send('Beregner ping...')\n end = perf_counter()\n ping = int((end - start) * 1000)\n\n now = time()\n diff = int(now - self.bot.uptime)\n days, remainder = divmod(diff, 24 * 60 * 60)\n hours, remainder = divmod(remainder, 60 * 60)\n minutes, seconds = divmod(remainder, 60)\n\n process = Process(getpid())\n memory_usage = round(process.memory_info().rss / 1000000, 1)\n cpu_percent = process.cpu_percent()\n\n total_members = []\n online_members = []\n idle_members = []\n dnd_members = []\n offline_members = []\n for guild in self.bot.guilds:\n for member in guild.members:\n if member.id in total_members:\n continue\n total_members.append(member.id)\n if str(member.status) == 'online':\n online_members.append(member.id)\n elif str(member.status) == 'idle':\n idle_members.append(member.id)\n elif str(member.status) == 'dnd':\n dnd_members.append(member.id)\n elif str(member.status) == 'offline':\n offline_members.append(member.id)\n\n embed = discord.Embed(color=ctx.me.color, url=self.bot.misc['website'])\n embed.set_author(name=dev.name, icon_url=dev.avatar_url)\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n embed.add_field(name='Dev', value=f'{dev.mention}\\n{dev.name}#{dev.discriminator}')\n embed.add_field(name='Oppetid', value=f'{days}d {hours}t {minutes}m {seconds}s')\n embed.add_field(name='Ping', value=f'Ekte ping: {ping} ms\\nWebsocket ping: {int(self.bot.latency * 1000)} ms')\n embed.add_field(name='Servere', value=len(self.bot.guilds))\n embed.add_field(name='Discord.py', value=discord.__version__)\n embed.add_field(name='Python', value=platform.python_version())\n embed.add_field(name='Ressursbruk', value=f'RAM: {memory_usage} MB\\nCPU: {cpu_percent}%')\n embed.add_field(name='Maskin', value=f'{platform.system()} {platform.release()}')\n embed.add_field(name=f'Brukere ({len(total_members)})',\n value=f'{self.bot.emoji[\"online\"]}{len(online_members)} ' +\n f'{self.bot.emoji[\"idle\"]}{len(idle_members)} ' +\n f'{self.bot.emoji[\"dnd\"]}{len(dnd_members)} ' +\n f'{self.bot.emoji[\"offline\"]}{len(offline_members)}')\n embed.add_field(name='Lenker', value='[Inviter](https://discordapp.com/oauth2/authorize?client_' +\n f'id={self.bot.user.id}&permissions=388174&scope=bot) ' +\n f'| [Nettside]({self.bot.misc[\"website\"]}) ' +\n f'| [Kildekode]({self.bot.misc[\"source_code\"]})')\n await Defaults.set_footer(ctx, embed)\n await status_msg.edit(embed=embed, content=None)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def get_host_stats(self, refresh=False):", "def is_mgr():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.is_sac()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.is_xcat_mgr()\n\n return False", "def do_serverinfo(self, server):\n print('QManager server:', self._server)\n server_info = self._qm.get_server_info()\n for k, v in server_info.items():\n print(' %s: %s' % (k, v))", "def gather_info_and_display():\n # Obtain total rss displayed in 
memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def 
mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def run(ceph_cluster, **kw):\n config = kw[\"config\"]\n installer = ceph_cluster.get_nodes(role=\"installer\")[0]\n cephadm = CephAdm(installer)\n\n # Set value of osd memory target autotune to False\n cephadm.ceph.config.set(\n key=\"osd_memory_target_autotune\", value=\"false\", daemon=\"osd\"\n )\n log.info(\"OSD Memory autotuning is now set to False\")\n\n # Set the memory target at the host level\n host = config.get(\"hosts\")\n value = config.get(\"value\")\n node = get_nodes_by_ids(ceph_cluster, host)\n hostname = node[0].shortname\n log.info(\"Setting the memory target on host\")\n cephadm.ceph.config.set(\n key=\"osd_memory_target\", value=value, daemon=\"osd/host:{hostname}\"\n )\n log.info(f\"Memory target is set on host '{hostname}' : '{value}'\")\n\n # Verify that the option is set in ceph config dump\n kw = {\"format\": \"json-pretty\"}\n out = cephadm.ceph.config.dump(**kw)\n data = json.loads(out[0])\n found = False\n for data_config in data:\n if data_config[\"name\"] == \"osd_memory_target\" and data_config[\"value\"] == str(\n value\n ):\n found = True\n break\n if not found:\n raise ConfigError(\n \"osd_memory_target not found or has an incorrect value in ceph config\"\n )\n log.info(\"osd memory target is validated in ceph config\")\n\n # Verify osd memory target for osd on the host matches the host level value\n out = cephadm.ceph._osd.tree(**kw)\n data = json.loads(out[0])\n for item in data[\"nodes\"]:\n if item.get(\"type\") == \"host\" and item.get(\"name\") == hostname:\n osd_list = item[\"children\"]\n osd_id = f\"osd.{str(osd_list[0])}\"\n out = cephadm.ceph.config.get(who=\"osd_id\", key=\"osd_memory_target\")\n if out[0].strip() not in str(value):\n raise UnexpectedStateError(\n f\"osd memory target for '{osd_id}' doesnot match host value '{value}' \"\n )\n log.info(\n f\"osd memory target for '{osd_id}' matches the host \"\n f\"level value : '{value}'\"\n )\n return 0", "def get_info(self):\n pass", "def get_info(self):\n pass", "def main():\n partition = 'Common'\n login, pwd, device = get_device_data()\n mgmt = ManagementRoot(login, pwd, device)\n ltm = mgmt.tm.ltm\n pools = ltm.pools.get_collection()\n nodes = ltm.nodes.get_collection()\n display_nodes(nodes)\n display_pools(pools)\n virtuals = ltm.virtuals.get_collection()\n display_virtuals(virtuals)", "def run(ceph_cluster, **kw):\n\n node = ceph_cluster.get_nodes(role=\"mgr\")[0]\n\n # Enable dashboard plug-in in non-active mgr\n out = CephAdm(node).ceph.mgr.module(action=\"enable\", module=\"dashboard\")\n exp_out = \"enabled\"\n if exp_out not in out:\n DashboardPluginWithCephMgr(\"Dashboard plug-in not enable\")\n\n # Verification dashboard plug-in\n out = CephAdm(node).ceph.mgr.module(action=\"ls\")\n exp_out = \"dashboard\"\n if exp_out not in out:\n DashboardPluginWithCephMgr(\"Dashboard plug-in not listed under enabled_modules\")\n\n # Verification mgr service\n out = CephAdm(node).ceph.mgr.services()\n if exp_out not in out:\n DashboardPluginWithCephMgr(\"Dashboard plug-in not listed under enabled_modules\")\n return 0", "def get(self):\n ret_dict = {}\n\n ret_dict[\"autoignore_rules\"] = self.shared_memory_manager_dict[\n \"autoignore_rules\"\n ]\n\n ret_dict[\"config_timestamp\"] = self.shared_memory_manager_dict[\n \"config_timestamp\"\n ]\n\n self.write(ret_dict)", "def get_info():\n\n global DISKINFO\n DISKINFO = {}\n\n #Run diskutil list to get disk 
names.\n runcmd = subprocess.Popen(\"diskutil list -plist\", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n\n #Get the output.\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n global PLIST\n\n PLIST = plistlib.loads(stdout)\n\n #Find the disks.\n for disk in PLIST[\"AllDisks\"]:\n #Run diskutil info to get disk info.\n runcmd = subprocess.Popen(\"diskutil info -plist \"+disk, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n PLIST = plistlib.loads(stdout)\n\n #Check if the disk is a partition.\n disk_is_partition = is_partition(disk)\n\n if not disk_is_partition:\n #These are devices.\n get_device_info(disk)\n\n else:\n #These are Partitions. Fix for disks w/ more than 9 partitions.\n host_disk = \"/dev/\"+disk.split(\"s\")[0]+\"s\"+disk.split(\"s\")[1]\n get_partition_info(disk, host_disk)\n\n #Check we found some disks.\n if not DISKINFO:\n raise RuntimeError(\"No Disks found!\")", "def bdev_nvme_get_mdns_discovery_info(client):\n return client.call('bdev_nvme_get_mdns_discovery_info')", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_obj = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_obj)\n checksum = \"crc32c\"\n\n def restart_osd_service():\n osd_services = rados_obj.list_orch_services(service_type=\"osd\")\n for osd_service in osd_services:\n cephadm.shell(args=[f\"ceph orch restart {osd_service}\"])\n time.sleep(30)\n\n def create_pool_write_iops(param, pool_type):\n try:\n pool_name = f\"{pool_type}_pool_{param}\"\n assert (\n rados_obj.create_pool(pool_name=pool_name)\n if \"repli\" in pool_type\n else rados_obj.create_erasure_pool(\n name=pool_name, **{\"pool_name\": pool_name}\n )\n )\n if param == checksum:\n # set checksum value for the pool\n rados_obj.set_pool_property(\n pool=pool_name, props=\"csum_type\", value=param\n )\n # verify checksum value for the pool\n assert (\n param\n == rados_obj.get_pool_property(pool=pool_name, props=\"csum_type\")[\n \"csum_type\"\n ]\n )\n # rados bench will perform IOPs and also verify the num of objs written\n assert rados_obj.bench_write(\n pool_name=pool_name, **{\"max_objs\": 500, \"verify_stats\": False}\n )\n except Exception:\n raise\n finally:\n assert rados_obj.detete_pool(pool=pool_name)\n\n def modify_cache_size(factor):\n cache_value = int(1073741824 * factor)\n cache_cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_cache_size_hdd\",\n \"value\": cache_value,\n }\n assert mon_obj.set_config(**cache_cfg)\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_hdd\")\n log.info(\n f\"bluestore_cache_size_hdd modified value - {out} | Expected {cache_value}\"\n )\n assert int(out.strip(\"\\n\")) == cache_value\n\n cache_value = int(3221225472 * factor)\n cache_cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_cache_size_ssd\",\n \"value\": cache_value,\n }\n assert mon_obj.set_config(**cache_cfg)\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_ssd\")\n log.info(\n f\"bluestore_cache_size_ssd modified value - {out} | Expected {cache_value}\"\n )\n assert int(out.strip(\"\\n\")) == cache_value\n\n if config.get(\"checksums\"):\n doc = (\n \"\\n #CEPH-83571646\"\n \"\\n\\t Apply all the applicable different checksum algorithms on pools backed by bluestore\"\n \"\\n\\t\\t Valid algos: none, crc32c, crc32c_16, crc32c_8, xxhash32, 
xxhash64\"\n \"\\n\\t 1. Create individual replicated pools for each checksum\"\n \"\\n\\t 2. Verify the default checksum algorithm is crc32c\"\n \"\\n\\t 3. Set different checksum algorithm as global and for each pool\"\n \"\\n\\t 4. Verify the checksum algo being set correctly\"\n \"\\n\\t 5. Write data to each pool using rados bench\"\n \"\\n\\t 6. cleanup - Remove all the pools created\"\n )\n log.info(doc)\n log.info(\"Running test case to verify BlueStore checksum algorithms\")\n checksum_list = config.get(\"checksums\")\n\n try:\n # verify default checksum value\n out, _ = cephadm.shell([\"ceph config get osd bluestore_csum_type\"])\n log.info(f\"BlueStore OSD default checksum: {out} | Expected: crc32c\")\n assert \"crc32c\" in out\n\n for checksum in checksum_list:\n # create pools with given config when OSD csum_type is default crc32c\n create_pool_write_iops(\n param=checksum, pool_type=\"replicated\"\n ) if \"crc\" in checksum else create_pool_write_iops(\n param=checksum, pool_type=\"ec\"\n )\n\n for checksum in checksum_list:\n # set the global checksum value\n cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_csum_type\",\n \"value\": checksum,\n }\n assert mon_obj.set_config(**cfg)\n\n # verify the newly set global checksum value\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_csum_type\")\n assert checksum in out\n log.info(f\"global checksum set verified - {out}\")\n\n # create pools with given config when OSD csum_type is varied\n create_pool_write_iops(\n param=checksum, pool_type=\"replicated\"\n ) if \"crc\" in checksum else create_pool_write_iops(\n param=checksum, pool_type=\"ec\"\n )\n\n except Exception as E:\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.error(E)\n log.exception(E)\n return 1\n finally:\n # reset global checksum config\n assert mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_csum_type\"}\n )\n\n # restart osd services\n restart_osd_service()\n wait_for_clean_pg_sets(rados_obj, timeout=300, _sleep=10)\n\n log.info(\"BlueStore Checksum algorithm verification completed.\")\n return 0\n\n if config.get(\"bluestore_cache\"):\n doc = (\n \"\\n #CEPH-83571675\"\n \"\\n\\t Verify BlueStore cache default values.\"\n \"\\n\\t Tune cache parameters and perform IOPS\"\n \"\\n\\t 1. Verify the default value for - bluestore_cache_size(0)\"\n \" | bluestore_cache_size_hdd (1GB) | bluestore_cache_size_ssd (3GB)\"\n \"\\n\\t 2. Modify the value of bluestore_cache_size_ssd and bluestore_cache_size_hdd\"\n \"\\n\\t 3. Verify the values being reflected in ceph config\"\n \"\\n\\t 4. Create replicated and ec pool and perform IOPS\"\n \"\\n\\t 5. 
cleanup - Remove all the pools created and reset configs modified\"\n )\n log.info(doc)\n log.info(\"Running test case to verify BlueStore Cache size tuning\")\n\n try:\n # verify default value for bluestore cache\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size\")\n log.info(f\"bluestore_cache_size default value - {out} | Expected 0\")\n assert int(out.strip(\"\\n\")) == 0\n\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_hdd\")\n log.info(\n f\"bluestore_cache_size_hdd default value - {out} | Expected 1073741824\"\n )\n assert int(out.strip(\"\\n\")) == 1073741824\n\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_ssd\")\n log.info(\n f\"bluestore_cache_size_ssd default value - {out} | Expected 3221225472\"\n )\n assert int(out.strip(\"\\n\")) == 3221225472\n\n # modify ssd and hdd cache (increase)\n modify_cache_size(factor=1.5)\n\n # restart osd services\n restart_osd_service()\n\n # perform iops\n create_pool_write_iops(param=\"cache_inc\", pool_type=\"replicated\")\n create_pool_write_iops(param=\"cache_inc\", pool_type=\"ec\")\n\n # modify ssd and hdd cache (decrease)\n modify_cache_size(factor=0.7)\n\n # restart osd services\n restart_osd_service()\n\n # perform iops\n create_pool_write_iops(param=\"cache_dec\", pool_type=\"replicated\")\n create_pool_write_iops(param=\"cache_dec\", pool_type=\"ec\")\n\n except Exception as E:\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.error(E)\n log.exception(E)\n return 1\n finally:\n # reset modified cache configs\n mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_cache_size_hdd\"}\n )\n mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_cache_size_ssd\"}\n )\n\n # restart osd services\n restart_osd_service()\n wait_for_clean_pg_sets(rados_obj, timeout=300, _sleep=10)\n\n log.info(\"BlueStore cache size tuning verification completed.\")\n return 0", "def ls(cls):\n for vm in cls._vm_agents_for_host():\n with vm:\n running = vm.qemu.process_exists()\n\n if running:\n vm_mem = vm.qemu.proc().memory_full_info()\n\n expected_size = (\n vm.cfg[\"memory\"] * 1024 * 1024\n + vm.qemu.vm_expected_overhead * 1024 * 1024\n )\n\n log.info(\n \"online\",\n machine=vm.name,\n cores=vm.cfg[\"cores\"],\n memory_booked=\"{:,.0f}\".format(vm.cfg[\"memory\"]),\n memory_pss=\"{:,.0f}\".format(vm_mem.pss / MiB),\n memory_swap=\"{:,.0f}\".format(vm_mem.swap / MiB),\n )\n else:\n log.info(\"offline\", machine=vm.name)", "def get(self):\n server = self.get_argument(\"server\")\n redis_info = self.stats_provider.get_info(server)\n databases=[]\n\n for key in sorted(redis_info.keys()):\n if key.startswith(\"db\"):\n database = redis_info[key]\n database['name']=key\n databases.append(database)\n\n total_keys=0\n for database in databases:\n total_keys+=database.get(\"keys\")\n\n if(total_keys==0):\n databases=[{\"name\" : \"db0\", \"keys\" : \"0\", \"expires\" : \"0\"}]\n\n redis_info['databases'] = databases\n redis_info['total_keys']= self.shorten_number(total_keys)\n\n uptime_seconds = redis_info['uptime_in_seconds']\n redis_info['uptime'] = self.shorten_time(uptime_seconds)\n\n commands_processed = redis_info['total_commands_processed']\n commands_processed = self.shorten_number(commands_processed)\n redis_info['total_commands_processed_human'] = commands_processed\n\n self.write(redis_info)", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n 
print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def get_managers():\n return {'managers': get_users('managers')}", "def _getComponentsInfo(self):\n result = {}\n et = ElementTree()\n components = self.agentCompleteConfig.listComponents_() + \\\n self.agentCompleteConfig.listWebapps_()\n for comp in components:\n compConfig = getattr(self.agentCompleteConfig, comp)\n daemonXml = os.path.join(compConfig.componentDir, \"Daemon.xml\")\n if not os.path.exists(daemonXml):\n logging.warn(\"%s: can't read file '%s' of component '%s', ignored.\" %\n (self.__class__.__name__, daemonXml, comp))\n continue\n tree = et.parse(daemonXml)\n pid = None\n for child in tree.getchildren():\n if child.tag == \"ProcessID\":\n pid = child.get(\"Value\")\n if pid:\n result[comp] = pid # componentName, componentPID\n return result", "def home(request):\n ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)\n\n cresp, cluster_health = ceph.health(body='json')\n sresp, cluster_status = ceph.status(body='json')\n\n # Monitors\n all_mons = cluster_status['output']['monmap']['mons']\n up_mons = cluster_status['output']['health']['timechecks']['mons']\n total_mon_count = len(all_mons)\n mons_ok = 0\n mons_warn = 0\n mons_crit = 0\n\n for mon in up_mons:\n if mon['health'] == \"HEALTH_OK\":\n mons_ok += 1\n else:\n mons_warn += 1\n\n mons_crit = 
total_mon_count - (mons_ok + mons_warn)\n\n # Activity\n pgmap = cluster_status['output']['pgmap']\n activities = {}\n if 'read_bytes_sec' in pgmap:\n activities['Read'] = filesize.naturalsize(pgmap.get('read_bytes_sec'))\n if 'write_bytes_sec' in pgmap:\n activities['Write'] = filesize.naturalsize(pgmap.get('write_bytes_sec'))\n if 'op_per_sec' in pgmap:\n activities['Ops'] = pgmap.get('op_per_sec')\n if 'recovering_objects_per_sec' in pgmap:\n activities['Recovering Objects'] = pgmap.get('recovering_objects_per_sec')\n if 'recovering_bytes_per_sec' in pgmap:\n activities['Recovery Speed'] = filesize.naturalsize(pgmap.get('recovering_bytes_per_sec'))\n if 'recovering_keys_per_sec' in pgmap:\n activities['Recovering Keys'] = pgmap.get('recovering_keys_per_sec')\n\n # Get a rough estimate of cluster free space. Is this accurate ?\n presp, pg_stat = ceph.pg_stat(body='json')\n bytes_total = cluster_status['output']['pgmap']['bytes_total']\n bytes_used = cluster_status['output']['pgmap']['bytes_used']\n\n data_avail, data_scale = filesize.naturalsize(bytes_total).split()\n scale = filesize.suffixes['decimal'].index(data_scale)+1\n data_used = round(float(bytes_used)/pow(1024, scale), 1)\n\n # pgs\n pg_statuses = cluster_status['output']['pgmap']\n\n pg_ok = 0\n pg_warn = 0\n pg_crit = 0\n\n # pg states\n pg_warn_status = re.compile(\"(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)\")\n pg_crit_status = re.compile(\"(down|inconsistent|incomplete|stale|peering)\")\n\n for state in pg_statuses['pgs_by_state']:\n if state['state_name'] == \"active+clean\":\n pg_ok = pg_ok + state['count']\n\n elif pg_warn_status.search(state['state_name']):\n pg_warn = pg_warn + state['count']\n\n elif pg_crit_status.search(state['state_name']):\n pg_crit = pg_crit + state['count']\n\n # pg statuses\n pg_states = dict()\n\n for state in pg_statuses['pgs_by_state']:\n pg_states[state['state_name']] = state['count']\n\n # osds\n dresp, osd_dump = ceph.osd_dump(body='json')\n osd_state = osd_dump['output']['osds']\n\n osds_ok = 0\n osds_warn = 0\n osds_crit = 0\n\n # Possible states are: exists, up, autoout, new, ???\n osd_up = re.compile(\"(?=.*exists)(?=.*up)\")\n osd_down = re.compile(\"(?=.*exists)(?=.*autoout)\")\n\n for osd_status in osd_state:\n if osd_up.search(str(osd_status['state'])):\n osds_ok += 1\n elif osd_down.search(str(osd_status['state'])):\n osds_warn += 1\n else:\n osds_crit += 1\n\n return render_to_response('dashboard.html', locals())", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def ceph_enabled(self):", "def get_supervisor_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_SUPERVISOR_INFO)", "def get_info_from_os_resource_manager(client_provider, resource_name):\n resource_description = resources_description[resource_name]\n\n client_name = resource_description[\"retrieved_from_component\"]\n client_inst = getattr(client_provider, client_name)\n\n client_api_version = utils.get_nested_attr(\n client_inst,\n client_provider.clients_version_attr_path[client_name]\n )\n\n matched_api = \\\n resource_description[\"supported_api_versions\"][client_api_version]\n\n resource_manager_name = matched_api[\"resource_manager_name\"]\n resource_manager = getattr(client_inst, resource_manager_name)\n\n attributes_white_list = matched_api[\"attributes_white_list\"]\n\n additional_display_options = \\\n matched_api.get(\"additional_display_options\", {})\n\n 
resource_info = _get_data_from_resource_manager(\n resource_manager,\n attributes_white_list,\n additional_display_options\n )\n\n return resource_info", "def list_podmanager(cls):\n return cls.dbdriver.list_podmanager()", "def fetch_status():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((GEARMAND_HOST, GEARMAND_PORT))\n log_verbose('Connected to Gearmand at %s:%s' % (GEARMAND_HOST, GEARMAND_PORT))\n except socket.error, e:\n collectd.error('gearmand_info plugin: Error connecting to %s:%d - %r'\n % (GEARMAND_HOST, GEARMAND_PORT, e))\n return None\n fp = s.makefile('r')\n log_verbose('Sending info command')\n s.sendall('status\\r\\n')\n\n status = {}\n while True:\n data = fp.readline().strip()\n log_verbose('Received data: %r' % data)\n if not data or data == '.':\n break\n function, total, running, available_workers = data.split('\\t')\n status[function] = {\n 'total': total,\n 'running': running,\n 'available_workers': available_workers}\n\n s.close()\n return status", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def bdev_nvme_get_controller_health_info(client, name):\n params = {}\n params['name'] = name\n return client.call('bdev_nvme_get_controller_health_info', params)", "def get_info_admin(self):\n return self.get_info(\"HS_ADMIN\")", "def gather_configs(self):\n configs = []\n for what in self.order:\n for key in self.plugins[what]:\n mgr = self.plugins[what][key]\n c = mgr.config(what='get')\n if c is not None:\n c.update({\n 'description': mgr.description\n })\n # print(\"Gathering configuration from \", c)\n configs.append(c)\n return configs", "def redis_info(self):\n def func(server):\n return server.info()\n self.__run_redis_cmd(func)", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")", "def bdev_rbd_get_clusters_info(client, name):\n params = {}\n if name:\n params['name'] = name\n return client.call('bdev_rbd_get_clusters_info', params)", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n 
self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def get_srv_config(name):\n cmd = \"ceph --admin-daemon %s/%s.asok config show\" % \\\n (CEPH_SOCKET_PATH, name)\n out = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, \\\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n return json.loads(out.stdout.read())", "def server_info(ctx):\n data = ctx.obj.get_server_info()\n output_json_data(data)", "def get_data_manager():\n return IncomingEdge.items", "def host_info(vm_hostname):\n with _get_vm(vm_hostname) as vm:\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n info = vm.info()\n\n # Disconnect fabric now to avoid messages after the table\n disconnect_all()\n\n categories = (\n ('General', (\n 'hypervisor',\n 'status',\n )),\n ('Network', (\n 'intern_ip',\n 'mac_address',\n )),\n ('Resources', (\n 'num_cpu',\n 'max_cpus',\n 'memory',\n 'memory_free',\n 'max_mem',\n 'disk',\n 'disk_size_gib',\n 'disk_free_gib',\n )),\n # Anything else will appear in this section\n ('Other', None),\n )\n\n def _progress_bar(free_key, capacity_key, result_key, unit):\n \"\"\"Helper to show nice progress bars.\"\"\"\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )\n\n _progress_bar('memory_free', 'memory', 'memory', 'MiB')\n _progress_bar('disk_free_gib', 'disk_size_gib', 'disk', 'GiB')\n\n max_key_len = max(len(k) for k in info.keys())\n for category, keys in categories:\n # Handle 'Other' section by defaulting to all keys\n keys = list(keys or info.keys())\n\n # Any info available for the category?\n if not any(k in info for k in keys):\n continue\n\n print('')\n print(white(category, bold=True))\n for k in keys:\n if k not in info:\n continue\n\n # Properly re-indent multiline values\n value = str(info.pop(k))\n value = ('\\n' + ' ' * (max_key_len + 3)).join(\n 
value.splitlines()\n )\n print('{} : {}'.format(k.ljust(max_key_len), value))", "def describe_cost_management_exports(self):\n return [{\"name\": self.export_name, \"container\": self.container, \"directory\": self.directory}]", "def startManager(self):\n\t\tlogging.info(\"----->>>The DeviceDataManager will be started\")\n\t\tself.sysPerfManager.startManager()\n\t\tself.sensorAdapterManager.startManager()\n\t\tif self.enableRedis:\n\t\t\tself.redisClient.connectClient()\n\t\t\n\t\tif self.enableMqtt:\n\t\t\tself.mqttClient.connectClient()", "def info():\n\n print('Maptool\\n--------\\n')\n print('Version: ' + __version__)\n print('Path: ' + __path__[0])\n print('Date: ' + __date__)\n print()\n\n import sys\n print('Python version=' + sys.version + '\\n')\n\n try:\n mm = __import__('pymongo')\n print('%10s %10s %s' % ('pymongo', mm.version, mm.__path__[0]))\n except ImportError:\n print('pymongo Not Found')\n\n for modui in ['numpy', 'scipy', 'mayavi', 'matplotlib', 'tqdm',\n 'future', 'nose', 'coverage', 'spglib', 'pyhull', 'pymatgen', 'qmpy', ]:\n try:\n mm = __import__(modui)\n print('%10s %10s %s' % (modui, mm.__version__, mm.__path__[0]))\n except ImportError:\n print('%10s %10s Not Found' % (modui, ''))\n\n if ASE:\n import ase\n #from ase import version as ase_version\n print('%10s %10s %s' % ('ase', ase.__version__, ase.__path__[0]))\n else:\n print('%10s %10s Not Found' % ('ase', ''))", "def getInfo(self):\n mr_job_cursor = self._storage.find()\n self._logger.info(\"Current jobs registered in MapReduce manager:\")\n for mr_job in mr_job_cursor:\n self._logger.info(\"\\t%s: Processed from %s to (%s --> %s)\" %\n (mr_job[\"_dataBlobID\"], mr_job['initial'], mr_job['start'], mr_job['end']))", "def info(self):\n return InfoManager(session=self._session)", "def mgmt_tool(self) -> MgmtClient:\n return self._mgmt_tool", "def _centec_manager_monitor(self, args):\n daemon = CentecDaemon(CentecTorMechanismDriver.pid_file,\n CentecTorMechanismDriver.centec_manger_name)\n if daemon.is_running():\n pass\n else:\n LOG.error(_(\"Centec manager is not running, restarting ...\"))\n self.run_centec_manager(args)", "def get(self):\n status = \"stopped\"\n shared_memory_locks[\"data_worker\"].acquire()\n if self.shared_memory_manager_dict[\"data_worker_running\"]:\n status = \"running\"\n shared_memory_locks[\"data_worker\"].release()\n if self.shared_memory_manager_dict[\"service_reconfiguring\"]:\n status += \",reconfiguring\"\n self.write({\"status\": status})", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if 
archs is None or best_arch is None or cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "async def get_system_info(hass, include_components):\n\n gate_id = hass.states.get('sensor.ais_secure_android_id_dom').state\n info_object = {\n 'arch': platform.machine(),\n 'dev': 'dev' in current_version,\n 'docker': False,\n 'os_name': platform.system(),\n 'python_version': platform.python_version(),\n 'timezone': dt_util.DEFAULT_TIME_ZONE.zone,\n 'version': current_version,\n 'virtualenv': os.environ.get('VIRTUAL_ENV') is not None,\n 'hassio': hass.components.hassio.is_hassio(),\n 'gate_id': gate_id,\n }\n\n if include_components:\n info_object['components'] = list(hass.config.components)\n\n if platform.system() == 'Windows':\n info_object['os_version'] = platform.win32_ver()[0]\n elif platform.system() == 'Darwin':\n info_object['os_version'] = platform.mac_ver()[0]\n elif platform.system() == 'FreeBSD':\n info_object['os_version'] = platform.release()\n elif platform.system() == 'Linux':\n import distro\n linux_dist = await hass.async_add_job(\n distro.linux_distribution, False)\n info_object['distribution'] = linux_dist[0]\n info_object['os_version'] = linux_dist[1]\n info_object['docker'] = os.path.isfile('/.dockerenv')\n\n return info_object", "def get_alerts(node: CephAdmin) -> dict:\n cmd = \"ceph health detail\"\n all_alerts = {}\n out, err = node.shell([cmd])\n regex = r\"(\\(MUTED[\\w\\s,-]*\\))?\\s*\\[\\w{3}\\]\\s([\\w_]*):\"\n alerts = re.findall(regex, out)\n all_alerts[\"active_alerts\"] = [alert[1] for alert in alerts if not alert[0]]\n all_alerts[\"muted_alerts\"] = [alert[1] for alert in alerts if alert[0]]\n return all_alerts", "def get_switch_details_from_mgmt(self, using):\n ret_output = {}\n #Get the console mgmt handle\n console = self.connect_mgmt_ip(using)\n console.sendline('terminal length 0')\n console.expect(SWITCH_PROMPT)\n console.sendline('show inventory | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['inv'] = console.before\n console.sendline('show system uptime | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['uptime'] = console.before\n console.sendline('show accounting log | grep \"configure\" | last 1')\n console.expect(SWITCH_PROMPT,120)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['idletime'] = console.before\n console.sendline('terminal length 15')\n console.expect(SWITCH_PROMPT)\n console.sendline('show clock | last 1')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['clock'] = console.before\n console.close()\n return ret_output", "async def serverinfo(self,ctx):\n g = ctx.guild\n embed = discord.Embed()\n embed.set_thumbnail(url = g.icon_url)\n embed.title = \"{} - {}\".format(g.name,g.id)\n embed.add_field(name = \"Owner\",value=\"{} - {}\".format(g.owner,g.owner.id),inline=False)\n embed.add_field(name = \"Created at\", value = str(g.created_at), inline=False)\n embed.add_field(name = \"Total Roles\", value= str(len(g.roles)), inline=False)\n embed.add_field(name = \"Total Members\", value= str(g.member_count), inline=False)\n 
embed.add_field(name = \"Premium Member\", value= str(g.premium_subscription_count), inline=False)\n embed.add_field(name = \"Premium Tier\", value= str(g.premium_tier), inline=False)\n await self.bot.say(ctx,embed = embed)", "def cli():\n while True:\n try:\n # Get the whole information on each edge.\n l_edge = list()\n s_rsc = '{}/edge'.format(etcdc.prefix)\n \n try:\n r = etcdc.read(s_rsc, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for child in r.children:\n l_app = list()\n d = ast.literal_eval(child.value)\n # get hosts\n print(PROJECT_ROOT + '/' + d['endpoint'])\n l_hosts = kube_list_node(PROJECT_ROOT + '/' + d['endpoint'])\n d['hosts'] = len(l_hosts)\n d_nodes = dict() # {'name': 'ip', ...}\n for item in l_hosts:\n d_nodes[item.metadata.name] = item.status.addresses[0].address\n # log.debug(d_nodes)\n # get # of tenants and apps\n l_tenants = get_tenant(d['name'])\n d['tenants'] = len(l_tenants)\n d['apps'] = 0\n for e in l_tenants:\n if 'app' in e:\n d['apps'] += len(e['app'])\n \n d['cpu'] = 0\n d['memory'] = 0\n i_total_cores = 0\n i_total_memory = 0\n i_total_storage = 0\n for h in l_hosts:\n i_total_cores += int(h.status.capacity['cpu'])\n i_total_memory += int(h.status.capacity['memory'].\n replace('Ki', ''))\n d['tot_cpu'] = i_total_cores\n d['tot_mem'] = int(i_total_memory / (1024*1024))\n \n # Get loadavg and free mem\n if d['name'] == 'edge1':\n ssh_server = 'harden.iorchard.co.kr'\n elif d['name'] == 'edge2':\n ssh_server = 'durant.iorchard.co.kr'\n RSC = 'ssh -p42544 {} get_rsc.sh'.format(ssh_server)\n (b_res, s_out) = cmd(RSC, 3, False)\n l = s_out.split(\"\\n\")\n d['used_cpu'] = (float(l[0]) + float(l[1]) + float(l[2]))\n avail_mem = (int(l[3]) + int(l[4]) + int(l[5])) / (1024*1024)\n d['used_mem'] = d['tot_mem'] - avail_mem\n d['cpu'] = int(d['used_cpu'] / d['tot_cpu'] * 100)\n d['memory'] = int(d['used_mem'] / d['tot_mem'] * 100)\n # ceph storage\n CEPH = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph exec -it \" \\\n + \"$(kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph get po \" \\\n + \"-l app=rook-ceph-tools \" \\\n + \"-o jsonpath='{.items[0].metadata.name}') -- \" \\\n + \"ceph df --format json\"\n (b_res, s_out) = cmd(CEPH, 3, False)\n print(s_out)\n d['status'] = 'Healthy' if b_res else 'Unhealthy'\n d_stor = ast.literal_eval(s_out)\n d['tot_stor'] = int(d_stor['stats']['total_bytes'] / pow(1024, 3))\n d['used_stor'] = int(d_stor['stats']['total_used_bytes'] / pow(1024, 3))\n d['storage'] = int(d['used_stor'] / d['tot_stor'] * 100)\n # Update etcd status\n try:\n s = '{}/edge/{}'.format(etcdc.prefix,\n d['name'])\n # log.debug(d)\n etcdc.write(s, d, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n # Update app status\n s_app = '{}/app'.format(etcdc.prefix)\n try:\n r_app = etcdc.read(s_app, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for app_child in r_app.children:\n if app_child.value is not None:\n d_app = dict()\n app = ast.literal_eval(app_child.value)\n if app['edge'] == d['name']:\n d_app['name'] = app['name']\n d_app['username'] = GUAC_USER\n d_app['password'] = GUAC_PASS\n # Get catalog info.\n s_cat = '{}/catalog/{}'.format(etcdc.prefix,\n app['catalog'])\n try:\n r_cat = etcdc.read(s_cat)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n cat = ast.literal_eval(r_cat.value)\n app['cat_type'] = cat['type']\n app['cat_name'] = cat['name']\n app['cat_logo'] = cat['logo']\n # Get 
app status\n if app['cat_type'] == 'vm':\n # first, look at DataVolume status of app.\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get dv ' \\\n + app['name'] \\\n + \" -o jsonpath='{range .status}{.phase},{.progress}{end}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n l_out = s_out.split(',')\n if l_out[0] == 'Succeeded':\n # Get vm status of app\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT \\\n + '/' \\\n + d['endpoint'] + ' get vm ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.ready}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res and s_out == 'true':\n # update app status 'running'.\n app.update({'status': 'running'})\n \n if app['edge'] == d['name']:\n # Get where app is running.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get vmi ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.nodeName}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['hostname'] = d_nodes[s_out]\n # Get nodeport for app.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get svc ' \\\n + app['name'] \\\n + \" -o jsonpath='{.spec.ports[0].nodePort}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n else:\n # update app status 'stopped'\n app.update({'status': 'stopped'})\n elif l_out[0] == 'ImportInProgress':\n # update app status 'building' and \n app.update({'status': 'building ({})'.format(l_out[1])})\n elif app['cat_type'] == 'container':\n app.update({'status': 'running'})\n \n try:\n s = '{}/app/{}'.format(etcdc.prefix,\n app['name'])\n # log.debug(app)\n etcdc.write(s, app, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n if 'port' in d_app:\n l_app.append(d_app)\n # render guac-config.j2 and copy it to guac broker server\n log.debug(l_app)\n template = env.get_template('broker.j2')\n s_out = template.render(l_app=l_app)\n s_tmp = '/tmp/{}.broker'.format(d['name'])\n try:\n with open(s_tmp, 'w') as f:\n f.write(s_out)\n except Exception as e:\n log.error(e)\n else:\n CMD = \"scp \" \\\n + \"-P42544 {} {}\".format(s_tmp, d['broker_ip']) \\\n + \":/etc/guacamole/noauth-config.xml\"\n log.debug(CMD)\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n \n l_edge.append(d)\n \n # log.debug(l_edge)\n log.debug(l_app)\n \n time.sleep(1)\n except:\n log.error('unknown error')", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n 
node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def info(self) -> dict:", "def GetManager(self):\r\n\r\n return self.manager", "def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')", "def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "async def mempool() -> dict:\n return {\"mempool\": chain.mempool_state()}", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def manager_configs_list(self):\n _, body = self.request('/v1.1/managers/configs', 'GET')\n return body", "def test_get_info(self):\n self.addCleanup(self.sdkapi.guest_delete, self.userid)\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid, self.image_name)\n\n # get info in shutdown state\n info_off = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_off['power_state'], 'off')\n self.assertEquals(info_off['mem_kb'], 0)\n self.assertEquals(info_off['cpu_time_us'], 0)\n\n # get info in active state\n self.sdkapi.guest_start(self.userid)\n self.assertTrue(self.sdkutils.wait_until_guest_in_power_state(\n self.userid, 'on'))\n time.sleep(1)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)\n\n # get info in paused state\n self.sdkapi.guest_pause(self.userid)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)", "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, str(self.coordinator.gios.station_id))},\n \"name\": DEFAULT_NAME,\n \"manufacturer\": MANUFACTURER,\n \"entry_type\": \"service\",\n }", "def get_service_summary(cls):\n\n try:\n monit_stats = subprocess.check_output([\"monit\", \"summary\"])\n except subprocess.CalledProcessError:\n logger.warn(\"get_service_summary: failed to query monit.\")\n raise ServiceException('Failed to query monit.')\n\n monit_stats_dict = {}\n for line in monit_stats.split(\"\\n\"):\n tokens = line.split()\n if 'Process' in tokens:\n process_name = tokens[1][1:-1] # Remove quotes.\n process_status = ' '.join(tokens[2:]).lower()\n monit_stats_dict[process_name] = process_status\n logger.debug(\"Monit stats: {}\".format(monit_stats_dict))\n\n # Get status of processes managed by the ServiceManager.\n monit_stats_dict.update(\n {'-'.join([server.type, str(server.port)]): server.state\n for server in ServiceManager.get_state()})\n\n return monit_stats_dict", "def __init__(self, processManager, clientManager):\n self.processManager = processManager\n self.clientManager = clientManager\n self.engine_types = {}\n self.engine_allocations = {}\n self.engine_instances = {}", "def info() -> None:", "def zabbix_details(self):\n 
if self.details[\"owner\"] == \"team-unclassified\":\n self.which_owner()\n\n self.details[\"zabbix\"] = {\"groups\": []}\n\n if self.details[\"datacenter\"] and self.details[\"env\"]:\n self.details[\"zabbix\"][\"groups\"].append(self.details[\"datacenter\"] + self.details['env'])\n elif self.details[\"datacenter\"]:\n self.details[\"zabbix\"][\"groups\"].append(self.details[\"datacenter\"])\n\n self.details[\"zabbix\"][\"groups\"].append(self.details[\"owner\"])\n\n if \"tools\" in self.details[\"owner\"]:\n self.details[\"zabbix\"][\"groups\"].append(\"team-tools\")\n\n if self.details[\"function\"] == \"vsnhn\":\n self.details[\"zabbix\"][\"groups\"].append(\"team-storage\")", "def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)" ]
[ "0.65334594", "0.6489982", "0.6204253", "0.60092634", "0.6008082", "0.5893398", "0.5754568", "0.5699375", "0.56716347", "0.5611492", "0.5582611", "0.5579014", "0.5577621", "0.5576179", "0.55696046", "0.5542833", "0.55382943", "0.55256027", "0.5521256", "0.55209965", "0.5510726", "0.5487898", "0.54478604", "0.54429203", "0.54427075", "0.5438311", "0.5398613", "0.53938276", "0.538157", "0.5380126", "0.5365327", "0.5357114", "0.534839", "0.5347106", "0.53367966", "0.5331016", "0.5328081", "0.5319689", "0.5301814", "0.5301814", "0.52993286", "0.5296856", "0.52921766", "0.52538985", "0.5246523", "0.523612", "0.5225568", "0.52212983", "0.5216496", "0.52140415", "0.520367", "0.51954615", "0.51905215", "0.51893836", "0.5186018", "0.5176467", "0.5170973", "0.51703864", "0.51672846", "0.51559854", "0.51470935", "0.51457655", "0.51409847", "0.5129511", "0.5126174", "0.5125438", "0.5122592", "0.5119406", "0.5114182", "0.51073456", "0.51061684", "0.5095137", "0.50920945", "0.50917006", "0.5089063", "0.5088282", "0.5086149", "0.5084047", "0.50794154", "0.50710046", "0.506718", "0.5066528", "0.50476176", "0.5040402", "0.5035995", "0.50324494", "0.5029376", "0.5028863", "0.5025484", "0.5024309", "0.5020682", "0.5019893", "0.50179416", "0.50168794", "0.50135034", "0.5011272", "0.5007003", "0.4998792", "0.49910393", "0.49833924" ]
0.7890347
0
Writes the diagnostics to specific files and creates a tarball for the same
def dict_to_files(result_dict, dest_dir):\n    tempdir = tempfile.mkdtemp()\n    # timestamp every generated dignostic file\n    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%I%S")\n    tarball = '{0}/ceph-collect_{1}.tar.gz'.format(dest_dir, timestamp)\n    with tarfile.open(tarball, 'w:gz') as tar:\n        for filename, content in result_dict.items():\n            for contentname, contentdata in content.items():\n                tmpfile = '{0}/{1}'.format(tempdir, filename + "-" + contentname)\n                LOGGER.debug('Writing file %s', tmpfile)\n                print('Writing file %s', tmpfile)\n                with open(tmpfile, 'wb') as f:\n                    f.write(contentdata)\n                    f.close()\n                tar.add(name=tmpfile, arcname='ceph-collect_{0}/{1}'.format(timestamp, filename + "-" + contentname))\n    tar.close()\n    LOGGER.info("Diagnostics are written to : "+ tarball)\n    LOGGER.info("Cleaning up temporary directory")\n    shutil.rmtree(tempdir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_tarball(args, tarfilename, archivefiles=[]):\n if not archivefiles:\n return None\n \n manifest_filename, manifest_uuid = render_manifest(args, archivefiles)\n try:\n with tarfile.open(tarfilename, f\"{FILE_FLAG}:gz\") as tarball:\n file_count = 0\n for fname in archivefiles:\n LOG.debug(f\"Adding {fname} to {tarfilename}: \")\n if fname.endswith(\".csv\"):\n upload_name = f\"{manifest_uuid}_openshift_usage_report.{file_count}.csv\"\n tarball.add(fname, arcname=upload_name)\n file_count += 1\n tarball.add(manifest_filename, arcname=\"manifest.json\")\n except FileExistsError as exc:\n LOG.critical(exc)\n sys.exit(2)\n LOG.info(f\"Wrote: {tarfilename}\")\n return f\"{tarfilename}\"", "def report(self, output_dir):", "def archive(self):\n logging.info(_('Creating compressed archive...'))\n\n report_file_ext = 'bz2'\n compressor = 'bzip2'\n caller = Caller({})\n try:\n caller.call('xz --version')\n report_file_ext = 'xz'\n compressor = 'xz'\n except Exception:\n logging.debug('xz compression not available')\n\n if not os.path.exists(self.conf[\"output\"]):\n os.makedirs(self.conf[\"output\"])\n\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s.tar.%s\" % (\n 'LogCollector',\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n if self.conf[\"ticket_number\"]:\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s-%s.tar.%s\" % (\n 'LogCollector',\n self.conf[\"ticket_number\"],\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n config = {\n 'report': os.path.splitext(self.conf['path'])[0],\n 'compressed_report': self.conf['path'],\n 'compressor': compressor,\n 'directory': self.conf[\"local_tmp_dir\"],\n 'rname': os.path.basename(self.conf['path']).split('.')[0],\n }\n caller.configuration = config\n shutil.move(\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n 'working'\n ),\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n config[\"rname\"]\n ),\n )\n caller.call(\"tar -cf '%(report)s' -C '%(directory)s' '%(rname)s'\")\n shutil.rmtree(self.conf[\"local_tmp_dir\"])\n caller.call(\"%(compressor)s -1 '%(report)s'\")\n os.chmod(self.conf[\"path\"], stat.S_IRUSR | stat.S_IWUSR)\n sha256_out = caller.call(\"sha256sum '%(compressed_report)s'\")\n checksum = sha256_out.split()[0]\n with open(\"%s.sha256\" % self.conf[\"path\"], 'w') as checksum_file:\n checksum_file.write(sha256_out)\n\n msg = ''\n if os.path.exists(self.conf[\"path\"]):\n archiveSize = float(os.path.getsize(self.conf[\"path\"])) / (1 << 20)\n\n size = '%.1fM' % archiveSize\n\n msg = _(\n 'Log files have been collected and placed in {path}\\n'\n 'The sha256 for this file is {checksum} and its size is {size}'\n ).format(\n path=self.conf[\"path\"],\n size=size,\n checksum=checksum,\n )\n\n if archiveSize >= 1000:\n msg += _(\n '\\nYou can use the following filters -c, -d, -H in the '\n 'next execution to limit the number of Datacenters,\\n'\n 'Clusters or Hosts that are collected in order to '\n 'reduce the archive size.'\n )\n return msg", "def final_output_analysis(samples_dict, dir_results_path):\n with open(path.join(dir_results_path, 'corrupted_processes.txt'), 'w', encoding='utf-8', errors='replace') as c_out:\n with open(path.join(dir_results_path, 'analysis.txt'), 'w', encoding='utf-8', errors='replace') as i_out:\n with open(path.join(dir_results_path, 'syscalls.txt'), 'w', encoding='utf-8', errors='replace') as s_out:\n for uuid in sorted(samples_dict.keys()):\n reduced_sample = samples_dict[uuid]\n\n i_out.write('{} 
{}\\n'.format(string_utils.filename, uuid))\n s_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n c_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n\n # corrupted processes section\n process_repr = '\\t\\t{:15s}\\t{:10d}\\t{:15s}\\tby:\\t{:15s}\\t{:10d}\\n'\n for process in reduced_sample.corrupted_processes:\n c_out.write(process_repr.format(process[0],\n process[1],\n process[2],\n process[3],\n process[4]))\n\n # instruction count section\n i_out.write(string_utils.out_final + '\\t' + str(reduced_sample.total_instruction) + '\\n')\n i_out.write(string_utils.out_terminating + '\\t' + str(reduced_sample.terminate_all) + '\\t')\n i_out.write(string_utils.out_sleeping + '\\t' + str(reduced_sample.sleep_all) + '\\t')\n i_out.write(string_utils.out_crashing + '\\t' + str(reduced_sample.crash_all) + '\\t')\n i_out.write(string_utils.out_raising_error + '\\t' + str(reduced_sample.error_all) + '\\t')\n i_out.write(string_utils.out_writes_file + '\\t' + str(reduced_sample.write_file) + '\\n')\n\n # system calls count section\n s_out.write(string_utils.syscall_final + '\\t' + str(reduced_sample.total_syscalls) + '\\n')\n\n i_out.write('\\n')\n s_out.write('\\n')\n c_out.write('\\n')", "def clean_up(self):\n try:\n data_dir = os.environ[\"DATA\"]\n plots_dir = os.environ[\"PLOTS\"]\n logs_dir = os.environ[\"LOGS\"]\n except KeyError as detail:\n print \"GenerateSpectrum.clean_up: error\", detail, \"not set\"\n print \" --> source analysis environment scripts before running!\"\n sys.exit(1)\n for root, dirs, files in os.walk(os.getcwd()):\n for file in files:\n is_data = re.search(r\".*\\.root$\", file)\n is_plot = re.search(r\".*\\.png$\", file)\n hostname = socket.gethostname()\n is_log = re.search(r\"^rat\\.\"+hostname+r\"\\.[0-9]+\\.log$\", file)\n if is_data:\n try:\n root_file = TFile(file)\n tree = root_file.Get(\"T\")\n tree.ls()\n except ReferenceError as detail:\n \"generate_spectrum.clean_up: error in TFile,\", detail\n sys.exit(1)\n file_manips.copy_file(os.path.join(root, file), data_dir)\n elif is_plot:\n file_manips.copy_file(os.path.join(root, file), plots_dir)\n elif is_log:\n file_manips.copy_file(os.path.join(root, file), logs_dir)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def write_report(self):\n\n def report_array(f, label, array):\n f.write(label)\n for val in array:\n f.write('{:.4f},\\t'.format(val))\n f.write('\\n')\n\n report_file = FLAGS.report_file\n\n with open(report_file, 'w') as f:\n f.write('Mean Error 2D: {}\\n'.format(\n safe_divide(self._error_2d, self._matched)))\n f.write('Mean 3D IoU: {}\\n'.format(\n safe_divide(self._iou_3d, self._matched)))\n f.write('Mean Azimuth Error: {}\\n'.format(\n safe_divide(self._azimuth_error, self._matched)))\n f.write('Mean Polar Error: {}\\n'.format(\n safe_divide(self._polar_error, self._matched)))\n\n f.write('\\n')\n f.write('IoU Thresholds: ')\n for threshold in self._iou_thresholds:\n f.write('{:.4f},\\t'.format(threshold))\n f.write('\\n')\n report_array(f, 'AP @3D IoU : ', self._iou_ap.aps)\n\n f.write('\\n')\n f.write('2D Thresholds : ')\n for threshold in self._pixel_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n 
report_array(f, 'AP @2D Pixel : ', self._pixel_ap.aps)\n f.write('\\n')\n\n f.write('Azimuth Thresh: ')\n for threshold in self._azimuth_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @Azimuth : ', self._azimuth_ap.aps)\n f.write('\\n')\n\n f.write('Polar Thresh : ')\n for threshold in self._polar_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @Polar : ', self._polar_ap.aps)", "def gatherfiles(self):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\t\timport tarfile\n\t\timport glob\n\t\t\n\t\tprint \"=== \",self.nameID,\": Joining all the files in one\"\n\t\t# FIXME: Only there are 1 file, not needed the hadd\n\t\tfinalfile = os.path.join(\"Results\",self.outputfile)\n\t\t# FIXED BUG: just cp when there is only one file, otherwise\n\t\t# there are problems with the TTree\n\t\tif len(self.outputfiles) == 1:\n\t\t\t# Note that when there is only 1 file, always its #task=1\n\t\t\tcommand = [ 'cp', self.outputfiles[1], finalfile ]\n\t\telse:\n\t\t\tcommand = [ 'haddPlus', finalfile ]\n\t\t\tfor f in self.outputfiles.itervalues():\n\t\t\t\tcommand.append( f )\n\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t# Checking if everything was allright\n\t\ttotalevts = self.getevents(finalfile,True)\n\t\tif totalevts != self.nevents:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m the total file\"\n\t\t\tmessage += \"'\"+finalfile+\"' do not contain all the events:\\n\"\n\t\t\tmessage += \"Total events to be processed:\"+str(self.nevents)+\"\\n\"\n\t\t\tmessage += \"Total events in '\"+finalfile+\"':\"+str(totalevts)+\"\\n\"\n\t\t\tprint message\n\t\t\treturn \n\t\t# If everything was fine, deleting the files \n\t\t# and cleaning the directory\n\t\tfor f in self.outputfiles.itervalues():\n\t\t\tos.remove( f )\n\t\t# Taring and compressing\n\t\tfilestotar = glob.glob(\"./*.*\")\n\t\tfilestotar.append( \".storedmanager\")\n\t\ttar = tarfile.open(os.path.basename(self.cwd)+\".tar.gz\",\"w:gz\")\n\t\tfor f in filestotar:\n\t\t\ttar.add(f)\n\t\ttar.close()\n\t\t# if everything was fine, deleting the files\n\t\tif os.path.exists(os.path.basename(self.cwd)+\".tar.gz\"):\n\t\t\tfor f in filestotar:\n\t\t\t\tos.remove(f)\n\t\telse:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m I can't manage\\n\"\n\t\t\tmessage += \"to create the backup .tar.gz file\\n\"\n\t\t\tprint message\n\n\t\tprint \"Created \"+finalfile\n\t\tprint \"========= Process Completed =========\"", "def setup(outpath):\n time = datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\n temp = os.path.join(outpath, \"data\", \"temp\")\n result = os.path.join(outpath, \"results\")\n logs = os.path.join(outpath, \"logs\")\n download = os.path.join(outpath, \"data\", \"download\")\n chromsizes = os.path.join(outpath,\n \"data\", \"chromsizes\")\n if not os.path.exists(download):\n os.makedirs(download)\n if not os.path.exists(temp):\n os.makedirs(temp)\n if not os.path.exists(result):\n os.makedirs(result)\n if not os.path.exists(logs):\n os.makedirs(logs)\n if not os.path.exists(chromsizes):\n os.makedirs(chromsizes)\n\n logname = time + \"_tfanalyzer.log\"\n logfile = os.path.join(logs, logname)\n logging.basicConfig(filename=logfile, level=logging.INFO)\n return logfile", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n 
self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def do_write(self, args):\n\t\tasplit = args.split(\" \")\n\t\tfname = asplit[0]\n\t\twhat = asplit[1]\n\n\t\tif what == \"summary\" or what == \"oldsummary\":\n\t\t\twith open(fname, 'w') as f:\n\t\t\t\tform = DresherInterface.summary_format if what == \"summary\" else DresherInterface.oldsummary_format\n\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\tf.write(x)\n\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t#for lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t#\tdw.writerow(dict(zip(form, [self.get_language_info(lang, x) for x in form])))\n\t\t\t\tfor lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\t\tf.write(str(self.get_language_info(lang, x)))\n\t\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"\\t\")\n\t\tif what == \"hierarchies\":\n\t\t\t# format: #vowels, langname, hierarchy, len(hier), #of marks, lfeats, inv, freq, \n\t\t\t# how many times each feat marked, the actual marks, vowel:feature set, unused features\n\t\t\t# take fname to be name of directory to write outfiles to\n\t\t\tif not os.path.exists(fname):\n\t\t\t\tos.mkdir(fname)\n\t\t\tfor lang in self.languages:\n\t\t\t\tnum_vowels = self.get_language_info(lang, \"linv\")\n\t\t\t\tname = lang.name\n\t\t\t\tnum_feats = self.get_language_info(lang, \"lfeats\")\n\t\t\t\tinv = self.get_language_info(lang, \"inv\")\n\t\t\t\tfreq = self.get_language_info(lang, \"freq\")\n\t\t\t\tinv_feats = lang.phone_feat_dict\n\t\t\t\twith open(os.path.join(fname,name.replace(\" \",\"\")+\".txt\"), 'w') as f:\n\t\t\t\t\tf.write(\"num_vowels\\tname\\thierarchy\\tlen_hier\\tnum_marks\\tnumfeats\\tinv\\tfreq\\tfeat_marks\\tinv_marks\\tinv_feats\\tunused_feats\\n\")\n\t\t\t\t\tfor h in lang.hierarchies:\n\t\t\t\t\t\tf.write(str(num_vowels))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(name)\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(h))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(len(h)))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tspec = SDA(lang._phones, lang._features, h)\n\t\t\t\t\t\tmarkedness = sum([x for phone in spec.keys() for x in spec[phone] if x == 1])\n\t\t\t\t\t\tf.write(str(markedness))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(num_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(freq))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tfeat_counts = {f:sum([spec[phone][i] for phone in spec.keys() if spec[phone][i] == 1]) for i, f in enumerate(h)}\n\t\t\t\t\t\tf.write(str(feat_counts))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(spec))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(list(set(lang._features)-set(h))))\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t# make sure all the threads that need to be finished have finished\n\t\t# using .join() on the appropriate groups of threads", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, 
os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def generate_report_directories_and_files(self):\n target = r'X:\\ANALYST WORK FILES\\Peter\\Rover\\reports\\ '\n mushroom_target = r'X:\\ANALYST WORK FILES\\Peter\\Rover\\reports\\mushroom_reports\\ '\n for key, value in self.finished_reports_dictionary.items():\n if self.basic_reports_dictionary == \"MUSH\":\n try:\n jobnumber = str(key)\n filename = mushroom_target[:-1] + jobnumber[0:6] + '\\\\' + jobnumber + '_raw.tex'\n filename = filename.replace('/', '-')\n with self.safe_open_w(filename) as f:\n f.write(value)\n except OSError:\n pass\n else:\n try:\n jobnumber = str(key)\n filename = target[:-1] + jobnumber[0:6] + '\\\\' + jobnumber + '_raw.tex'\n filename = filename.replace('/', '-')\n with self.safe_open_w(filename) as f:\n f.write(value)\n except OSError:\n pass\n if self.basic_reports_dictionary == \"MUSH\":\n pass\n else:\n for key, value in self.basic_reports_dictionary.items():\n try:\n jobnumber = str(key)\n filename = target[:-1] + jobnumber + '\\\\' + jobnumber + '.txt'\n filename = filename.replace('/', '-')\n with self.safe_open_w(filename) as f:\n for item in value:\n f.write(item[0])\n f.write(item[1].to_string())\n f.write('\\n\\n')\n except OSError:\n pass", "def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)", "def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n 
ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def main():\n global MASK\n start_time = time()\n parser = initArgparse()\n args = parser.parse_args()\n dirtree = args.directorytree\n filetree = args.filetree\n meta = args.metadata\n newmeta = args.newmetadata\n sfv = args.sfv\n yes = args.yes\n MASK = args.exclude\n\n for i in args.DIRECTORY:\n if Path(i).exists() is True:\n basepath = Path(i)\n else:\n raise NotADirectoryError(f\"{i} does not exist\")\n default = False\n if dirtree == sfv == filetree == meta == newmeta is False:\n default = True\n if dirtree is True or default is True:\n dirtree_file = f\"{basepath.name}_directory_tree.txt\"\n checkFileExists(basepath, dirtree_file, yes)\n createDirectoryTree(basepath, dirtree_file)\n if sfv is True or default is True:\n sfv_file = f\"{basepath.name}.sfv\"\n checkFileExists(basepath, sfv_file, yes)\n createSfv(basepath, sfv_file)\n if filetree is True or default is True:\n csvtree_file = f\"{basepath.name}_file_tree.csv\"\n jsontree_file = f\"{basepath.name}_file_tree.json\"\n checkFileExists(basepath, jsontree_file, yes)\n checkFileExists(basepath, csvtree_file, yes)\n createFileTree(basepath, jsontree_file, csvtree_file)\n if meta is True or default is True:\n metadata_file = f\"{basepath.name}_metadata.json\"\n checkFileExists(basepath, metadata_file, yes)\n createMetadata(basepath, metadata_file)\n if newmeta is True:\n createNewMetadata(basepath)\n filesCache.cache_clear()\n getFileInfo.cache_clear()\n killTika()\n\n stop_time = time()\n print(f\"Finished in {round(stop_time-start_time, 2)} seconds\")", "def setup_files(args):\n postfix = 'reinforce'\n has_value_model = False\n if args.baseline:\n postfix = \"reinforce-baseline\"\n has_value_model = True\n elif args.actor_critic:\n postfix = \"actor-critic\"\n has_value_model = True\n elif args.a2c:\n postfix = \"a2c\"\n has_value_model = True\n elif args.random:\n postfix = \"random\"\n\n # create the folder for log files\n try:\n os.mkdir(postfix)\n except FileExistsError:\n print(postfix, \" folder exists\")\n\n fileid = \"%s-%d\" % (postfix, int(time.time()))\n actor_weights = \"actor_weights-%s.h5\" % fileid\n actor_weights = os.path.join(postfix, actor_weights)\n encoder_weights = \"encoder_weights-%s.h5\" % fileid\n encoder_weights = os.path.join(postfix, 
encoder_weights)\n value_weights = None\n if has_value_model:\n value_weights = \"value_weights-%s.h5\" % fileid\n value_weights = os.path.join(postfix, value_weights)\n\n outdir = \"/tmp/%s\" % postfix\n\n misc = (postfix, fileid, outdir, has_value_model)\n weights = (actor_weights, encoder_weights, value_weights)\n\n return weights, misc", "def organise_qa_output(metadata, base_dir, write_tag):\n filenames = metadata['FITSImageFilename']\n for i, fits_file in enumerate(filenames):\n kat_target = katpoint.Target(metadata['KatpointTargets'][i])\n\n # Move QA report and create metadata\n pb_filebase = os.path.splitext(fits_file)[0] + '_PB'\n qa_report = pb_filebase + '_continuum_validation_snr5.0_int'\n pb_dir = _productdir(metadata, base_dir, i, '_PB', write_tag)\n\n qa_dir = _productdir(metadata, base_dir, i, '_QA', write_tag)\n os.mkdir(qa_dir)\n os.rename(os.path.join(pb_dir, qa_report), qa_dir)\n make_report_metadata(metadata, qa_dir)\n\n # Move RMS image and create metadata\n rms_dir = _productdir(metadata, base_dir, i, '_RMS', write_tag)\n os.mkdir(rms_dir)\n rms_image = pb_filebase + '_aegean_rms'\n mean_pb_rms = _calc_rms(os.path.join(pb_dir, rms_image + FITS_EXT))\n\n make_image_metadata(metadata, '_PB', pb_dir, i,\n 'Continuum Image PB corrected',\n 'Continuum image PB corrected',\n mean_pb_rms)\n\n os.rename(os.path.join(pb_dir, rms_image + FITS_EXT),\n os.path.join(rms_dir, rms_image + FITS_EXT))\n _add_missing_axes(os.path.join(rms_dir, rms_image + FITS_EXT))\n _caption_pngs(rms_dir, rms_image, kat_target, 'RMS PB Corrected')\n make_image_metadata(metadata, '_PB_aegean_rms', rms_dir, i,\n 'Continuum PB Corrected RMS Image',\n 'Continuum PB Corrected RMS image',\n mean_pb_rms)\n\n # Move MEAN image and create metadata\n bkg_dir = _productdir(metadata, base_dir, i, '_BKG', write_tag)\n os.mkdir(bkg_dir)\n bkg_image = pb_filebase + '_aegean_bkg'\n os.rename(os.path.join(pb_dir, bkg_image + FITS_EXT),\n os.path.join(bkg_dir, bkg_image + FITS_EXT))\n _add_missing_axes(os.path.join(bkg_dir, bkg_image + FITS_EXT))\n _caption_pngs(bkg_dir, bkg_image, kat_target, 'MEAN PB Corrected')\n make_image_metadata(metadata, '_PB_aegean_bkg', bkg_dir, i,\n 'Continuum PB Corrected Mean Image',\n 'Continuum PB Corrected Mean image',\n mean_pb_rms)\n\n # Remove .writing tag\n dir_list = [pb_dir, qa_dir, rms_dir, bkg_dir]\n for product_dir in dir_list:\n os.rename(product_dir, os.path.splitext(product_dir)[0])", "def main():\r\n\r\n directory = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n path = os.path.join(directory, 'dump_3')\r\n if not (os.path.exists(path)):\r\n os.mkdir(path)\r\n\r\n for date in range(1, 31):\r\n # date-month-year\r\n # file_name1 = path + '\\\\' + str(date) + '-8-2020' + '_file1.txt'\r\n\r\n # year-month-date\r\n # file_name1 = path + '\\\\' + '2020-08-' + str(date) + '_file3.txt'\r\n\r\n # month_year_date\r\n file_name1 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file5.txt'\r\n\r\n # date-month-year\r\n # file_name2 = path + '\\\\' + str(date) + '-8-2020' + '_file2.txt'\r\n\r\n # year-month-date\r\n # file_name2 = path + '\\\\' + '2020-08-' + str(date) + '_file4.txt'\r\n\r\n # month_year_date\r\n file_name2 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file6.txt'\r\n\r\n rows = []\r\n for row in range(100):\r\n string = 'asddfgfhgkhjghkweoriuywoipywbnxvnmznvnmbatr'\r\n rows.append(string)\r\n with open(file_name1, 'w') as f1, open(file_name2, 'w') as f2:\r\n f1.writelines(rows)\r\n f2.writelines(rows)", "def 
analyze(self):\n for f in self.files:\n tokenizer = Tokenizer(f)\n self.write_tokens(tokenizer)\n compilation_engine = CompilationEngine(tokenizer, f)\n compilation_engine.compile()\n self.write_syntax_tree(compilation_engine)\n compilation_engine.VMwriter.create_file(f[:-5])", "def _check_trace_files(self):\n\n if not self._parameters.trace_dir:\n # traces will be written to stderr. No need to check trace files.\n return\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:\n # Output files are handled by tf.summary operations, no need to precreate\n # them.\n return\n if not gfile.Exists(self._parameters.trace_dir):\n file_io.recursive_create_dir(self._parameters.trace_dir)\n if not gfile.Exists(self._parameters.trace_dir):\n raise RuntimeError('Failed to create trace directory at %s' %\n self._parameters.trace_dir)", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def combineAllGraphFiles(chroms, final_out):\n outfile = open(final_out,'w');\n outfile.close();\n \n for chrom in chroms:\n graph_file = chrom + \".graph\";\n try:\n if os.system('%s %s >> %s' %\n (cat, graph_file, final_out)): raise\n except: sys.stderr.write(\"cat failed at %s\\n\" % chrom)", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def _go_through_summary_reports(self):\n\n for result_file in self.result_files:\n self.cur_8digit_dir = os.path.split(result_file)[0]\n try:\n with open(result_file) as f_in:\n sum_rep = json.load(f_in)\n if sum_rep.has_key('Artifacts'):\n for linked_artifact in sum_rep['Artifacts']:\n artifact_path = linked_artifact['Path']\n # For now assume only files are linked (no folders)\n rel_path_from_results = os.path.join(self.cur_8digit_dir, artifact_path)\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results',\n rel_path_from_results))\n if artifact_path.endswith('.json'):\n function_tag = artifact_path.replace('.','_').replace('/','_')\n\n if hasattr(self, function_tag):\n getattr(self, function_tag)()\n except IOError:\n print '{0} does not exist on this filesystem. I cannot be check for references '\\\n 'to other files.'.format(result_file)", "def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")", "def create_files(self, kind):\n\n\t\t#Creates TS row data. \n\t\tself.ts_row = self.lar_gen.make_ts_row()\n\n\t\t#Creates a TS row dataframe. \n\t\tts_df = pd.DataFrame(self.ts_row, index=[1])\n\n\t\t#The following produces a clean file. \n\t\tif kind == 'clean_file':\n\n\t\t\t#Creates a first row of LAR to begin the dataframe.\n\t\t\t#All other rows are concatenated to the dataframe until\n\t\t\t#the length of the dataframe reaches the file length specified in the \n\t\t\t#test filepaths yaml file.\n\n\t\t\tfor i in range(0, self.clean_config[\"file_length\"][\"value\"] ):\n\t\t\t\tprint('Creating row {i}'.format(i=i))\n\t\t\t\tif i==0:\n\t\t\t\t\tfirst_row = self.make_clean_lar_row(ts_row=self.ts_row)\n\t\t\t\t\tlar_frame = pd.DataFrame(first_row, index=[1])\n\t\t\t\telse:\n\t\t\t\t\tnew_row = self.make_clean_lar_row(ts_row=self.ts_row)\n\t\t\t\t\tnew_row = pd.DataFrame(new_row, index=[1])\n\t\t\t\t\tlar_frame = pd.concat([lar_frame, new_row], axis=0)\n\t\t\t\n\t\t\t#Writes the file to a clean filepath specified in the test_filepaths\n\t\t\t#configuration. \n\t\t\tout_file_path = self.filepaths['clean_filepath'].format(bank_name=self.clean_config[\"name\"][\"value\"])\n\t\t\tout_file_bank_name = self.filepaths['clean_filename'].format(row_count=self.clean_config[\"file_length\"][\"value\"] , bank_name=self.clean_config[\"name\"][\"value\"])\n\n\n\t\t\tutils.write_file(ts_input=ts_df, lar_input=lar_frame, path=out_file_path, name=out_file_bank_name)\n\n\t\t#For error files. 
\n\t\tif kind == 'error_files':\n\n\t\t\t#Modifies clean data and outputs \n\t\t\t#resulting files that fail specific edits.\n\t\t\t\n\t\t\t#Instantiates the edit file maker.\n\t\t\tfile_maker = test_data(ts_schema=self.ts_schema_df, \n\t\t\t\t\t\t\t\t lar_schema=self.lar_schema_df, \n\t\t\t\t\t\t\t\t geographic_data=self.geographic_data) \n\n\t\t\t#Pulls in the clean data filepath and name from the\n\t\t\t#test filepaths yaml file. \n\t\t\tts_data, lar_data = utils.read_data_file(path=self.filepaths['clean_filepath'].format(bank_name=self.clean_config[\"name\"][\"value\"]),\n\t\t\t\t\n\t\t\t\tdata_file=self.filepaths[\"clean_filename\"].format(bank_name=self.clean_config[\"name\"][\"value\"], row_count=self.clean_config[\"file_length\"][\"value\"])) \n\t\t\t\t\t\n\n\t\t\t#Passes clean file data to the file maker object.\n\t\t\tfile_maker.load_data_frames(ts_data, lar_data) \n\n\t\t\t#Generates a file for each edit function in file maker. \n\t\t\tedits = []\n\t\t\t\n\t\t\t#Loops over all data modification functions. \n\t\t\tfor func in dir(file_maker): \n\n\t\t\t\t#Checks if function is a numbered syntax or validity edit.\n\t\t\t\tif func[:1] in (\"s\", \"v\", \"q\") and func[1:4].isdigit()==True: \n\t\t\t\t\tprint(\"applying:\", func)\n\t\t\t\t\t#Applies data modification functions and produces files.\n\t\t\t\t\tgetattr(file_maker, func)()", "def create_txt_files(self, op_dir=None):\n for tb_nm, tb_cont in list(self.tables_info['tables'].items()):\n op_fl = '{}_{}.txt'.format(self.report_basename, tb_nm)\n if op_dir:\n op_fl = os.path.join(op_dir, op_fl)\n with open(op_fl, 'w') as TXT:\n TXT.write(tb_cont)", "def archive_test_logs(days, archive_path, all_logs):\n for day in days.keys():\n daydir = datetime.strptime(day, \"%Y%m%d\").strftime(\"%m-%d-%Y\")\n for scenario in days[day].keys():\n # temporary log directories are stored by scenario + date\n datename = scenario + \"-\" + datetime.strptime(day, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n if datename not in all_logs:\n raise RuntimeError(f\"Missing all_log entry for {datename}\")\n\n if not os.path.exists(all_logs[datename].name):\n raise RuntimeError(f\"Missing log directory for {datename}\")\n\n tmpdir = all_logs[datename].name\n failed = days[day][scenario][\"failed-tests\"]\n flakes = days[day][scenario][\"flaky-tests\"]\n\n scenario_archive = os.path.join(archive_path, daydir, scenario)\n os.makedirs(os.path.join(scenario_archive, \"failed\"))\n os.makedirs(os.path.join(scenario_archive, \"flakes\"))\n # data is organized by test names as keys with lists of tests\n for name in failed:\n i = 1\n for t in sorted(failed[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"failed\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1\n\n for name in flakes:\n i = 1\n for t in sorted(flakes[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not logdir or not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"flakes\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1", "def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()", "def clean_up_submission_lf_files(\n sim_dir, submission_files_to_tar=[], lf_files_to_tar=[]\n):\n 
submission_files_to_tar += SUBMISSION_FILES + SUBMISSION_SL_LOGS\n lf_files_to_tar += LF_FILES\n\n # create temporary submission dir\n submission_dir, _ = create_temp_dirs(sim_dir, SUBMISSION_DIR_NAME)\n\n # create temporary lf dir\n lf_dir, lf_sub_dir = create_temp_dirs(sim_dir, LF_DIR_NAME, LF_SUB_DIR_NAME)\n\n # move files to submission dir\n move_files(sim_dir, submission_dir, submission_files_to_tar)\n\n tar_files(submission_dir, os.path.join(sim_dir, SUBMISSION_TAR))\n\n # move files to lf dir\n move_files(os.path.join(sim_dir, \"LF\"), lf_dir, lf_files_to_tar)\n # copy e3d segments to lf sub dir\n e3d_segs_dir = os.path.join(sim_dir, \"LF\", \"OutBin\")\n for f in os.listdir(e3d_segs_dir):\n if \"-\" in f: # e3d segments have '-' in the name\n shutil.move(os.path.join(e3d_segs_dir, f), os.path.join(lf_sub_dir, f))\n\n tar_files(lf_dir, os.path.join(sim_dir, LF_TAR))\n\n # remove temporary submission and lf dir\n shutil.rmtree(lf_dir)\n shutil.rmtree(submission_dir)", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n 
\n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def write_solutions_to_files(\n solutions: List[Solution], path: Path, folder_name: str = \"CodeWars\"\n) -> None:\n stat = {}\n path = path / folder_name\n for solution in solutions:\n if solution.kyu in stat:\n stat[solution.kyu] += 1\n else:\n stat[solution.kyu] = 1\n\n path_to_solution = get_dir(solution, path)\n path_to_solution.mkdir(parents=True, exist_ok=True)\n\n create_file(\n path_to_solution / f\"main{EXTENSIONS[solution.language]}\", solution.solution\n )\n\n create_file(path_to_solution / \"README.md\", f\"Link: {solution.link}\")\n\n kuy_stat = \"\\n\".join([f\":star: {x}: {stat[x]} :star:\" for x in sorted(stat)])\n create_file(\n path / \"README.md\",\n f\"Total: {sum([stat[x] for x in stat])}\\n\" f\"Detail statistic:\\n{kuy_stat}\",\n )", "def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def export_fasta(self, metadata, analysistype, reportpath, cutoff, program):\n logging.info('Creating FASTA-formatted files of outputs')\n for sample in metadata:\n # Set the name of the FASTA output file\n sample[analysistype].fasta_output = os.path.join(reportpath, '{sn}_{prog}.fasta'.format(sn=sample.name,\n prog=analysistype))\n # Remove the file if it exists. 
Otherwise, if the samples are processed by the pipeline more than\n # once, the same results will be appended to the file\n try:\n os.remove(sample[analysistype].fasta_output)\n except FileNotFoundError:\n pass\n # Process the sample only if the script could find targets\n if sample[analysistype].blastresults != 'NA' and sample[analysistype].blastresults:\n # Open the FASTA output file in append mode\n with open(sample[analysistype].fasta_output, 'a+') as fasta_output:\n for target in sorted(sample[analysistype].targetnames):\n index = 0\n for hit in sample[analysistype].blastlist:\n if hit['subject_id'] == target:\n # Set the name and percent id to avoid writing out the dictionary[key] multiple times\n if float(hit['percent_match']) >= cutoff:\n # If the 'align' option was not specified, the .dnaseq attribute will be an empty\n # dictionary. Populate this attribute as required\n try:\n # The .dnaseq attribute will not exist for amino-acid based searches\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n # The .targetsequence attribute will be sufficient\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except (KeyError, IndexError):\n # Align the protein (and nucleotide) sequences to the reference\n sample = self.alignprotein(sample=sample,\n analysistype=analysistype,\n target=target,\n program=program,\n index=index,\n hit=hit)\n try:\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except IndexError:\n fasta = str()\n # Create the SeqRecord of the FASTA sequence\n if fasta:\n try:\n record = SeqRecord(fasta,\n id='{name}_{target}'\n .format(name=sample.name,\n target=target),\n description='')\n # Write the FASTA-formatted record to file\n fasta_output.write(record.format('fasta'))\n except (AttributeError, TypeError):\n pass\n index += 1\n # Return the updated metadata object\n return metadata", "def save_report(keyword):\n for file in glob.glob(keyword+\"_CNA/*\"):\n df_cna = pd.read_table(file, sep=\"\\t\", index_col=0)\n df_cna_report = generate_report(df_cna)\n new_folder1 = keyword+\"_report\"\n if not os.path.exists(new_folder1):\n os.mkdir(new_folder1)\n filename = os.path.split(file)[1]\n output_name = os.path.join(new_folder1, filename)\n df_cna_report.to_csv(output_name, sep=\"\\t\")\n yield df_cna_report", "def setup_gauge_files(self,outdir):\n import os\n gauge_path = os.path.join(outdir,self.gauge_dir_name)\n if not os.path.exists(gauge_path):\n try:\n os.makedirs(gauge_path)\n except OSError:\n print \"gauge directory already exists, ignoring\"\n \n for gauge in self.gauge_file_names: \n gauge_file = os.path.join(gauge_path,gauge)\n if os.path.isfile(gauge_file): \n os.remove(gauge_file)\n self.gauge_files.append(open(gauge_file,'a'))", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def initialize_output_files(self):\r\n if not self.C.restart:\r\n print(\"* Touching output files.\", flush=True)\r\n # begin writing `generation.csv` file\r\n csv_path_and_filename = 
self.C.job_dir + \"generation.csv\"\r\n util.properties_to_csv(\r\n prop_dict=self.ts_properties,\r\n csv_filename=csv_path_and_filename,\r\n epoch_key=\"Training set\",\r\n append=False,\r\n )\r\n\r\n # begin writing `convergence.csv` file\r\n util.write_model_status(append=False)\r\n\r\n # create `generation/` subdirectory to write generation output to\r\n os.makedirs(self.C.job_dir + \"generation/\", exist_ok=True)", "def report(self):\n # for the file name 'f' and the size 's',\n # print formatted output.\n for f,s in self.warning_files:\n print '%-*s :%d' % (self.name_size, f, s)", "def report():\n \n parser = argparse.ArgumentParser(\n \n description='pyrpipe diagnostic utility\\nGenerate analysis report.',\n \n usage='''pyrpipe_diagnostic report [<args>] <logfile>\n \n ''') \n parser.add_argument('-o', help='out file \\ndefault: same as input logfile',action=\"store\")\n parser.add_argument('-s','--summary', help='Print quick summary and exit',default=False,dest='summary', action='store_true')\n parser.add_argument('-e', help='report output type: [md,pdf,html] \\ndefault: pdf',default='pdf',action=\"store\")\n parser.add_argument('-c',help='Report options [(f)ull,fa(i)l,(p)ass]\\ndefault: f',default='f',action=\"store\")\n parser.add_argument('-v',help='verbose',action=\"store_true\")\n parser.add_argument('logfile', help='The log file generated by pyrpipe',action=\"store\")\n args = parser.parse_args(sys.argv[2:])\n \n logFile=args.logfile\n envLog=reports.checkEnvLog(logFile) \n #parse args\n if args.summary:\n #print summary\n reports.generate_summary(logFile,envLog,coverage='a')\n return\n \n vFlag=args.v\n if vFlag:\n print(\"Generating report\")\n \n \n outFile=\"\"\n if args.o is None:\n outFile=pu.get_file_basename(args.logfile)\n else:\n outFile=args.o\n outFile+='.'+args.e\n \n if args.e in ['pdf','html','md']:\n htmlReport=reports.generateHTMLReport('simpleDiv.html',logFile,envLog,coverage=args.c)\n if args.e=='pdf':\n reports.writeHtmlToPdf(htmlReport,outFile)\n elif args.e=='html':\n reports.writeHtml(htmlReport,outFile)\n elif args.e == 'md':\n reports.writeHtmlToMarkdown(htmlReport,outFile)\n else:\n pu.print_boldred(\"unknown extension:\"+args.e+\". Exiting\")", "def writestat(self, outfile=None, hubble=None):\n s = self._base()\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n if outfile is None: outfile = self._base().filename+'.stat'\n print \"write stat file to \", outfile\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n for ii in np.arange(self._nhalos)+1:\n print '%d '%ii,\n sys.stdout.flush()\n h = self[ii].properties # halo index starts with 1 not 0\n## 'Contaminated'? 
means multiple dark matter particle masses in halo)\"\n icontam = np.where(self[ii].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n## may want to add implement satellite test and false central breakup test.\n ss = \" \" # can adjust column spacing\n outstring = str(ii)+ss\n outstring += str(len(self[ii]))+ss+str(len(self[ii].g))+ss\n outstring += str(len(self[ii].s)) + ss+str(len(self[ii].dark))+ss\n outstring += str(h['m']/hubble)+ss+str(h['r']/hubble)+ss\n outstring += str(self[ii].g['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].s['mass'].in_units('Msol').sum())+ss\n outstring += str(self[ii].d['mass'].in_units('Msol').sum())+ss\n outstring += str(h['vmax'])+ss+str(h['vmax_r']/hubble)+ss\n outstring += str(h['vrms'])+ss\n ## pos: convert kpc/h to mpc (no h).\n outstring += str(h['pos'][0][0]/hubble)+ss\n outstring += str(h['pos'][0][1]/hubble)+ss\n outstring += str(h['pos'][0][2]/hubble)+ss\n outstring += str(h['vel'][0][0])+ss+str(h['vel'][0][1])+ss\n outstring += str(h['vel'][0][2])+ss\n outstring += contam+ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\"+ss # false central breakup.\n print >> fpout, outstring\n fpout.close()", "def make_header_files():\n os.makedirs(DATA_DIR) if not os.path.exists(DATA_DIR) else None\n from dkistdataratemodel.units import frame\n from dkist_data_model.generator.dataproducts.visp import CalibratedVISP\n\n \"\"\"\n Generate VISP\n \"\"\"\n visp = CalibratedVISP(end_condition=20*frame)\n\n visp_files = visp.to_fits(\"sp_5_labelled\",\n path_template=os.path.join(DATA_DIR, 'visp_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"visp.zip\"), \"w\") as myzip:\n for fname in visp_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)\n\n \"\"\"\n Generate VTF\n \"\"\"\n from dkist_data_model.generator.dataproducts.vtf import CalibratedVTF\n vtf = CalibratedVTF(end_condition=96*frame)\n\n vtf_files = vtf.to_fits(\"5d_test\",\n path_template=os.path.join(DATA_DIR, 'vtf_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"vtf.zip\"), \"w\") as myzip:\n for fname in vtf_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' 
% file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def main():\n\n\t# Consolidate the individual email files into a single spam file\n\t# and a single ham file\n\tmakeDataFileFromEmails( \"D:\\\\Softwares\\spark-2.1.0-bin-hadoop2.7\\\\spark-2.1.0-bin-hadoop2.7\\\\bin\\\\My_Examples\\\\Spam-Ham\\\\20050311_spam_2.tar\\\\20050311_spam_2\\\\spam_2\\\\\", \n\t\t\"D:\\\\Softwares\\spark-2.1.0-bin-hadoop2.7\\\\spark-2.1.0-bin-hadoop2.7\\\\bin\\\\My_Examples\\\\Spam-Ham\\\\20050311_spam_2.tar\\\\20050311_spam_2\\\\spam.txt\")\n\tmakeDataFileFromEmails( \"D:\\\\Softwares\\\\spark-2.1.0-bin-hadoop2.7\\\\spark-2.1.0-bin-hadoop2.7\\\\bin\\\\My_Examples\\\\Spam-Ham\\\\20030228_easy_ham.tar\\\\20030228_easy_ham\\\\easy_ham\\\\\", \n\t\t\"D:\\\\Softwares\\\\spark-2.1.0-bin-hadoop2.7\\\\spark-2.1.0-bin-hadoop2.7\\\\bin\\\\My_Examples\\\\Spam-Ham\\\\20030228_easy_ham.tar\\\\20030228_easy_ham\\\\ham.txt\" )", "def main():\n config_file = get_conf(get_config_name())\n if not config_file:\n sys.exit(1)\n log = get_last_file(config_file[\"LOG_DIR\"])\n MAIN_LOGGER.info(\"we've got log file named %s\", log.path)\n file_name = os.path.join(os.path.dirname(__file__), config_file['REPORT_DIR'],\n \"report-{}.html\".format(log.date))\n if os.path.exists(file_name):\n MAIN_LOGGER.info(\"%s already exists\", file_name)\n sys.exit()\n res = gen_parse_log(log, config_file['PERCENT_FAILS'])\n if not res:\n sys.exit(1)\n MAIN_LOGGER.info(\"log parsed\")\n report = []\n for _ in range(int(config_file[\"REPORT_SIZE\"])):\n try:\n report.append(next(res))\n except StopIteration:\n pass\n MAIN_LOGGER.info(\"report file name %s\", file_name)\n\n if report:\n save_report(report, config_file['TEMPLATE_FILE'], file_name)", "def create_report(folderpath):\n\n outputfolder = create_folder(DEFAULT_OUTPUT_FOLDER)\n\n folderpath = os.path.expanduser(folderpath)\n updatesByHour = collections.defaultdict(list)\n\n now = datetime.now()\n\n for root, folders, files in os.walk(folderpath, followlinks=False):\n for filename in files:\n if filename not in IGNORE_THESE_FILES:\n filepath = pathlib.Path(root, filename)\n mtime = datetime.fromtimestamp(filepath.stat().st_mtime)\n\n if mtime.year == now.year and mtime.month == now.month:\n # For now only deal with this month\n mtime_str = mtime.strftime(\"%Y-%m-%d %H:00\")\n updatesByHour[mtime_str].append((root,filename))\n\n outputFilePath = pathlib.Path(outputfolder, now.strftime(\"%Y-%m.md\"))\n\n with open(outputFilePath, \"w\") as output_file:\n output_file.write(\"# \"+folderpath+\"\\n\")\n for updateTime in sorted(updatesByHour.keys()):\n output_file.write(\"## \"+updateTime+\"\\n\")\n previous_root = None\n previous_pattern=None\n s=\"\"\n for root, filename in sorted(updatesByHour[updateTime]):\n if not previous_root == root:\n # Print a Directory heading\n this_folder=root[len(folderpath):]\n if not len(this_folder.strip()):\n this_folder=folderpath\n output_file.write(\"### \"+this_folder+\" \\n\")\n this_pattern=re.sub(\"[0-9]\",\"x\",filename)\n if not previous_pattern==this_pattern:\n if len(s):\n listItem = \"* \" + s \n 
output_file.write(listItem[:-2]+\"\\n\")\n s=\"\"\n s=s+str(filename)+\", \"\n previous_root = root\n previous_pattern=this_pattern", "def main():\n args = setup_args()\n header_info = extract_header_info_from_probes(args.probe)\n\n for gene in header_info.keys():\n # check there is a folder for gene, else create it\n gene_out_dir = os.path.join(args.output_path, gene)\n if not os.path.exists(gene_out_dir):\n os.mkdir(gene_out_dir)\n\n gene_msa_fname = os.path.join(gene_out_dir, '{}_msa.fa'.format(gene))\n gene_ref = os.path.join(os.path.abspath(args.gene_refs), gene + '.fa')\n generate_msa_for_gene(gene, header_info[gene], gene_ref, gene_msa_fname)", "def generateSummary(fn, allimages):\n\n # create necessary directories\n d = dirname(join(opts.root, fn))\n if not exists(d):\n os.makedirs(d)\n\n otext = u\"\"\n\n for i in allimages:\n l = i._filename\n l += ','\n if i._title:\n l += i._title\n # Make sure it's on a single line\n# print l\n otext += l.replace('\\n', ' ') + '\\n'\n\n # Write out file.\n try:\n afn = join(opts.root, fn)\n tfile = open(afn, \"w\")\n tfile.write(otext.encode(config.Coding))\n tfile.close()\n\n except IOError, e:\n print >> sys.stderr, \"Error: can't open file: %s\" % fn", "def generate_report():\n if os.path.isdir(\"build/coverage\"):\n shutil.rmtree(\"build/coverage\")\n commands = '''\nscons -uij32 --optimization=coverage controller/cplusplus_test\nlcov --base-directory build/coverage --directory build/coverage -c -o build/coverage/controller_test.info\ngenhtml -o build/coverage/controller/test_coverage -t test --num-spaces 4 build/coverage/controller_test.info\n'''\n for cmd in commands.splitlines():\n cmd_args = cmd.split()\n if (len(cmd_args) == 0):\n continue\n cmd = cmd_args[0]\n cmd_path = find_executable(cmd)\n if not cmd_path:\n continue\n pid = os.fork()\n if pid == 0:\n # Avoid stdout buffering by execing command into child process.\n os.execv(cmd_path, cmd_args)\n os.waitpid(pid, 0)", "def main():\n print(\n \"\"\"\n\n ##########################################################\n # #\n # #\n # Compiling Colocalized Cyano Datasets #\n # #\n # #\n ##########################################################\n\n \n \"\"\"\n )\n cyanoFiles = glob.glob(f\"{COLOCALIZED_DIR}*.csv\")\n makedir(COMPILED_DIR)\n dfCompiled = pd.DataFrame({})\n for cyanoFile in cyanoFiles:\n print(f\"Compiling {cyanoFile}\")\n data = unify(cyanoFile)\n if len(dfCompiled ) < 1:\n dfCompiled = data\n else:\n dfCompiled = pd.concat([dfCompiled, data], ignore_index=True) \n dfCompiled.to_csv(f\"{COMPILED_DIR}compiled.csv\", index=False)", "def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n 
fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True", "def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this 
option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()", "def generate(self, fileName):\n self.preProcess()\n styleFile = open(fileName, 'w')\n # write head part\n head = \"\"\"#!/usr/bin/env python\n\nimport os\n\nfrom WMQuality.Code import Code\n\n# output of the log files\n# prefix of the files in cvs\n# quality script for using pylint:\nqualityScript = '%s'\n# output file:\nqualityReport = '%s'\n# rating threshold (min: 0, max 10)\nthreshold = %s\n\npackages = {\\\\\n \"\"\" % (self.script, self.report, self.threshold)\n styleFile.writelines(head)\n styleFile.writelines('\\n')\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n # register this.\n styleFile.writelines(\" '\" + moduleName + \"':'\" + self.module[moduleName] + \"',\\\\\\n\")\n styleFile.writelines('}\\n')\n tail = \"\"\"\ncode = Code(qualityScript, qualityReport, WMCore.WMInit.getWMBASE(), threshold, packages)\ncode.run()\ncode.summaryText()\n \"\"\"\n styleFile.writelines(tail)\n styleFile.close()", "def WriteErrorsToFile():\n if(not __errorsTracked__ is None):\n if(len(__errorsTracked__)>0):\n formattedLogName = '_'.join[\"ErrorLog\",\"GarageChecker\",datetime.date,datetime.time]\n WriteToFile(formattedLogName,__errorsTracked__)\n __errorsTracked__ = []", "def create_coverage_files(self):\n\n # Select if normalisation is based on fragment numbers\n if self._args.paired_end and not self._args.no_norm_by_fragments:\n norm_by_fragments = True\n else:\n norm_by_fragments = False\n if norm_by_fragments:\n reads_or_fragments = \"fragments\"\n alignment_stats_path = (\n self._pathcreator.fragment_alignments_stats_path\n )\n else:\n reads_or_fragments = \"reads\"\n alignment_stats_path = self._pathcreator.read_alignments_stats_path\n\n # Get alignment stats\n raw_stat_data_reader = RawStatDataReader()\n alignment_stats = [raw_stat_data_reader.read(alignment_stats_path)]\n # Lib names was paired end\n lib_names = list(alignment_stats[0].keys())\n was_paired_end_alignment = self._was_paired_end_alignment(lib_names)\n\n # Quit if the wrong parameters have been chosen for the subcommand\n if was_paired_end_alignment and not self._args.paired_end:\n self._write_err_msg_and_quit(\n \"The alignemnt seems to be based on paired end reads. \"\n \"Please also set \"\n \"the option '-P' or '--paired_end'.\\n\"\n )\n\n if self._args.no_fragments and not self._args.paired_end:\n self._write_err_msg_and_quit(\n \"The option '-nf' or \"\n \"'--no_fragments' is only valid \"\n \"for paired end reads. If you have \"\n \"paired end reads, please also set \"\n \"the option '-P' or '--paired_end'.\\n\"\n )\n\n if self._args.no_norm_by_fragments and not self._args.paired_end:\n self._write_err_msg_and_quit(\n \"The option '-nnf' or \"\n \"'--no_norm_by_fragments' is only valid \"\n \"for paired end reads. 
If you have \"\n \"paired end reads, please also set \"\n \"the option '-P' or '--paired_end'.\\n\"\n )\n\n # Set read files and lib names\n if not was_paired_end_alignment:\n self._pathcreator.set_read_files_dep_file_lists_single_end(\n self._pathcreator.get_read_files(), lib_names\n )\n else:\n self._pathcreator.set_read_files_dep_file_lists_paired_end(\n self._pathcreator.get_read_files(), lib_names\n )\n # If fragments should be used and they were not created during alignment,\n # they will be created now\n if not self._args.no_fragments and self._args.paired_end:\n bam_files_exist = []\n for (\n bam_fragment_path\n ) in self._pathcreator.aligned_fragments_bam_paths:\n bam_files_exist.append(os.path.exists(bam_fragment_path))\n # If any of the bam files containing fragments is missing, create all\n # of them\n if not all(bam_files_exist):\n self._build_fragments()\n\n # Set alignment paths to fragments or single reads\n if not self._args.no_fragments and self._args.paired_end:\n alignment_paths = self._pathcreator.aligned_fragments_bam_paths\n else:\n alignment_paths = self._pathcreator.read_alignment_bam_paths\n # determine species cross mapped reads\n self._pathcreator.set_ref_seq_paths_by_species()\n if not self._args.count_cross_aligned_reads:\n self._crossmapped_reads_by_lib = {}\n for lib_name, read_alignment_path in zip(\n lib_names, self._pathcreator.read_alignment_bam_paths\n ):\n # retrieve the cross mapped reads from the single read files\n # to also get reads where two mates map to different\n # species. This would not be possible with the built fragments\n self._crossmapped_reads_by_lib[\n lib_name\n ] = self.determine_crossmapped_reads(read_alignment_path)\n\n if not self._args.non_strand_specific:\n strands = [\"forward\", \"reverse\"]\n else:\n strands = [\"forward_and_reverse\"]\n\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species = {}\n for sp in self._species_folder_prefixes_and_display_names.keys():\n # Retrieve the either the no. of uniquely aligned reads or\n # the number of species exclusive aligned reads (\"all aligned\" - \"cross aligned\") (Default behaviour) or\n # number of all aligned reads for each library of the given species\n read_files_aligned_read_freq = {}\n for read_file, attributes in alignment_stats[0].items():\n # If option normalize by uniquely is chosen, only the sum of uniquely aligned reads is used for normalisation\n # this excludes species cross mapped reads, split aligned reads and multiple aligned reads\n if self._args.normalize_by_uniquely:\n read_files_aligned_read_freq[read_file] = attributes[\n \"species_stats\"\n ][sp][f\"no_of_uniquely_aligned_{reads_or_fragments}\"]\n elif self._args.normalize_cross_aligned_reads_included:\n read_files_aligned_read_freq[read_file] = attributes[\n \"species_stats\"\n ][sp][f\"no_of_aligned_{reads_or_fragments}\"]\n # Default: Number of aligned reads without the cross aligned reads are used for normalization\n else:\n read_files_aligned_read_freq[read_file] = (\n attributes[\"species_stats\"][sp][\n f\"no_of_aligned_{reads_or_fragments}\"\n ]\n - attributes[\"species_stats\"][sp][\n f\"no_of_cross_aligned_{reads_or_fragments}\"\n ]\n )\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ] = {}\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\"read_files_aligned_read_freq\"] = read_files_aligned_read_freq\n # Retrieve the min no. 
of aligned reads\n # of all libraries for the given species\n min_no_of_aligned_reads = float(\n min(read_files_aligned_read_freq.values())\n )\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\"min_no_of_aligned_reads\"] = min_no_of_aligned_reads\n self._pathcreator.set_coverage_folder_and_file_names(\n strands,\n lib_names,\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species,\n )\n\n project_creator = ProjectCreator()\n project_creator.create_subfolders(\n self._pathcreator.required_coverage_folders()\n )\n self._test_folder_existance(\n self._pathcreator.required_coverage_folders()\n )\n\n # get references by species\n references_by_species = self._get_references_by_species()\n\n # Run the coverage file creation species-wise\n for (\n sp\n ) in (\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species.keys()\n ):\n # Run the generation of coverage in parallel\n\n jobs = []\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=self._args.processes\n ) as executor:\n for lib_name, bam_path in zip(lib_names, alignment_paths):\n if not self._args.count_cross_aligned_reads:\n cross_mapped_reads = self._crossmapped_reads_by_lib[\n lib_name\n ]\n else:\n cross_mapped_reads = None\n coverage_creator = CoverageCreator(\n self._args,\n strands,\n self._pathcreator.coverage_files_by_species[sp][\n lib_name\n ],\n references_by_species[sp],\n self._args.count_cross_aligned_reads,\n cross_mapped_reads,\n )\n jobs.append(\n executor.submit(\n coverage_creator.create_coverage_files_for_lib,\n lib_name,\n bam_path,\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\n \"read_files_aligned_read_freq\"\n ][\n lib_name\n ],\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\n \"min_no_of_aligned_reads\"\n ],\n )\n )\n # Evaluate thread outcome\n self._check_job_completeness(jobs)", "def zip_imagenet100c():\n #First make sure the directory we are given is correct!\n if not os.path.isdir(DATA_SRC_ROOT):\n raise Exception(\"Bad filepath given\")\n\n #create the destiantion directories if they don't exist\n if not os.path.isdir(IMAGENET100_DIR):\n os.mkdir(IMAGENET100_DIR)\n\n #grab the subset wnids for the 100 class-subset\n with open(IMAGENET100_CLASSES) as f:\n subset_wnids = f.readlines()\n subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab\n\n #Grab the names of all of the folders inside the root data source\n #Structure is distortion/sub_distortion/level/wnids\n for distortion in os.listdir(DATA_SRC_ROOT):\n if distortion != \"meta.bin\":\n print(distortion)\n\n folder_path = os.path.join(DATA_SRC_ROOT, distortion)\n\n if not os.path.isdir(folder_path):\n continue\n\n for sub_distortion in os.listdir(folder_path):\n print(sub_distortion)\n\n subfolder_path = os.path.join(folder_path, sub_distortion)\n\n if not os.path.isdir(subfolder_path):\n continue\n\n for level in os.listdir(subfolder_path):\n print(level)\n\n level_path = os.path.join(subfolder_path, level)\n\n #grab the correcrt validation d9recotires\n for wnid in os.listdir(level_path):\n wnid_path = os.path.join(level_path, wnid)\n\n if not os.path.isdir(wnid_path):\n continue\n\n if wnid in subset_wnids:\n dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)\n\n shutil.copytree(wnid_path, dest_path)\n\n #copy the metadata bin file\n meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')\n meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')\n\n 
shutil.copy(meta_file, meta_dest)\n\n #Zip the destinatio file\n shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)", "def main(): \n # Parse Arguments\n args = parse_arguments()\n\n # Print outdir\n print(\"Writing output to \" + args.outdir)\n\n # Print start statement\n print('Starting script for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)\n\n # Put all the files in a function that will further handle the files as dataframe\n create_df(args.file, args.outdir)\n\n # Script is finished\n print('All done for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)", "def genStixDoc(\n outputDir_,\n targetFileSha1_,\n targetFileSha256_,\n targetFileSha512_,\n targetFileSsdeep_,\n targetFileMd5_,\n targetFileSize_,\n targetFileName_,\n ipv4Addresses_,\n hostNames_):\n parsedTargetFileName = reFileName(targetFileName_)[1]\n parsedTargetFilePrefix = reFileName(targetFileName_)[0]\n stix.utils.set_id_namespace({\"http://www.nickdriver.com/cuckoo2CRITs\" : \"cuckoo2CRITs\"})\n NS = cybox.utils.Namespace(\"http://www.nickdriver.com/cuckoo2CRITs\", \"cuckoo2CRITs\")\n cybox.utils.set_id_namespace(NS)\n stix_package = STIXPackage()\n\n stix_header = STIXHeader()\n stix_header.title = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n stix_header.description = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n stix_package.stix_header = stix_header\n\n #Will take this out later\n # Create the ttp\n malware_instance = MalwareInstance()\n malware_instance.add_name(parsedTargetFileName)\n malware_instance.description = targetFileSha1_\n ttp = TTP(title='TTP: ' + parsedTargetFileName)\n ttp.behavior = Behavior()\n ttp.behavior.add_malware_instance(malware_instance)\n #stix_package.add_ttp(ttp)\n \n #Trying to create an array that will be added later...\n stix_observables = []\n \n #This works - leaving intact until the new portion works\n '''\n # Create the indicator for the ipv4 addresses\n ipv4Object = Address(ipv4Addresses_, Address.CAT_IPV4)\n #stix_msg['stix_observables'].extend(Observables([ipv4Object]))\n stix_observables.extend([ipv4Object])\n '''\n for ip in ipv4Addresses_:\n\t\tipv4Object = Address(ip, Address.CAT_IPV4)\n\t\tstix_observables.extend([ipv4Object])\n \n \n '''\n #This works - leaving intact until the new portion works\n # Create the indicator for the domain names\n domainNameObject = DomainName()\n domainNameObject.value = hostNames_\n '''\n for name in hostNames_:\n\t\tdomainNameObject = DomainName()\n\t\tdomainNameObject.value = name\n\t\tstix_observables.extend([domainNameObject])\n\t\t\n \n\n \n # Create the observable for the file\n fileObject = File()\n fileObject.file_name = parsedTargetFileName\n #fileObject.file_name.condition = 'Equals'\n fileObject.size_in_bytes = targetFileSize_\n #fileObject.size_in_bytes.condition = 'Equals'\n fileObject.add_hash(Hash(targetFileSha1_, type_='SHA1', exact=True))\n fileObject.add_hash(Hash(targetFileSha256_, type_='SHA256', exact=True))\n fileObject.add_hash(Hash(targetFileSha512_, type_='SHA512', exact=True))\n fileObject.add_hash(Hash(targetFileSsdeep_, type_='SSDEEP', exact=True))\n fileObject.add_hash(Hash(targetFileMd5_, type_='MD5', exact=True))\n \n stix_observables.extend([fileObject])\n \n \n stix_package.observables = Observables(stix_observables)\n \n #DEBUG\n #stagedStixDoc = stix_package.to_xml()\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(stagedStixDoc)\n\t\t\n #print \"stix_observables 
list\"\n\n #pp.pprint(stix_observables)\n \n '''\n #VERY BASIC STIX ATTEMPT - THIS WORKS!\n a = Address(\"1.2.3.4\", Address.CAT_IPV4)\n d = DomainName()\n d.value = \"cybox.mitre.org\"\n stix_package.observables = Observables([a, d])\n #concensus - Observable does not work - ObservableS does\n '''\n\t\n\t\n\t###UNCOMMENT THIS WHEN DONE###\n\t\n \n stagedStixDoc = stix_package.to_xml()\n stagedStixDoc = fixAddressObject(stagedStixDoc)\n stagedStixDoc = fixDomainObject(stagedStixDoc)\n today = datetime.datetime.now()\n now = today.strftime('%Y-%m-%d_%H%M%S')\n if not os.path.exists(outputDir_):\n os.makedirs(outputDir_)\n with open (outputDir_ + '/' + now + '-' + targetFileSha1_ + '.stix.xml', 'a') as myfile:\n myfile.write(stagedStixDoc)\n _l.debug('Wrote file: ' + now + '-' + targetFileSha1_ + '.stix.xml')\n \n return", "def run_main():\n\n parser = argparse.ArgumentParser(description=\"Scan a run directory and create files to \")\n parser.add_argument('--run-directory', dest='run_directory',\n action='store', default='',\n help='path to directory with xed files to process')\n args = parser.parse_args(sys.argv[1:])\n\n if not os.path.isdir(args.run_directory):\n sys.stderr.write(\"{0} is not a directory, exiting\\n\".format(args.run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n\n if not os.path.exists('info'):\n os.mkdir('info')\n\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = \"info/{0}_{1}_files.csv\".format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = \"srm://ceph-se.osgconnect.net:8443/srm/v2/\" + \\\n \"server?SFN=/cephfs/srm/xenon/\" + \\\n entry.replace('/xenon/', '')\n csv_writer.writerow([run_name, directory, uri])", "def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 
modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()", "def __merge_container_reports(self):\n print('Copying container output xml files to top level')\n files_to_merge = []\n try:\n for suite in self.execution_file_json['suites']:\n if 'suitefile' in suite:\n name = suite['suitefile'].replace('.robot', '')\n else:\n name = suite['suitedirectory']\n print('Copying xml file for suite: %s' % name)\n output_xml_path = os.path.join(self.output_path, name, ParallelRunner.ROBOT_XML.replace('SUITE', name))\n destination_path = os.path.join(self.output_path, ParallelRunner.ROBOT_XML.replace('SUITE', name))\n shutil.copyfile(src=output_xml_path, 
dst=destination_path)\n files_to_merge.append(destination_path)\n except Exception:\n pass\n print('Merging container output xml into html report')\n try:\n log_path = os.path.join(self.output_path, 'allsuites_log.html')\n report_path = os.path.join(self.output_path, 'allsuites_report.html')\n rebot(*files_to_merge, name='AllSuites', log=log_path, report=report_path)\n except Exception as e:\n print('Error merging container xml output: %s' % str(e))\n raise", "def check_all_files_and_dirs(self):\n err = 0\n err_m = ''\n warning = 0\n warning_m = ''\n # Check the pdb file for refinement\n if self.refine_pdb_in == None:\n err = 1\n err_m += '\\nPdb file should be supplied'\n else:\n if self.check_single_file(self.refine_pdb_in):\n self.refine_pdb_in = os.path.abspath(self.refine_pdb_in)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(self.refine_pdb_in)\n\n # Check the pdb file for distance analysis\n if self.check_single_file(self.X8_pdb_in):\n self.X8_pdb_in = os.path.abspath(self.X8_pdb_in)\n else:\n self.X8_pdb_in != None\n warning = 1\n warning_m += '\\nXtrapol8 pdb_in not found. No additional analysis will be applied'\n\n # Check additional files and append them to a string\n additional = \"\"\n for fle in self.additional:\n if len(fle)>0:\n if self.check_single_file(fle):\n new_add = os.path.abspath(fle)\n additional = additional + \"%s \" % (new_add)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(fle)\n self.additional = additional\n\n #Check the output directory\n if os.path.isdir(self.outdir):\n self.outdir = os.path.abspath(self.outdir)\n else:\n err = 1\n err_m += \"\\nXtrapol8 output directory cannot be found.\" \\\n \"Please run this from the same directory from which you ran Xtrapol8.\"\n\n #Check the phil file for reciprocal space refinement\n if self.check_single_file(self.reciprocal_space_phil):\n self.reciprocal_space_phil = os.path.abspath(self.reciprocal_space_phil)\n else:\n self.reciprocal_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for reciprocal space refinement not found. Refinement will use default parameters.'\n\n\n #Check the phil file for real space refinement\n if self.check_single_file(self.real_space_phil):\n self.real_space_phil = os.path.abspath(self.real_space_phil)\n else:\n self.real_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for real space refinement not found. Refinement will use default parameters.'\n\n #Check the residue list for distance analysis\n if self.check_single_file(self.residue_list):\n self.residue_list = os.path.abspath(self.residue_list)\n else:\n self.residue_list = None\n warning = 1\n warning_m += '\\nResidue list not found. Distance analysis (if required) will be performed without residue list.'\n\n return err, err_m, warning, warning_m", "def prepare_zip_file(self):\n # need the following:\n # 1. readme\n # 2. cleaned features file\n # 3. gene map\n # 4. clean response file\n # 5. run.yml\n # 6. combined viz scores files\n # 7. all top_genes_per_phenotype* files\n # 8. 
network metadata\n zip_path = os.path.join(\\\n self.job_dir_path, 'download.zip')\n with ZipFile(zip_path, 'w', ZIP_DEFLATED) as zipout:\n\n zipout.write(\\\n '/zip_readmes/README-GP.txt', 'README-GP.txt')\n\n cleaned_features_path = os.path.join(\\\n self.userfiles_dir, self.features_file_relative_path)\n zipout.write(\\\n cleaned_features_path, 'clean_genomic_matrix.txt')\n\n gene_names_map_path = os.path.join(\\\n self.userfiles_dir, self.gene_names_map_relative_path)\n zipout.write(\\\n gene_names_map_path, 'gene_map.txt')\n\n cleaned_response_path = os.path.join(\\\n self.userfiles_dir, self.response_file_relative_path)\n zipout.write(\\\n cleaned_response_path, 'clean_phenotypic_matrix.txt')\n\n zipout.write(\\\n self.yml_path, 'run_params.yml')\n\n # combine viz files\n combined_viz_path = os.path.join(self.job_dir_path, \\\n 'combined_viz.tsv')\n with open(combined_viz_path, 'w') as combo:\n for fidx, fname in enumerate(sorted(self.get_response_files())):\n fpath = os.path.join(self.results_dir_path, fname)\n with open(fpath, 'r') as vizfile:\n for lidx, line in enumerate(vizfile):\n if lidx == 0 and fidx > 0:\n # only print the column labels once\n pass\n else:\n combo.write(line)\n zipout.write(combined_viz_path, 'genes_ranked_per_phenotype.txt')\n\n top_genes_files = [f for f in os.listdir(self.results_dir_path) \\\n if f.startswith('top_genes_per_phenotype')]\n if len(top_genes_files) == 1:\n top_genes_file_path = os.path.join(\\\n self.results_dir_path, top_genes_files[0])\n zipout.write(\\\n top_genes_file_path, 'top_genes_per_phenotype_matrix.txt')\n\n if self.gg_network_metadata_full_path is not None:\n zipout.write(self.gg_network_metadata_full_path, \\\n 'interaction_network.metadata')", "def main():\n\n print \"Starting tar-maker script..\"\n # String of files we're going to be looking for\n files=\"runlocaltests.py testprocess.py verifyfiles.mix cleanup_deploy.py hashes.dict upgrade_nodes.sh deploy_helper.py\"\n\n # TODO: add list of 'optional files' to include\n\n # get the files passed in as arguments\n files_from_args = ''\n # 1 skips this file name\n print\n \n for eachfile in range(1, len(sys.argv)):\n print \"Adding custom file: \"+sys.argv[eachfile]\n files_from_args+=' '+sys.argv[eachfile]\n print\n # mash the two strings together now\n files+=files_from_args\n\n # Total number of files split by spaces\n total_files=len(files.split(' '))\n\n # Counter for found files\n num_files_found=0\n\n # Temporary tar, incrementally we'll build it up\n # Will remove the temp files (since I use -update flag)\n # for building up the .tar\n if os.path.isfile('./deploy.tar.temp'):\n os.remove('./deploy.tar.temp')\n\n\n for filename in files.split(' '):\n print ' Looking for '+filename+' in '+os.getcwd()\n if os.path.isfile('./'+filename):\n print ' File found!'\n num_files_found += 1\n shellexec('tar -rf deploy.tar.temp '+filename)\n else:\n print ' WARNING: '+filename+' NOT FOUND'\n\n print\n print \"Found \"+str(num_files_found)+\" of \"+str(total_files)+\" necessary files.\"\n print\n\n # Did we find all of the files?\n if num_files_found == total_files:\n print\n print 'All files found, finishing tar..'\n # rename the file to the final name.\n # this will over-write current deploy.tar in the dir if one exists \n shellexec('mv deploy.tar.temp deploy.tar')\n return 0\n else:\n print 'FATAL ERROR: Not all the files where found, please check that '\n print ' this script is in the same directory as the files. 
'\n print\n print \"Cleaning up temp files...\"\n \n # remove deploy.tar.temp only if it exists.\n if os.path.isfile('./deploy.tar.temp'):\n os.remove('./deploy.tar.temp')\n \n print\n print 'Finished (with errors)'\n return 1", "def __init__(self):\n\n self.write_title = TitleWriter() # TITLE project title\n self.write_options = GeneralWriter() # OPTIONS analysis options\n self.write_report = ReportWriter() # REPORT output reporting instructions\n self.write_files = SectionWriter() # FILES interface file options\n self.write_files.SECTION_NAME = \"[FILES]\"\n self.write_files.section_type = Files\n self.write_backdrop = BackdropOptionsWriter() # BACKDROP bounding rectangle and file name of backdrop image\n self.write_map = MapOptionsWriter() # MAP map's bounding rectangle and units\n self.write_raingages = SectionWriterAsList(\"[RAINGAGES]\", RainGageWriter,\n \";;Name \\tFormat \\tInterval\\tSCF \\tSource \\n\"\n \";;--------------\\t---------\\t--------\\t--------\\t----------\")\n\n self.write_hydrographs = SectionWriterAsList(\"[HYDROGRAPHS]\", UnitHydrographWriter,\n \";;Hydrograph \\tRain Gage/Month \\tResponse\\tR \\tT \\tK \\tDmax \\tDrecov \\tDinit \\n\"\n \";;--------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # unit hydrograph data used to construct RDII inflows\n\n self.write_evaporation = EvaporationWriter() # EVAPORATION evaporation data\n self.write_temperature = TemperatureWriter() # TEMPERATURE air temperature and snow melt data\n self.write_adjustments = AdjustmentsWriter() # ADJUSTMENTS monthly climate adjustments\n self.write_subcatchments = SectionWriterAsList(\"[SUBCATCHMENTS]\", SubcatchmentWriter,\n \";;Name \\tRain Gage \\tOutlet \\tArea \\t%Imperv \\tWidth \\t%Slope \\tCurbLen \\tSnowPack \\n\"\n \";;--------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\\t----------------\")\n # basic subcatchment information\n\n self.write_subareas = SectionWriterAsList(\"[SUBAREAS]\", SubareaWriter,\n \";;Subcatchment \\tN-Imperv \\tN-Perv \\tS-Imperv \\tS-Perv \\tPctZero \\tRouteTo \\tPctRouted\\n\"\n \";;--------------\\t----------\\t------------\\t--------\\t----------\\t----------\\t----------\\t---------\")\n # subcatchment impervious/pervious sub-area data\n\n #self.write_infiltration = SectionWriterAsListOf(\"[INFILTRATION]\", SectionWriter, None)\n # write_infiltration is set in as_text based on the kind of infiltration being used in the project.\n\n self.write_lid_controls = SectionWriterAsList(\"[LID_CONTROLS]\", LIDControlWriter,\n \";;Name \\tType/Layer\\tParameters\\n\"\n \";;--------------\\t----------\\t----------\")\n # low impact development control information\n\n self.write_lid_usage = SectionWriterAsList(\"[LID_USAGE]\", LIDUsageWriter,\n \";;Subcatchment \\tLID Process \\tNumber \\tArea \\tWidth \\tInitSat \\tFromImp \\tToPerv \\tRptFile \\tDrainTo\\n\"\n \";;--------------\\t----------------\\t-------\\t----------\\t----------\\t----------\\t----------\\t----------\\t------------------------\\t----------------\")\n # assignment of LID controls to subcatchments\n\n self.write_aquifers = SectionWriterAsList(\"[AQUIFERS]\", AquiferWriter,\n \";;Aquifer \\tPhi \\tWP \\tFC \\tHydCon\\tKslope\\tTslope\\tUEF \\tLED \\tLGLR \\tBEL \\tWTEL \\tUZM \\tUEF Pat\\n\"\n \";;--------------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t-------\")\n # groundwater aquifer parameters\n\n 
self.write_groundwater = SectionWriterAsList(\"[GROUNDWATER]\", GroundwaterWriter,\n \";;Subcatchment \\tAquifer \\tNode \\tEsurf \\tA1 \\tB1 \\tA2 \\tB2 \\tA3 \\tDsw \\tEgwt \\tEbot \\tWgr \\tUmc \\n\"\n \";;--------------\\t----------------\\t----------------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\")\n # subcatchment groundwater parameters\n\n self.write_gwf = SectionWriterAsList(\"[GWF]\", GWFWriter,\n \";;Subcatchment \\tFlow \\tEquation\\n\"\n \";;-------------- \\t------- \\t--------\")\n # custom groundwater flow equations\n\n self.write_snowpacks = SectionWriterAsList(\"[SNOWPACKS]\", SnowPackWriter,\n \";;Name \\tSurface \\tParameters\\n\"\n \";;--------------\\t----------\\t----------\")\n # subcatchment snow pack parameters\n\n self.write_junctions = SectionWriterAsList(\"[JUNCTIONS]\", JunctionWriter,\n \";;Name \\tElevation \\tMaxDepth \\tInitDepth \\tSurDepth \\tAponded\\n\"\n \";;--------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # junction node information\n\n self.write_outfalls = SectionWriterAsList(\"[OUTFALLS]\", OutfallWriter,\n \";;Name \\tElevation \\tType \\tStage Data \\tGated \\tRoute To\\n\"\n \";;--------------\\t----------\\t----------\\t----------------\\t--------\\t----------------\")\n # outfall node information\n\n self.write_dividers = SectionWriterAsList(\"[DIVIDERS]\", DividerWriter,\n \";;Name \\tElevation \\tDiverted Link \\tType \\tParameters\\n\"\n \";;--------------\\t----------\\t----------------\\t----------\\t----------\")\n # flow divider node information\n\n self.write_storage = SectionWriterAsList(\"[STORAGE]\", StorageWriter,\n \";;Name \\tElev. \\tMaxDepth \\tInitDepth \\tShape \\tCurve Name/Params \\tN/A-Pond\\tFevap \\tPsi \\tKsat \\tIMD\\n\"\n \";;--------------\\t--------\\t----------\\t-----------\\t----------\\t----------------------------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # storage node information\n\n self.write_conduits = SectionWriterAsList(\"[CONDUITS]\", ConduitWriter,\n \";;Name \\tFrom Node \\tTo Node \\tLength \\tRoughness \\tInOffset \\tOutOffset \\tInitFlow \\tMaxFlow\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit link information\n\n self.write_pumps = SectionWriterAsList(\"[PUMPS]\", PumpWriter,\n \";;Name \\tFrom Node \\tTo Node \\tPump Curve \\tStatus \\tStartup \\tShutoff\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\")\n # pump link information\n\n self.write_orifices = SectionWriterAsList(\"[ORIFICES]\", OrificeWriter,\n \";;Name \\tFrom Node \\tTo Node \\tType \\tOffset \\tQcoeff \\tGated \\tCloseTime\\n\"\n \";;--------------\\t----------------\\t----------------\\t------------\\t----------\\t----------\\t--------\\t----------\")\n # orifice link information\n\n self.write_weirs = SectionWriterAsList(\"[WEIRS]\", WeirWriter,\n \";;Name \\tFrom Node \\tTo Node \\tType \\tCrestHt \\tQcoeff \\tGated \\tEndCon \\tEndCoeff \\tSurcharge \\tRoadWidth \\tRoadSurf \\tCoeff. 
Curve\\n\"\n \";;--------------\\t----------------\\t----------------\\t------------\\t----------\\t----------\\t--------\\t--------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # weir link information\n\n self.write_outlets = SectionWriterAsList(\"[OUTLETS]\", OutletWriter,\n \";;Name \\tFrom Node \\tTo Node \\tOffset \\tType \\tQTable/Qcoeff \\tQexpon \\tGated\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------\\t---------------\\t----------------\\t----------\\t--------\")\n # outlet link information\n\n self.write_xsections = SectionWriterAsList(\"[XSECTIONS]\", CrossSectionWriter,\n \";;Link \\tShape \\tGeom1 \\tGeom2 \\tGeom3 \\tGeom4 \\tBarrels \\tCulvert \\n\"\n \";;--------------\\t------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit, orifice, and weir cross-section geometry\n\n self.write_transects = TransectsWriter() # transect geometry for conduits with irregular cross-sections\n\n self.write_losses = SectionWriterAsList(\"[LOSSES]\", LossWriter,\n \";;Link \\tKentry \\tKexit \\tKavg \\tFlap Gate \\tSeepage \\n\"\n \";;--------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit entrance/exit losses and flap valves\n\n self.write_controls = ControlWriter()\n # rules that control pump and regulator operation\n\n self.write_events = SectionWriterAsList(\"[EVENTS]\", EventsWriter,\n \";;Start Date \\tEnd Date\\n\")\n # events\n\n self.write_landuses = SectionWriterAsList(\"[LANDUSES]\", LanduseWriter,\n \";; \\tSweeping \\tFraction \\tLast\\n\"\n \";;Name \\tInterval \\tAvailable \\tSwept\\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # land use categories\n\n self.write_buildup = SectionWriterAsList(\"[BUILDUP]\", BuildupWriter,\n \";;Land Use \\tPollutant \\tFunction \\tCoeff1 \\tCoeff2 \\tCoeff3 \\tPer Unit\\n\"\n \";;--------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # buildup functions for pollutants and land uses\n\n self.write_washoff = SectionWriterAsList(\"[WASHOFF]\", WashoffWriter,\n \";;Land Use \\tPollutant \\tFunction \\tCoeff1 \\tCoeff2 \\tSweepRmvl \\tBmpRmvl\\n\"\n \";;--------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # washoff functions for pollutants and land uses\n\n self.write_pollutants = SectionWriterAsList(\"[POLLUTANTS]\", PollutantWriter,\n \";;Name \\tUnits \\tCrain \\tCgw \\tCrdii \\tKdecay \\tSnowOnly \\tCo-Pollutant \\tCo-Frac \\tCdwf \\tCinit\\n\"\n \";;--------------\\t------\\t----------\\t----------\\t----------\\t----------\\t----------\\t----------------\\t----------\\t----------\\t----------\")\n # pollutant information\n\n self.write_coverages = CoveragesWriter() # COVERAGES # assignment of land uses to subcatchments\n self.write_treatment = SectionWriterAsList(\"[TREATMENT]\", TreatmentWriter,\n \";;Node \\tPollutant \\tFunction\\n\"\n \";;--------------\\t----------------\\t--------\")\n # pollutant removal functions at conveyance system nodes\n\n self.write_inflows = SectionWriterAsList(\"[INFLOWS]\", DirectInflowWriter,\n \";;Node \\tConstituent \\tTime Series \\tType \\tMfactor \\tSfactor \\tBaseline\\tPattern\\n\"\n \";;--------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # INFLOWS # external hydrograph/pollutograph inflow at nodes\n\n self.write_dwf = SectionWriterAsList(\"[DWF]\", DryWeatherInflowWriter,\n 
\";;Node \\tConstituent \\tBaseline \\tPatterns \\n\"\n \";;--------------\\t----------------\\t----------\\t----------\")\n # baseline dry weather sanitary inflow at nodes\n\n self.write_patterns = SectionWriterAsList(\"[PATTERNS]\", PatternWriter,\n \";;Name \\tType \\tMultipliers\\n\"\n \";;--------------\\t----------\\t-----------\")\n # PATTERNS periodic variation in dry weather inflow\n\n self.write_rdii = SectionWriterAsList(\"[RDII]\", RDIInflowWriter,\n \";;Node \\tUnit Hydrograph \\tSewer Area\\n\"\n \";;--------------\\t----------------\\t----------\")\n # rainfall-dependent I/I information at nodes\n\n self.write_loadings = InitialLoadingsWriter()\n # initial pollutant loads on subcatchments\n\n self.write_curves = SectionWriterAsList(\"[CURVES]\", CurveWriter,\n \";;Name \\tType \\tX-Value \\tY-Value \\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # CURVES x-y tabular data referenced in other sections\n\n self.write_timeseries = SectionWriterAsList(\"[TIMESERIES]\", TimeSeriesWriter,\n \";;Name \\tDate \\tTime \\tValue\\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # time series data referenced in other sections\n\n self.write_labels = SectionWriterAsList(\"[LABELS]\", LabelWriter,\n \";;X-Coord \\tY-Coord \\tLabel\")\n # X, Y coordinates, text, and font details of labels", "def process(filename, exclude_dirs=['unittest','test','site-packages']):\n print(\"Generating {}\".format(filename))\n nb = 0\n nb_err = 0\n _main_root = os.path.dirname(filename)\n _VFS = {}\n for _mydir in (\"libs\", \"Lib\"):\n for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):\n #if _root.endswith('lib_migration'):\n _flag=False\n for _exclude in exclude_dirs:\n if _exclude in _root: #_root.endswith(_exclude):\n _flag=True\n continue\n if _flag:\n continue # skip these modules\n if '__pycache__' in _root:\n continue\n nb += 1\n\n for _file in _files:\n _ext = os.path.splitext(_file)[1]\n if _ext not in ('.js', '.py'):\n continue\n if re.match(r'^module\\d+\\..*$', _file):\n continue\n nb += 1\n\n file_name = os.path.join(_root, _file)\n _data = open(file_name, encoding='utf-8').read()\n \n if _ext == '.py':\n _data = python_minifier.minify(_data, preserve_lines=True)\n\n _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')\n _vfs_filename = _vfs_filename.replace(\"\\\\\", \"/\")\n\n if _vfs_filename.startswith('/libs/crypto_js/rollups/'):\n if _file not in ('md5.js', 'sha1.js', 'sha3.js',\n 'sha224.js', 'sha384.js', 'sha512.js'):\n continue\n\n mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')\n mod_name, ext = os.path.splitext(mod_name)\n is_package = mod_name.endswith('__init__')\n if is_package:\n mod_name = mod_name[:-9]\n _VFS[mod_name] = [ext, _data, 1]\n else:\n _VFS[mod_name] = [ext, _data]\n print((\"adding %s %s\" % (mod_name, _vfs_filename)))\n print('%s files, %s errors' % (nb, nb_err))\n with open(filename, \"w\") as file_to_write_VFS:\n file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\\n')\n file_to_write_VFS.write('__BRYTHON__.VFS=%s;\\n\\n' % json.dumps(_VFS))", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def writeFiles(self, directory = \"./\"):\n self.mass = []\n self.zero = 0\n 
self.natoms = self.numMonomer\n self.nangles = 0\n self.ndihedrals = 0\n\n self.ntypes = 4\n\n # set masses of all beads to be 1\n # in principle, the mass of counterions and salt ions should be smaller\n # expect this difference will no matter in terms of complexation of polyelectrolytes\n for i in range(self.ntypes):\n self.mass.append(1)\n\n\n\n self.bdtypes = 1\n self.angtypes = 0\n self.dihtypes = 0\n self.improtypes = 0\n\n iFileLammpsName = directory + \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.lammps\".\\\n format(self.lenPa, self.numPa, self.lenPc, self.numPc, self.volRatio, self.chargeRepeat)\n iFileLammps = open(iFileLammpsName, 'w')\n\n iFileXYZName = directory + \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.xyz\".\\\n format(self.lenPa, self.numPa, self.lenPc, self.numPc, self.volRatio, self.chargeRepeat)\n iFileXYZ = open(iFileXYZName, 'w' )\n\n iFileXYZ.write(\"{0}\\n\".format(self.natoms))\n iFileXYZ.write(\"data.polyelectrolyte.xyz\\n\")\n\n iFileLammpsHeader = \"data file for mixtures of charged polymer chains\\n\" + \\\n \"\\n\" + \\\n \"{0:10d} atoms\\n\".format(self.natoms) + \\\n \"{0:10d} bonds\\n\".format(self.numBonds) + \\\n \"{0:10d} angles\\n\".format(self.nangles) + \\\n \"{0:10d} dihedrals\\n\".format(self.ndihedrals) + \\\n \"{0:10d} impropers\\n\".format(self.zero) + \\\n \"\\n\" +\\\n \"{0:10d} atom types\\n\".format(self.ntypes) + \\\n \"{0:10d} bond types\\n\".format(self.bdtypes) + \\\n \"{0:10d} angle types\\n\".format(self.angtypes) + \\\n \"{0:10d} dihedral types\\n\".format(self.dihtypes) + \\\n \"{0:10d} improper types\\n\".format(self.improtypes) + \\\n \"\\n\" + \\\n \" {0:16.8f} {1:16.8f} xlo xhi\\n\".format(self.lx, self.hx) + \\\n \" {0:16.8f} {1:16.8f} ylo yhi\\n\".format(self.ly, self.hy) + \\\n \" {0:16.8f} {1:16.8f} zlo zhi\\n\".format(self.lz, self.hz) + \\\n \"\\n\" + \\\n \"Masses\\n\" + \\\n \"\\n\"\n\n iFileLammps.write(iFileLammpsHeader)\n for i in range(self.ntypes):\n iFileLammps.write( \"{0} {1:8.3f}\\n\".format(i+1, self.mass[i]))\n\n iFileLammps.write(\"\\nAtoms\\n\\n\")\n \n \n\n for i in range(self.natoms):\n if self.atomsType[i] == 1 or self.atomsType[i] == 3:\n iFileXYZ.write(\"S {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 2:\n iFileXYZ.write(\"P {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 4:\n iFileXYZ.write(\"N {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 5:\n iFileXYZ.write(\"A {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 6:\n iFileXYZ.write(\"C {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 7:\n iFileXYZ.write(\"I {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 8:\n iFileXYZ.write(\"K {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n\n iFileLammps.write(\"{0} {1} {2} {3} {4} {5} {6}\\n\".format(i+1, \\\n self.molId[i], \\\n self.atomsType[i], \\\n self.atomsCharge[i], \\\n self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n\n iFileLammps.write(\"\\nBonds\\n\\n\")\n for i in range(self.numBonds):\n 
iFileLammps.write(\"{0} 1 {1} {2}\\n\".format(i+1, self.bondList[i][0], self.bondList[i][1]))\n\n iFileXYZ.close()\n iFileLammps.close()", "def test_all(self):\n for fixture in get_fixtures(self.fixtures):\n print('Testing instrumenter with ' + fixture)\n res = self.run_compilation(fixture)\n if res.returncode != 0:\n out = res.stderr.decode('utf-8')\n out_path = 'instrumenter_errors.txt'\n with open(out_path, 'w') as error_fd:\n error_fd.write(out)\n raise self.failureException(out)", "def output_files_as_file(output_files, output_type: str = \"svg\", debug=False):\n for counter, output_file in enumerate(output_files):\n plant_uml_command = 'java -Djava.awt.headless=true -jar \"{0}\" \"{1}\"'.format(plant_uml_jar, output_file)\n if debug:\n plant_uml_command = '{0} -v'.format(plant_uml_command)\n generate_svg = '{0} -t{1}'.format(plant_uml_command, output_type)\n try:\n logging.debug('Generating {3} diagram {1}/{2}: {0}'.format(\n generate_svg,\n counter + 1,\n len(output_files),\n output_type.upper()))\n os.system(generate_svg)\n except:\n logging.debug('Could not generate {0} diagram'.format(output_type))\n traceback.print_exc()", "def fixity_checker_report(observations, outputdir):\n logging.debug(\"{0}, {1}\".format(observations, outputdir))\n shards = defaultdict(dict)\n _mkdir(outputdir)\n # sort into bins for transport\n for key, value in six.iteritems(observations):\n # first two characters are the key to the \"shard\"\n shards[key[:2]].update({key: value})\n # write out json for each bin\n for key, value in six.iteritems(shards):\n out = os.path.join(outputdir, ''.join([key, '.json']))\n with open(out, 'w') as outfile:\n json.dump(shards[key], outfile, sort_keys=True,\n indent=4, separators=(',', ': '))\n del shards", "def get_output_files(self, action):\n assert action == \"run\"\n for ext in self.extensions:\n yield ext[1:].replace(\".\", \"_\"), self.base_path_out.format(mapper=self.name, ext=ext)\n for ext in (\".bamstats.html\", \".bamstats.txt\", \".flagstats.txt\", \".idxstats.txt\"):\n path = (\n \"work/{mapper}.{{library_name}}/report/bam_qc/\" \"{mapper}.{{library_name}}.bam{ext}\"\n ).format(mapper=self.name, ext=ext)\n yield \"report_\" + \".\".join(ext.split(\".\")[1:3]).replace(\".\", \"_\"), path\n for ext in (\n \".bamstats.html.md5\",\n \".bamstats.txt.md5\",\n \".flagstats.txt.md5\",\n \".idxstats.txt.md5\",\n ):\n path = (\n \"work/{mapper}.{{library_name}}/report/bam_qc/\" \"{mapper}.{{library_name}}.bam{ext}\"\n ).format(mapper=self.name, ext=ext)\n yield \"report_\" + \".\".join(ext.split(\".\")[1:3]).replace(\".\", \"_\") + \"_md5\", path", "def mkdirout():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n #outdir = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n if os.path.exists(output_dir()):\n sys.exit(\"ERROR. Unable to create output directory. %s already exists. Please, make sure you choose an output path not containing former results.\" % output_dir() ) # LOGGING?\n else:\n try:\n os.mkdir(output_dir())\n except OSError:\n sys.exit(\"ERROR. 
Unable to create output directory %s.\" % output_dir() )\n os.mkdir(output_tmpdir())\n os.mkdir(output_tmpdir(\"pisacov\"))\n os.mkdir(output_tmpdir(\"pisa\"))\n os.mkdir(output_tmpdir(\"deepmetapsicov\"))", "def process_svgs(model_info, directory, output, allow_missing=False):\n r2dt.write(model_info, directory, output, allow_missing=allow_missing)", "def build(self) -> None:\n\n print(\"Genereting files..\")\n self.doc = self.doc + r'\\end{document}'\n\n f = open(\"latex\\\\\" + self.report_name + '.tex', 'w')\n f.write(self.doc)\n f.close()\n\n os.chdir('latex')\n\n cmd = ['pdflatex', '-interaction', 'nonstopmode', self.report_name + '.tex']\n #cmd = ['pdflatex', '-interaction', self.report_name + '.tex']\n\n for i in range(2):\n proc = subprocess.Popen(cmd)\n proc.communicate()\n retcode = proc.returncode\n if not retcode == 0:\n os.chdir('..')\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n os.unlink(self.report_name + '.aux')\n os.unlink(self.report_name + '.lof')\n os.unlink(self.report_name + '.log')\n os.unlink(self.report_name + '.lot')\n os.unlink(self.report_name + '.out')\n os.unlink(self.report_name + '.toc')\n\n os.chdir('..')", "def make_stats_files(sample_dc, otu_dc, degree_counts, num_con_cat, num_con,\r\n num_cat, cat_by_sample, dir_path):\r\n output = open(os.path.join(dir_path,\r\n \"stats/real_dc_sample_degree.txt\"), 'w')\r\n sample_dc_out = sorted(sample_dc.items())\r\n sample_dc_str = '\\n'.join(['\\t'.join(map(str, t)) for t in sample_dc_out])\r\n output.write(''.join([\"# Just Sample degree counts\\n\",\r\n \"Degree\tSample Count\\n\", sample_dc_str]))\r\n output.close()\r\n\r\n output = open(os.path.join(dir_path,\r\n \"stats/real_dc_otu_degree.txt\"), 'w')\r\n otu_dc_out = sorted(otu_dc.items())\r\n otu_dc_str = '\\n'.join(['\\t'.join(map(str, t)) for t in otu_dc_out])\r\n output.write(''.join([\"# Just OTU degree counts\\n\",\r\n \"Degree\tOTU Count\\n\", otu_dc_str]))\r\n output.close()\r\n\r\n output = open(os.path.join(dir_path,\r\n \"stats/real_dc_sample_otu_degree.txt\"), 'w')\r\n dc_out = sorted(degree_counts.items())\r\n dc_str = '\\n'.join(['\\t'.join(map(str, t)) for t in dc_out])\r\n output.write(''.join([\"# Sample and OTU degree counts\\n\",\r\n \"Degree\tBoth Count \\n\", dc_str]))\r\n output.close()\r\n\r\n num_pairs = len(cat_by_sample) * (len(cat_by_sample) - 1) / 2\r\n\r\n num_pairs_line = \"NUM PAIRS: %s\" % str(num_pairs)\r\n num_cat_pairs_line = \"NUM SAME CAT PAIRS: %s\"\r\n num_con_pairs_line = \"NUM CONNECTED PAIRS: %s\" % int(num_con)\r\n\r\n for cat, num in num_con_cat.items():\r\n filename = \"stats/real_cat_stats_%s.txt\" % cat\r\n output = open(os.path.join(dir_path, filename), 'w')\r\n num_neither = int((num_pairs - num_con) - (num_cat[cat] - num))\r\n stats_line = ''.join(['(', str(int(num)), ', ', str(int(num_con - num)),\r\n ', ', str(int(num_cat[cat] - num)), ', ',\r\n str(num_neither), ')'])\r\n G_stat = G_2_by_2(int(num), int(num_con - num),\r\n int(num_cat[cat] - num), num_neither)\r\n output.write(\r\n '\\n'.join([num_pairs_line,\r\n num_cat_pairs_line % num_cat[\r\n cat],\r\n num_con_pairs_line,\r\n stats_line,\r\n str(G_stat)]))\r\n output.close()", "def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']", "def dump(self):\n self.hasher.update_time_dicts() # Makes the time measurements available\n\n print(\" Creating a results folder in {} and storing all 
results there.\".format(self.config.output_dir))\n if not os.path.isdir(self.config.output_dir):\n os.mkdir(self.config.output_dir)\n\n print(\" Dumping profile ...\")\n profile_file_name = \"{}_{}_profile\".format(self.name, self.config.mode)\n with open(os.path.join(self.config.output_dir, profile_file_name), \"a\") as file:\n profile = {\"config\": self.config.dump(),\n \"hash\": self.hasher.hash_time_dict,\n \"find\": self.hasher.find_time_dict}\n\n json.dump(profile, file)\n\n print(\" Dumping matches ...\")\n for i, match in enumerate(self.__matched_offsets):\n if int(match[0] > match[1]):\n offset_a = match[1]\n offset_b = match[0]\n else:\n offset_a = match[0]\n offset_b = match[1]\n\n match_file_name = \"{}_{}_{}_{}\".format(self.name, self.config.mode, offset_a, offset_b)\n with open(os.path.join(self.config.output_dir, match_file_name), \"w\") as file:\n infos = \"Config:\\n: {}\".format(self.config)\n text_a = \"\"\n text_b = \"\"\n if self.config.dump_text:\n text_a = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_a))\n text_b = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_b))\n\n file.write(\"{}\\n\\n{}\\n\\n{}\\n\\n{}\".format(infos, text_a, \"#\"*25, text_b))\n\n if self.config.dump_graph:\n print(\" Creating graphs ...\")\n x1, x2 = list(), list()\n y1, y2 = list(), list()\n t_all = 0\n for element, t in self.hasher.hash_time_dict.items():\n t_all += t\n x1.append(element)\n y1.append(t_all)\n\n t_all = 0\n for element, t in self.hasher.find_time_dict.items():\n t_all += t\n x2.append(element)\n y2.append(t_all)\n\n self.__plot(os.path.join(self.config.output_dir, \"hash_time\"), x1, y1)\n self.__plot(os.path.join(self.config.output_dir, \"find_time\"), x2, y2)\n\n print(\"\\n\\n\")\n\n return", "def writeFileBloatReport(f, baselineName, buildName):\n logging.info('Running bloaty diff between %s and %s', baselineName, buildName)\n f.write('Comparing %s and %s:\\n\\n' % (baselineName, buildName))\n\n result = subprocess.run(\n ['bloaty', '--csv', buildName, '--', baselineName],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n\n if result.returncode != 0:\n logging.warning('Bloaty execution failed: %d', result.returncode)\n f.write('BLOAT EXECUTION FAILED WITH CODE %d:\\n' % result.returncode)\n\n content = result.stdout.decode('utf8')\n\n f.write(content)\n f.write('\\n')\n\n result = ComparisonResult(os.path.basename(buildName))\n try:\n reader = csv.reader(io.StringIO(content))\n\n for row in reader:\n section, vm, f = row\n if (section in SECTIONS_TO_WATCH) or (vm not in ['0', 'vmsize']):\n result.sectionChanges.append(SectionChange(section, int(f), int(vm)))\n except:\n pass\n\n return result", "def main():\n if not os.path.exists(testcase.TestCase.dir_results):\n os.makedirs(testcase.TestCase.dir_results)\n if env.get('DEBUG').lower() == 'true':\n logging.config.fileConfig(config.get_xtesting_config(\n 'logging.debug.ini', constants.DEBUG_INI_PATH_DEFAULT))\n else:\n logging.config.fileConfig(config.get_xtesting_config(\n 'logging.ini', constants.INI_PATH_DEFAULT))\n logging.captureWarnings(True)\n os.chdir(testcase.TestCase.dir_results)\n Campaign.zip_campaign_files()", "def print_analysis(self,version,results,tests,test_details,test_order,\n output_dir,diffs_only):\n def format_result(r):\n return '%s %s' % (r.outcome,r.get_cause())\n\n main_template = makolookup.get_template(\"main.mako\")\n detail_template = makolookup.get_template(\"detail.mako\")\n\n f = open(os.path.join(output_dir,'index.html'),'w')\n try:\n 
f.write(main_template.render(version=version,results=results,tests=tests,\n test_details=test_details,test_order=test_order,\n time2datetime=time2datetime))\n finally:\n f.close()\n\n for test_id,test_detail in test_details.items():\n #print ('Detail: %s' % test_id)\n f = open(os.path.join(output_dir,test_id+'.html'),'w')\n try:\n f.write(detail_template.render(version=version,test_id=test_id,\n test_detail=test_detail,diffs_only=diffs_only))\n except:\n f.write(\"Error while processing output.\")\n finally:\n f.close()", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def main(outdir, outfile, infile, logfile, verbose):\n\n error_ctr = 0\n if infile is None:\n print(Fore.RED + \"--infile was not specified\")\n print(Style.RESET_ALL + '', end='')\n error_ctr += 1\n\n if error_ctr > 0:\n print(Fore.RED + \"Required command-line arguments were not specified\")\n print(Style.RESET_ALL + '', end='')\n sys.exit(1)\n \n assert isinstance(infile, str)\n\n if not os.path.exists(infile):\n print(Fore.RED + \"'{}' does not exist\".format(infile))\n print(Style.RESET_ALL + '', end='')\n sys.exit(1)\n \n if verbose is None:\n verbose = DEFAULT_VERBOSE\n print(Fore.YELLOW + \"--verbose was not specified and therefore was set to default '{}'\".format(verbose))\n print(Style.RESET_ALL + '', end='')\n\n global g_verbose\n g_verbose = verbose\n\n if outdir is None:\n outdir = DEFAULT_OUTDIR\n print(Fore.YELLOW + \"--outdir was not specified and therefore was set to default '{}'\".format(outdir))\n print(Style.RESET_ALL + '', end='')\n\n assert isinstance(outdir, str)\n\n if not os.path.exists(outdir):\n pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)\n print(Fore.YELLOW + \"Created output directory '{}'\".format(outdir))\n print(Style.RESET_ALL + '', end='')\n\n if logfile is None:\n logfile = outdir + '/' + os.path.basename(__file__) + '.log'\n print(Fore.YELLOW + \"--logfile was not specified and therefore was set to '{}'\".format(logfile))\n print(Style.RESET_ALL + '', end='')\n\n assert isinstance(logfile, str)\n\n if outfile is None:\n outfile = outdir + '/' + os.path.basename(__file__) + '.txt'\n print(Fore.YELLOW + \"--outfile was not specified and therefore was set to '{}'\".format(outfile))\n print(Style.RESET_ALL + '', end='')\n\n assert isinstance(outfile, str)\n\n\n logging.basicConfig(filename=logfile, format=LOGGING_FORMAT, level=LOG_LEVEL)\n\n derive_terms_and_comments(infile, outfile)", "def make_qa_report(metadata, base_dir, write_tag):\n # Change directory as QA code writes output directly to the running directory\n work_dir = os.getcwd()\n\n filenames = metadata['FITSImageFilename']\n for i, fits_file in enumerate(filenames):\n pb_dir = _productdir(metadata, base_dir, i, '_PB', write_tag)\n pb_filebase = os.path.splitext(fits_file)[0] + '_PB'\n\n log.info('Write QA report output')\n os.chdir(pb_dir)\n pb_fits = os.path.join(pb_dir, pb_filebase + FITS_EXT)\n command = 
'/home/kat/valid/Radio_continuum_validation -I {} --telescope MeerKAT -F'\\\n ' /home/kat/valid/filter_config_MeerKAT.txt -r'.format(pb_fits)\n sysarg = shlex.split(command)\n with log_qa(log):\n rcv.main(sysarg[0], sysarg[1:])\n os.chdir(work_dir)", "def _open_output_files(self):\n self.links_outfile = open(self.opts.links_outfile, 'wb')", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline,\n verbose):\n sample_sheet = KLSampleSheet(sample_sheet)\n df_sheet = sample_sheet_to_dataframe(sample_sheet)\n\n if pipeline == 'atropos-and-bowtie2':\n click.echo('Stats collection is not supported for pipeline '\n 'atropos-and-bowtie2')\n else:\n stats = run_counts(run_dir, sample_sheet)\n\n stats['sample_name'] = \\\n df_sheet.set_index('lane', append=True)['sample_name']\n\n # returns a map of (run, project_name, lane) -> preparation frame\n preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline)\n\n os.makedirs(output_dir, exist_ok=True)\n\n for (run, project, lane), df in preps.items():\n fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv')\n\n if pipeline == 'fastp-and-minimap2':\n # stats are indexed by sample name and lane, lane is the first\n # level index. 
When merging, make sure to select the lane subset\n # that we care about, otherwise we'll end up with repeated rows\n df = df.merge(stats.xs(lane, level=1), how='left',\n on='sample_name')\n\n # strip qiita_id from project names in sample_project column\n df['sample_project'] = df['sample_project'].map(\n lambda x: re.sub(r'_\\d+$', r'', x))\n\n # center_project_name is a legacy column that should mirror\n # the values for sample_project.\n df['center_project_name'] = df['sample_project']\n\n df.to_csv(fp, sep='\\t', index=False)\n\n if verbose:\n project_name = remove_qiita_id(project)\n # assume qiita_id is extractable and is an integer, given that\n # we have already passed error-checking.\n qiita_id = project.replace(project_name + '_', '')\n print(\"%s\\t%s\" % (qiita_id, abspath(fp)))", "def main(files, target_depth, tol_diff, depth_fld, temperature_fld,\n salinity_fld):\n ymd_fmt = \"{0}-{1}-{2}\"\n # Note change of line terminator to make it compatible with AWK\n writer = csv.writer(sys.stdout, lineterminator='\\n')\n writer.writerow([\"time\", \"bottle_number\", \"depth\",\n \"temperature\", \"salinity\"])\n for bottle_f in files:\n with bottle_f:\n for lineno, line in enumerate(bottle_f, start=1):\n if re.search('^(\\*|#)', line):\n continue\n if re.search('^ *bottle', line.lower()):\n databeg = lineno + 2\n continue\n if (lineno - databeg) % 2 == 0:\n parsedl = line.split()\n mm = _french_month(parsedl[1])\n if mm:\n fmt = '%Y-%m-%d %H:%M:%S'\n yyyymmdd = ymd_fmt.format(parsedl[3], mm, parsedl[2])\n else:\n fmt = '%Y-%b-%d %H:%M:%S'\n yyyymmdd = ymd_fmt.format(parsedl[3], parsedl[1],\n parsedl[2])\n bottle_no = parsedl[0]\n depth = parsedl[depth_fld - 1]\n temperature = parsedl[temperature_fld - 1]\n salinity = parsedl[salinity_fld - 1]\n else:\n if lineno <= databeg:\n continue\n hhmmss = line.split()[0]\n ymd = datetime.strptime(\"{0} {1}\".format(yyyymmdd,\n hhmmss), fmt)\n depth_dev = abs(float(depth) - target_depth)\n if (depth_dev < tol_diff):\n line_out = (ymd.strftime('%Y-%m-%d %H:%M:%S'),\n bottle_no, depth, temperature,\n salinity)\n writer.writerow(line_out)", "def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n 
file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))", "def _test_output_created(self):\n TestHarness._test_output_created(self)\n source = glob.glob(os.path.join(os.getcwd(), 'source.*'))\n assert len(source) == 1, 'Either multiple or no source files ' \\\n 'exist.'\n assert source[0].endswith('h5'), \\\n 'Source file is not a HDF5 file.'", "def _reportFileAnalytics(self, sourceFiles, outputFile, language):\n \n #is this a single file or a set of files?\n bSingleFile = len(sourceFiles) == 1\n \n #open the output file for appending\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write ('<br>\\n=======================================================<br>\\n')\n if bSingleFile:\n f.write(sourceFiles[0]) #if this is a single file, simply output its name\n else: #if these are multiple files, list the directory name in bold\n f.write('<b>' + os.path.split(sourceFiles[0])[0] + '</b>') #directory name in bold\n f.write ('<br>\\n=======================================================<br>\\n</font>')\n\n #for each file, report the analytics\n for sourceFile in sourceFiles:\n if bSingleFile == False: #only print the filename if we have more than 1 file in the list\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write(os.path.split(sourceFile)[1] + '</font><br>\\n')\n \n if language == 'C++':\n numLines, numComments = self.analyzeCppCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write ('<br>\\n~#Comments: ' + str(numComments) + '<br>\\n')\n \n if language == 'Python':\n numLines, numDocStr, numComments, numDefs, numClasses = self.analyzePythonCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Functions: ' + str(numDefs))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Classes: ' + str(numClasses))\n f.write ('<br>\\n~#Comments: ' + str(numComments))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#DocStrs: ' + str(numDocStr) + '<br>\\n')\n \n f.write('</font><br>') #skip a line between entries\n f.close()", "def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? 
\".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))", "def finalize(self):\n with runez.Anchored(self.folder):\n runez.ensure_folder(self.build)\n CFG.set_base(self.build)\n pspec = PackageSpec(CFG, specced(self.package_name, self.package_version))\n exes = PACKAGER.package(pspec, self.build, runez.resolved_path(self.dist), self.requirements)\n if exes:\n report = PrettyTable([\"Executable\", self.sanity_check], border=self.border)\n report.header.style = \"bold\"\n if not self.sanity_check:\n report.header[1].shown = False\n\n for exe in exes:\n exe_info = None\n if self.sanity_check:\n r = runez.run(exe, self.sanity_check)\n exe_info = r.output or r.error\n\n report.add_row(runez.quoted(exe), exe_info)\n if self.symlink and exe and self.root:\n self.symlink.apply(exe, self.root)\n\n return report" ]
[ "0.6348986", "0.62728393", "0.6065827", "0.6017353", "0.6005254", "0.6003141", "0.5832814", "0.57835436", "0.5780004", "0.57281613", "0.57143897", "0.57073367", "0.5691607", "0.566522", "0.55969507", "0.55673474", "0.5560966", "0.55485785", "0.5524467", "0.5479293", "0.5468246", "0.5461302", "0.5449692", "0.54496664", "0.5447127", "0.5442134", "0.54200333", "0.5418777", "0.53943086", "0.5390904", "0.538892", "0.53828377", "0.53811", "0.53684026", "0.5363767", "0.5363483", "0.53625345", "0.5338162", "0.53349155", "0.5328145", "0.5319388", "0.53173035", "0.5314586", "0.5313175", "0.5310048", "0.5303101", "0.5295672", "0.5294945", "0.5279631", "0.5276006", "0.5266051", "0.5264104", "0.5263004", "0.52599436", "0.5255827", "0.5252761", "0.5234696", "0.5234043", "0.52211094", "0.52148277", "0.52109534", "0.5209101", "0.52062225", "0.52041584", "0.52035284", "0.5197568", "0.51968855", "0.5187511", "0.51867384", "0.51831627", "0.5182113", "0.5181749", "0.51816964", "0.51814085", "0.5179488", "0.5175674", "0.51753694", "0.5174312", "0.5169314", "0.51683706", "0.5165631", "0.51643044", "0.5158218", "0.51569873", "0.51542723", "0.5149371", "0.5148324", "0.51476574", "0.5145627", "0.51452416", "0.51434857", "0.51433045", "0.51430374", "0.5142193", "0.51394707", "0.51392084", "0.5135748", "0.5135449", "0.513525", "0.5131956" ]
0.53366596
38
Ensure this function returns the correct number of entities with the specified tag.
def test_get_sets_by_category(): group_categories = get_sets_by_category(mb, "Group") assert len(group_categories) == 5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])", "def getTagsNum(self):\r\n self.gettags()", "def count_tags():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_tags()\r\n trans.commit()", "def get_count(self):\n return len(self._tags)", "def test_tags_top_100_count(self):\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n tags = po.get_top_100_tags()\n assert len(tags) <= 100, \\\n \"# tags is %s, which is greater than 100\" % (len(tags))", "def test_task_count_tags(self):\r\n tasks.count_tags()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TAG_CT)\r\n self.assertEqual(stat.data, 4)", "def test_tag_count_matches_tagged_items(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n po.form.footer.display_limit('All')\n tags_browse_url = po.current_url()\n\n po2 = self.catalog.load_pageobject('TagsViewPage')\n\n for row in po.search_result_rows():\n tag_info = row.value()\n\n self.browser.proxy_client.new_har(\"page\")\n row.goto_tag()\n har_entry = self.browser.page_load_details()\n\n tags_view_url = po2.current_url()\n\n # check for errors loading the page\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"clicking on the tag '%s' on '%s' \" \\\n % (tag_info['name'],tags_browse_url) + \\\n \"returned an error response code on the page '%s'. \" \\\n % (tags_view_url) + \\\n \"http archive follows:\\n%s\" \\\n % (pprint.pformat(har_entry))\n\n # get the total number of resources\n (junk,junk,total) = po2.get_pagination_counts()\n\n # compare the total number of resources\n # with the count provided by the tag\n assert tag_info['count'] == total, \\\n \"The number of resources listed for the\" \\\n + \" tag '%s' (%s) on %s does not match the total\" \\\n % (tag_info['name'],tag_info['count'],tags_browse_url) \\\n + \" number of resources listed on %s (%s)\" \\\n % (tags_view_url,total)\n\n # go back to the Tags page\n self.browser._browser.back()", "def test_tags_recently_used_count(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n tags = po.get_recently_used_tags()\n assert len(tags) <= 25, \\\n \"# tags is %s, which is greater than 25\" % (len(tags))", "def all_tags_sized(cls):\n tag_counts = memcache.get('all_tags_sized')\n if tag_counts is None:\n tag_counts = defaultdict(int)\n\n plaques = Plaque.query().filter(Plaque.approved == True).fetch()\n for plaque in plaques:\n for t in plaque.tags:\n tag_counts[t] += 1\n\n tag_fontsize = {}\n for tag, count in tag_counts.items():\n if count < 5:\n tag_fontsize[tag] = 10\n elif count < 10:\n tag_fontsize[tag] = 13\n elif count < 20:\n tag_fontsize[tag] = 16\n elif count < 40:\n tag_fontsize[tag] = 19\n elif count < 120:\n tag_fontsize[tag] = 22\n else:\n tag_fontsize[tag] = 25\n memcache_status = memcache.set('all_tags_sized', tag_fontsize)\n if not memcache_status:\n logging.debug(\"memcaching for all_tags_sized failed\")\n else:\n logging.debug(\"memcache.get worked for all_tags_sized\")\n\n return tag_counts", "def __len__(self):\n return len(self._tagged)", "def entity_counts(doc):\n \n tags = []\n for token in doc.ents:\n tags.append(token.label_)\n frequency = dict(Counter(tags).most_common())\n\n return frequency", "async def totalImages(self, tags):\n with async_timeout.timeout(10):\n url = 
self.urlGen(tags=tags, PID=0)\n async with self.session.get(url=url) as XMLData:\n XMLData = await XMLData.read()\n XMLData = ET.XML(XMLData)\n XML = self.ParseXML(XMLData)\n return int(XML['posts']['@count'])\n return None", "def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts", "def tag_count(self, tag):\n return sum(self._out_counts.get(tag, {}).values())", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def get_entities(tags):\n pass", "def getTags(number=None):", "def N_genes_in_dataset(self):\n return len(self.all_genes_in_dataset)", "def count_total_tags():\r\n total = TagMgr.count()\r\n stat = StatBookmark(attrib=TAG_CT, data=total)\r\n DBSession.add(stat)", "def get_entity_count(cls):\n return int(cls.db.get(\"entity_count\"))", "def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))", "def count_posTags(self):\n pos_list = ['NUM', 'AUX', 'ADV', 'DET', 'NOUN', 'ADP', 'ADJ', 'NOUN', 'VERB', 'NOUN', 'PUNCT', 'PUNCT']\n result = count_posTags(pos_list)\n self.assertEqual(result, (3, 1, 1, 1, 2))", "def check_all_tag_photo_counts(self):\n data = self.db.get_query_as_list(\n '''\n select * from tag\n '''\n )\n\n for tag in data:\n print()\n print(tag)\n # query for the number of photos using the tag\n # compare it to the number in the photos column\n # update if necessary\n query_count = self.db.get_query_as_list(\n '''\n select count(tag_name)\n from photo_tag\n where tag_name = \"{}\"\n '''.format(tag['tag_name'])\n )\n\n if query_count[0]['count(tag_name)'] == tag['photos']:\n print('OK', 'actual photos number with tag',\n query_count[0]['count(tag_name)'], 'in photos column', tag['photos'])\n else:\n print('MISSMATCH IN PHOTOS AND PHOTOS WITH TAG\\n', 'actual photos number with tag',\n query_count[0]['count(tag_name)'], 'in photos column', tag['photos'])\n\n tag_name = tag['tag_name']\n count = query_count[0]['count(tag_name)']\n break\n\n print('\\nDONE NO PROBLEMS!')", "def count(context, namespace_name, session):\n namespace = namespace_api.get(context, namespace_name, session)\n query = (session.query(func.count(models.MetadefTag.id)).filter_by(\n namespace_id=namespace['id']))\n return query.scalar()", "def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()", "def num_articles(self):\n\t\treturn len(index)", "def count_entity_doc(document):\n count = {}\n for line in document[1:]:\n _, _, entity_type, _, _ = conll04_parser.split_line(line)\n if entity_type in count:\n count[entity_type] += 1\n else:\n count[entity_type] = 1\n return count", "def count_words_in_blob_if_tag_meets_criteria(blob, tag_criteria_function):\n word_tags_that_meet_critera = [word_tag for word_tag in blob.tags if \\\n tag_criteria_function(word_tag[1])]\n return len(word_tags_that_meet_critera)", "def find_usefull_tags(tags, tagmodel, tag_count_vect):\n\n final_tags = []\n for tag in tags:\n if tag == None:\n continue\n else:\n tagpd = 
pd.Series(tag)\n tag_feature = tag_count_vect.transform(tagpd)\n result = tagmodel.predict(tag_feature)\n\n result = result.tolist() \n result = str(result)\n if result == '[1]':\n final_tags.append(tag)\n final_tags = list(dict.fromkeys(final_tags))\n return(final_tags)", "def get_entity_count(self, collection):\n # Use 'entity_count' attribute when available. It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.entity_count\n if hasattr(collection, \"entity_count\")\n else collection.entity_set.count()\n )", "def test_get_all_tags(self):\n print(self.session.tags)\n self.assertEqual(\n len(self.session.tags),\n (3 * len(self.session.wp_post_objects)) #3 tags added by default\n )", "def size(self):\n return len(self.id2term)", "def count_images_with_tags(self, tags):\n\t\tif not tags: \n\t\t\treturn self.session.query(Image.id).count()\n\t\t\n\t\treturn self.build_select_images_with_tags_query(tags).count()", "def test_total_ct(self):\r\n ct = 5\r\n for i in range(ct):\r\n t = Tag(gen_random_word(10))\r\n DBSession.add(t)\r\n\r\n ct = TagMgr.count()\r\n self.assertEqual(5, ct, 'We should have a total of 5: ' + str(ct))", "def word_given_tag(word, tag, train_bag): # train_bag=train_tagged_words\n tag_list = [pair for pair in train_bag if pair[1] == tag]\n count_tag = len(tag_list) # total number of times the passed tag occurred in train_bag\n w_given_tag_list = [pair[0] for pair in tag_list if pair[0] == word]\n # now calculate the total number of times the passed word occurred as the passed tag.\n count_w_given_tag = len(w_given_tag_list)\n return count_w_given_tag, count_tag", "def compute_geotag_usage():\n year = 2014\n\n for tag in TEST_TAGS:\n\n tags = [tag]\n query = FlickrQuery(tags=tags, year=year)\n geotagged_query = FlickrQuery(tags=tags, year=year, only_geotagged=True)\n\n total = flickr_api.count_photos(query)\n geotagged = flickr_api.count_photos(geotagged_query)\n print('%s: %.2f (%d total) (%d)' % (tags, geotagged / total, total, geotagged))", "def test_tags_browse_valid_tag_name_count(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n po.form.footer.display_limit('All')\n\n for row in po.search_result_rows():\n rowv = row.value()\n\n assert rowv['name'].strip() != '', \\\n \"invalid name for tag '%s': name is blank\" \\\n % (rowv['name'])\n\n assert rowv['count'] >= 0, \\\n \"invalid count for tag '%s': count = %s\" \\\n % (rowv['name'],rowv['count'])", "def __len__(self):\n\t\treturn len(self.hitos)", "def statCountContainsTag(node, tag):\n if node.nodeName == tag:\n return True\n if node.children == []:\n return False\n c = False\n for child in node.children:\n if statCountContainsTag(child, tag):\n return True\n return False", "def count(self):\n return len(self.find())", "def __len__(self) -> int:\n\t\treturn len(self._articles)", "def print_number_of_entities(self, entity_col):\n for df in self:\n print(\"# of entities: \", len(df[entity_col].unique()))", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def testArticleCount(self):\n\n self.articleCount(17)", "def new_messages_number(self, tag):\n return len(self._message_queue.setdefault(tag,[]))", "def __len__(self):\n return len(self.chunk_idxes)", "def getNrEntries(self):\n return len(self.data)", "def entities_check(self, token_sequence, entities):\n entities_count = {k: len(v) for k, v in entities.items()}\n for token in token_sequence:\n for special_token in self.possible_entities:\n if token == 
special_token:\n if special_token not in entities_count:\n return False\n else:\n entities_count[special_token] -= 1\n\n for _, v in entities_count.items():\n if v < 0:\n return False\n\n return True", "def entity_tag_sentence(sentence):\n return ne_chunk(sentence)", "def __len__(self):\n return self.document_count", "def count(item):\n return len(item)", "def total(cls) -> int:\n entity_list = cls.query.all()\n if entity_list:\n return len(entity_list)\n return 0", "def __len__(self):\n return len(self.essids)", "def get_n_authors(soup):\n n_authors = len(soup.find_all(attrs={\"name\":\"Author\"}))\n return(n_authors)", "def count():", "def word_given_tag(word:str, tag:str, train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Tuple[int, int]: \n tag_list = [pair for pair in train_bag if pair[1] == tag]\n count_tag = len(tag_list)\n w_given_tag_list = [pair[0] for pair in tag_list if pair[0] == word]\n count_w_given_tag = len(w_given_tag_list)\n\n return (count_w_given_tag, count_tag)", "def count(self):\n return self.size()", "def get_tag_index(tags, tag_to_search):\n counter = 0\n for t in tags:\n if tag_to_search == t:\n break\n else:\n counter+=1\n return counter", "def count(self):\n return len(self._elements)", "def dashboard_content_article_tag_cloud():\n tag_stats = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n for tag in result.get('tags', list()):\n tag_stats[tag] = tag_stats.get(tag, 0) + 1\n tags_sorted = sorted(tag_stats.items(), key=operator.itemgetter(1),\n reverse=True)[:50]\n data = list()\n for item in tags_sorted:\n data.append({'name': item[0], 'weight': item[1]})\n return jsonify(data)", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def get_post_count(parser, token):\n try:\n tag, user = token.split_contents()\n except:\n raise template.TemplateSyntaxError, \"%r tag requires one argument\" % token.contents.split()[0]\n return GetPostCount(user)", "def __len__(self):\r\n return self.docs.count()", "def getNumod_byerSentences(self): # !!! 
Need to rewrite this to match graph\n return len(self.__document)", "def get_lenght(text):\n return range(len(Articles.split(text)))", "def size(self):\n\t\treturn self._count", "def get_entity_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos?adjudicatariaid=%d' \\\n '&sort(-id)' % entity.base_id\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n results_range = response.headers['content-range']\n _, count = results_range.split('/')\n\n return int(count)", "def __len__(self) -> int:\n return self.objects.size", "def count(self):\n return len(self.objects)", "def size() -> int:\n ...", "def n_featured():\r\n sql = text('''select count(*) from featured;''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count", "def __len__(self) -> int:\n return len(self.ids)", "def intf_TAGQUERY(E):\n if not inc.TXT_or_LST_of_TXTs(E.The,1):\n print(\"Input Error: tag?\")\n print(intf_TAGQUERY.__doc__)\n return # Without doing much of anything.\n mytags= E.The.StackPop().val\n if type(mytags)==type(list()):\n #mytags= map(lambda x:x.val, mytags) # Should now be a list of TXTs.\n mytags= [x.val for x in mytags] # Should now be a list of TXTs.\n else:\n mytags= [ mytags ] # Also a (1 item) list of ints.\n qualifying_ents= list()\n for myeid in MMEL.El.keys():\n alltagshere= True # Assume they're here until one is not found.\n for mytag in mytags:\n #print(\"Searching entity #%d for tag ''%s''\" % (myeid,mytag))\n if not MMEL.El[myeid].has_tag(mytag):\n alltagshere= False\n break\n if alltagshere:\n qualifying_ents.append( objectifier.StackOB_VAL(myeid) )\n E.The.StackPush( objectifier.StackOB_LST(qualifying_ents) )", "def count(self, elem):\n return self.iter.count(elem)", "def count(self):\n return len(self)", "def get_num_POS_tags(data, pos_tag):\n pos_count = []\n for tweet in data:\n tokens = nltk.word_tokenize(tweet)\n tags = nltk.pos_tag(tokens)\n counts = Counter([j for i, j in tags])\n total = sum(counts.values())\n # normalized_counts = dict((word, float(count) / total) for word, count in counts.items())\n normalized_verb_count = sum(count for pos, count in counts.iteritems() if pos.startswith(pos_tag))\n # verb_counts = sum(1 for word, pos in normalized_counts if word.startswith('VB'))\n pos_count.append(normalized_verb_count / total)\n\n return np.array(pos_count).reshape(-1, 1)", "def cursor_nelements(cursor):\n\tcount = 0\n\tfor data in cursor:\n\t\tcount += 1\n\treturn count", "def get_available_inner_tag(**kwargs):\n\n tag = kwargs.pop('tag')\n used_inners = [item.inner for item in [\n *MLPAv4.objects.filter(tag=tag),\n *MLPAv6.objects.filter(tag=tag),\n *BilateralPeer.objects.filter(tag=tag),\n *Monitorv4.objects.filter(tag=tag)]]\n for i in range(1, 4097):\n if i not in used_inners:\n return i", "def getNumElements(self):\n raise Exception(\"Didn't expect this to get called.\")", "def count(self):\n\n raise NotImplementedError", "def gettotaltags(s,refconvdf): \r\n taglist=[] \r\n for ptag in refconvdf[(refconvdf.convid==s)].tags.values:\r\n if type(ptag)==str or type(ptag)==unicode:\r\n ptag=ptag[1:-1].split(', ') #possible source of error.. added space to cater for reading in csv. 
\r\n if ptag:\r\n try: \r\n for ele in ptag:\r\n taglist.append(ele) \r\n except TypeError:\r\n pass\r\n return taglist", "def test_tag_search(self):\n url = reverse_lazy('tag-list') + '?search={}'.format('testtag')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n results = response.data['results']\n self.assertEqual(len(results), 3)\n\n for tag in ('testtag1', 'testtag3'):\n result = list(filter(lambda it: it['title'] == tag, results))\n self.assertEqual(len(result), 1)\n result = result[0]\n\n self.assertEqual(len(result['posts']), 3)", "def __len__(self):\n return len(self.iterator)", "def count_tags(tags):\n counts = {}\n for tag_list in tags.values():\n for tag in tag_list:\n if tag in counts:\n counts[tag] += 1\n else:\n counts[tag] = 1\n return counts", "def elements_count(self):\n return self.__elements_count", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def element_count(self):\n return len(self.elements) + len(self.virtual_elements)", "def pos_tag_counts(doc):\n tags = []\n for token in doc:\n tags.append(token.pos_)\n frequency = dict(Counter(tags).most_common()) \n return frequency", "def render_number_tagged_traits(self, record):\n return record.current_non_archived_traits.count()", "def _get_num_sentences(doc: Doc, min_sen_length=5):\n return len([sent for sent in list(doc.sents) if len(sent.text.strip())>min_sen_length])", "def get_num_items(self):\r\n return self.num_items", "def _get_objects_length(self) -> int:\n return len(self.objects)", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def calculate_top_tags(tags_df, n_tags, use_cip_tags=True):\n tag_counts = tags_df['tag'].value_counts()\n if use_cip_tags:\n # Not all CiP tags are necessarily present in the dataset\n # and not necessarily in sufficient amounts\n present_tags = set(tag_counts[tag_counts > 5].index)\n return list(filter(lambda t: t in present_tags, CIP_TAGS))\n else:\n return tag_counts.index[:n_tags]", "def __len__(self):\n return len(self.db.list_nodes('/images'))", "def __len__(self):\n return self.count", "def __len__(self):\n return len(self._fetch())", "def size(self) -> int:" ]
[ "0.70608175", "0.678857", "0.6734028", "0.65930325", "0.64529455", "0.62508005", "0.6231032", "0.62118965", "0.61979675", "0.6197728", "0.6096715", "0.6096469", "0.6078573", "0.59946", "0.59408474", "0.5923345", "0.59161204", "0.590068", "0.5879492", "0.5843947", "0.58285606", "0.5811348", "0.5800454", "0.5785051", "0.5744477", "0.57426006", "0.57134944", "0.5706941", "0.5694222", "0.56415236", "0.5637233", "0.56201917", "0.56055623", "0.5601312", "0.5599203", "0.55810523", "0.55766076", "0.55669254", "0.55653256", "0.55612576", "0.5555154", "0.5540942", "0.5512566", "0.5512566", "0.5512566", "0.5512566", "0.5511057", "0.54878867", "0.54858655", "0.54837245", "0.5439005", "0.5438991", "0.54364395", "0.54362863", "0.5430967", "0.54274696", "0.54274505", "0.5415853", "0.54119366", "0.54099417", "0.5406975", "0.53893816", "0.538054", "0.5364619", "0.5363476", "0.53629357", "0.5361147", "0.5352273", "0.53372973", "0.5321219", "0.5319774", "0.53195393", "0.53173417", "0.5316381", "0.5310657", "0.5310305", "0.5309494", "0.53085595", "0.53016466", "0.52862424", "0.52756745", "0.52730614", "0.5272865", "0.5255658", "0.52555215", "0.525442", "0.5251011", "0.52500576", "0.5249552", "0.5245854", "0.5242925", "0.5239998", "0.52371365", "0.5236402", "0.52348244", "0.523137", "0.5230184", "0.5228385", "0.52247006", "0.522402", "0.5223611" ]
0.0
-1
Ensure this function returns the correct list of entity sets without the graveyard volume.
def test_locate_graveyard():
    groups_to_write, graveyard_sets = locate_graveyard(mb)
    assert groups_to_write == [12682136550675318125, 12682136550675318126,
                               12682136550675318128, 12682136550675318129]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def available_sets(session, player):\n excluded_sets = set(session.taken.keys())\n for grouping in session.exclusives:\n if player.sets.intersection(grouping):\n excluded_sets.update(grouping)\n return [s for s in session.sets if s not in excluded_sets]", "def gather_entities(self):\n entitylist = set()\n for entity in self.entities.all():\n entitylist.add(entity)\n entitylist.update(entity.get_ancestors())\n return entitylist #set(entity for entity in entitylist if not entity.abstract_entity)", "def getSets():", "def lego_sets():\n # you must replace this line and return your own list\n return []", "def lego_sets():\n # you must replace this line and return your own list\n return lego_sets_list", "def attack_sets(self):\n # TODO These should be component queries\n attack_sets = []\n if self.host.equipment:\n attack_sets.append(\n attacks.AttackSet(\n attacks.WeaponAttack, amount=len(self.host.equipment.get_wielded_grasp_slots())\n )\n )\n\n if self.host.body:\n attack_sets.extend(self.host.body.get_attacks())\n\n if self.host.monster and self.host.monster.base_monster.attack_sets:\n attack_sets.extend(\n self.host.monster.base_monster.attack_sets\n )\n\n return attack_sets", "def getSets(unique_name=None):", "def sets(self):\n return self._loaded_and_cached(gdxcc.GMS_DT_SET)", "def sets(self):\n return self._sets", "def get_all_sets(config: Dict) -> List:\n return [x for x, y in config.items() if y[\"type\"] == \"set\"]", "def all_genes_in_dataset(self):\n # the empty-set argument is needed in case there are no mutants in the dataset - set.union() with empty args is an error.\n return set.union(set(), *[set(genes) for N_mutants,genes \n in self.dataset.get_gene_dict_by_mutant_number(self.dataset_name).items() if N_mutants>0])", "def dsets(self):\n return list(self._dst_h5)", "def get_all_entities(self):\n return Artifact.get_all()", "def known_safes(self):\n if self.count == 0:\n return self.cells\n return set([])", "def entities(self):\n triples = self.triples()\n return set(pd.concat((triples[\"head\"], triples[\"tail\"])))", "def chunk_set(self) -> Set[OctreeChunk]:\n return self._chunks", "def available_sets(self, data=None, remote=False):\n if data is None:\n data = [SET_LABELS.RELEASE_DATE, SET_LABELS.BLOCK]\n else:\n if SET_LABELS.RELEASE_DATE not in data:\n data.append(SET_LABELS.RELEASE_DATE)\n if SET_LABELS.BLOCK not in data:\n data.append(SET_LABELS.BLOCK)\n\n sets = self._availability.available_sets(remote=remote)\n self._availability.populate(sets, data, remote=remote)\n\n return sets", "def getTestSets():\n return list(_testsetdict.keys())", "def depth_setsM(self):\n G = self.copy()\n depths = []\n while G.vertices() != []:\n outdegs = G.out_degree(labels=True)\n new_depth = [x for x in outdegs if outdegs[x]==0]\n depths.append(new_depth)\n G.delete_vertices(new_depth)\n return depths", "def getChemAtomSets(self):\n dataDict = self.__dict__\n chemAtomVars = self.chemAtoms\n chemAtomSets = set(self.chemComp.chemAtomSets)\n \n # remove ChemAtomSets that do not have all atoms within ChemCompVar\n for ca in self.chemComp.chemAtoms:\n if isinstance(ca, ChemAtom) and ca not in chemAtomVars:\n cas = ca.chemAtomSet\n while cas in chemAtomSets:\n chemAtomSets.remove(cas)\n cas = cas.chemAtomSet\n \n # remove ChemAtomSets with duplicate names\n casByName = {}\n for cas in chemAtomSets.copy():\n name = cas.name\n cas0 = casByName.get(name)\n \n if cas0 is None:\n casByName[name] = cas\n \n else:\n set1 = cas.chemAtomSets\n if set1:\n set0 = cas0.chemAtomSets\n else:\n set1 = 
cas.chemAtoms\n set0 = cas0.chemAtoms\n \n if set1 < set0:\n casx = cas\n if not set1:\n raise ApiError(\"%s found ChemAtomSets %s and %s that are incompatible\" % (self, cas, cas0))\n \n elif set0 < set1:\n casByName[name] = cas\n casx = cas0\n if not set0:\n raise ApiError(\"%s found ChemAtomSets %s and %s that are incompatible\" % (self, cas, cas0))\n \n else:\n raise ApiError(\"%s found ChemAtomSets %s and %s that are not subsets of each other\" % (self, cas, cas0))\n \n while casx in chemAtomSets:\n chemAtomSets.remove(casx)\n casx = casx.chemAtomSet\n #\n result = frozenset(chemAtomSets)\n return result", "def _computedivergentset(repo):\n divergent = set()\n obsstore = repo.obsstore\n newermap = {}\n for ctx in repo.set('(not public()) - obsolete()'):\n mark = obsstore.precursors.get(ctx.node(), ())\n toprocess = set(mark)\n while toprocess:\n prec = toprocess.pop()[0]\n if prec not in newermap:\n successorssets(repo, prec, newermap)\n newer = [n for n in newermap[prec] if n]\n if len(newer) > 1:\n divergent.add(ctx.rev())\n break\n toprocess.update(obsstore.precursors.get(prec, ()))\n return divergent", "def list_imdbs():\n return __sets.keys()", "def list_imdbs():\n return __sets.keys()", "def list_imdbs():\n return __sets.keys()", "def sets(self):\n\n return self._collection.distinct('set')", "def known_mines(self):\n if len(self.cells) <= self.count:\n return self.cells\n return set([])", "def known_safes(self):\n #if the bomb count is zero\n if(self.count == 0):\n #all spaces are safe, which returns all spots that are safe\n return (self.cells)\n else:\n return set()", "def create_relation_superset(self):\n return filter(lambda x: x[0] != x[1],\n super().create_relation_superset())", "def getSet(unique_name):", "def getSet(unique_name):", "def gen_all_holds(hand):\n from_hand = [()]\n for item in hand:\n for subset in from_hand:\n from_hand = from_hand + [tuple(subset) + (item, )]\n \n return set(from_hand)", "def get_organizations_to_delete():\n\n all_organizations = seed.models.Organization.objects.all()\n bad_organizations = [org for org in all_organizations if org.id not in get_core_organizations()]\n return bad_organizations", "def gen_all_holds(hand):\n all_holds_set = [()]\n for entry in hand:\n for subset in all_holds_set:\n # create subsets of hand set\n all_holds_set = all_holds_set + [tuple(subset) + (entry,)]\n return set(sorted(all_holds_set))", "def src_dsets(self):\n if self._src_dsets is None:\n with h5py.File(self._src_path, mode='r') as f:\n self._src_dsets = list(f)\n\n return self._src_dsets", "def noh(ls, dsets):\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set", "def objects_in_use(self):\n return set()", "def known_safes(self):\n if self.count==0:\n return self.cells\n return set()\n #raise NotImplementedError", "def write_sets(self):\n\t\tself.write_components['sets'] = (self.shock_gm.write_sets()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_aliased_sets()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_sets_other()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_aliased_sets_other()+\n\t\t\t\t\t\t\t\t\t\tself.shock_gm.write_sets_load(self.shock_gm.database.name))\n\t\treturn self.write_components['sets']", "def excluded(cls):\n return []", "def get_unstable_manifolds(self, persistence=None):\n if persistence is None:\n persistence = self.persistence\n partitions = {}\n for key, items in 
self.base_partitions.items():\n min_index = key[0]\n while (\n self.merge_sequence[min_index][0] < persistence\n and self.merge_sequence[min_index][1] != min_index\n ):\n min_index = self.merge_sequence[min_index][1]\n new_key = min_index\n if new_key not in partitions:\n partitions[new_key] = []\n partitions[new_key].extend(items.tolist())\n\n for key in partitions:\n partitions[key] = sorted(list(set(partitions[key])))\n\n return partitions", "def get_enabled_stores(self):\n keys = [k for k in self.d.keys() if isinstance(k, int)]\n keys.sort()\n storelist = []\n for k in keys[1:]:\n storelist.extend(self.d[k])\n continue\n return storelist", "def get_all_districts():\n with open(district_data_dir + 'district-data.json') as f:\n district_dict = json.load(f)\n districts = set([])\n\n for date, data in district_dict.items():\n if date == '03/02/2020':\n continue\n districts.update(data.keys())\n\n # Remove unnecessary points\n districts.remove('total-infected')\n districts.remove('max-legend-value')\n districts.remove('splitPoints')\n return districts", "def get_entityset_ranges(my_core, meshset, geom_dim):\n\n entityset_ranges = {}\n entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n for dimension, set_type in enumerate(entityset_types):\n entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n [dimension])\n return entityset_ranges", "def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs", "def get_all_storage(life):\n\treturn [items.get_item_from_uid(item) for item in life['inventory'] if 'max_capacity' in items.get_item_from_uid(item)]", "def get_backup_sets(debug_stmts):\n handle_set = re.compile(r\"Handling set '([\\w/]+)'\")\n backup_sets = set([])\n for stmt in debug_stmts:\n if stmt.module == \"backup\":\n m = handle_set.match(stmt.msg)\n if m:\n backup_sets.add(m.group(1))\n return sorted(backup_sets)", "def build_kegg_sets(kegg_sets_members, keggset_info_folder, organism, xrdb,\n tags_dictionary=None):\n\n all_kegg_sets = []\n\n for kegg_id in kegg_sets_members.keys():\n info_file = os.path.join(keggset_info_folder, kegg_id)\n org_slug = slugify(organism)\n\n kegg_set_info = get_kegg_set_info(info_file, org_slug)\n\n kegg_set_info['organism'] = organism\n kegg_set_info['xrdb'] = xrdb\n kegg_set_info['annotations'] = {}\n\n # This following loop fills out annotations. 
Since KEGG sets do not\n # have publications associated with their genes, each gene will have\n # an empty list as a value in the set's annotations.\n for member in kegg_sets_members[kegg_id]:\n if xrdb == 'Entrez':\n try:\n kegg_set_info['annotations'][int(member)] = []\n except ValueError:\n logger.error('Entrez ID %s could not be coerced to an '\n 'integer and was not included in KEGG set'\n 'with kegg_id %s', member, kegg_id)\n else:\n kegg_set_info['annotations'][member] = []\n\n if tags_dictionary and kegg_id in tags_dictionary:\n kegg_set_info['tags'] = tags_dictionary[kegg_id]['gs_tags']\n\n all_kegg_sets.append(kegg_set_info)\n\n return all_kegg_sets", "def __init__(self):\n self.EntireSet = []", "def generate_containers(self):\n chests = []\n z = 0\n while z < 10: # 10 chests per level for now\n c = 0\n while c == 0:\n x = random.randint(0, self.map.width - 1)\n y = random.randint(0, self.map.height - 1)\n if self.map.map[x][y].blocked == False:\n chests.append(Container.Container(x=x, y=y, name=\"Chest\", char=\"c\", contents=\"\"))\n z += 1\n c += 1\n\n return chests", "def dormitories(self) -> list[Dormitory]:\n return list(self._dormitories.values())", "def all_blocks(state):\n return state.clear.keys()", "def containers(self):\n seen = set()\n return [l.from_segment for l in self.edges_to_containers \\\n if id(l) not in seen and not seen.add(id(l))]", "def all():\n return MIGRATIONS.values()", "def test_entities__getEntities__2(stubEntities):\n e = stubEntities\n assert (set([e.kwack, e.duck, e.cat]) ==\n set(e.entities.getEntities(sorted=False)))", "def blaze(self):\n visited = set()\n tile_exits = dict((tile, {}) for tile in self.tiles)\n\n def visit(tile):\n # Randomized depth-first search of self.tiles.\n visited.add(tile)\n adj = self.adjacencies(tile, self.tiles)\n self.rand.shuffle(adj)\n for d, t in adj:\n if t not in visited:\n tile_exits[tile][d] = t\n tile_exits[t][self._inverted_dirs[d]] = tile\n visit(t)\n\n visit(next(iter(self.tiles)))\n return tile_exits", "def get_flush_lists(self, keys):\r\n return set(e for flush_list in\r\n filter(None, cache.get_many(keys).values())\r\n for e in flush_list)", "def _remove_seen_ids(ctrl, id_set):\n partition_range = range(0, ctrl.config[\"partitions\"])\n unseen_ids = id_set.copy()\n for partition_idx in partition_range:\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_idx)\n for seen_id in id_set:\n if lower_bound <= seen_id <= upper_bound:\n unseen_ids.remove(seen_id)\n return unseen_ids", "def get_filesets(self):\n\n\t\tif None != self.__filesets:\n\t\t\treturn self.__filesets.get_filesets()\n\t\telse:\n\t\t\treturn None", "def getAllMachines():\n\n validHosts = (subprocess.getoutput(\"qconf -shgrpl\").split())\n machineList = []\n processedHGList = []\n readNodes = False\n for host in validHosts:\n hostMachineList = ((subprocess.getoutput(\"qconf -shgrp_tree \" + str(host))).split())\n for element in hostMachineList:\n if '@' not in element: # If it is not a HG name\n if element not in machineList:\n machineList.append(element) # Searching whole list for this node\n machineList.sort()\n return machineList", "def dirty(self) -> IdentitySet:\n return IdentitySet(\n [\n state.obj()\n for state in self._dirty_states\n if state not in self._deleted\n ]\n )", "def lego_sets():\n # you must replace this line and return your own list\n\n for i in range(min_parts()):\n name.append(resultsDict[i][\"name\"])\n set_num.append(resultsDict[i][\"set_num\"])\n\n for i in range(min_parts()):\n set_id = 
set_num[i]\n url = \"/api/v3/lego/sets/\" + set_id + \"/parts/?key=\"+KEY+\"&page_size=1000\"\n h.request(\"GET\", url)\n res = h.getresponse().read()\n res_dict = json.loads(res)[\"results\"]\n res_dict.sort(key=lambda k: k[\"quantity\"], reverse=True)\n part_dict = {}\n part_set = []\n for j in range(min(20, len(res_dict))):\n temp = {}\n temp[\"color\"] = res_dict[j][\"color\"]\n temp[\"quantity\"] = res_dict[j][\"quantity\"]\n temp[\"name\"] = res_dict[j][\"part\"][\"name\"]\n temp[\"number\"] = res_dict[j][\"part\"][\"part_num\"]\n part_set.append(temp)\n\n part_dict[\"set_num\"] = set_num[i]\n part_dict[\"Parts\"] = part_set\n Parts.append(part_dict)", "def get_entities(self, clean=False):\n return list(self.iter_entities(clean=clean))", "def get_setlist(self, show_id):\r\n songs = ShowsSongsMapping.query.filter_by(ShowID=show_id).order_by(\r\n asc(ShowsSongsMapping.SetlistOrder)).all()\r\n\r\n result = []\r\n for song in songs:\r\n sd = {\r\n \"setlistOrder\": song.SetlistOrder,\r\n \"showID\": song.ShowID,\r\n \"songID\": song.SongID,\r\n \"duration\": song.ShowSongDuration,\r\n }\r\n result.append(sd)\r\n return result", "def _get_children(self):\n return set()", "def hydrations(self):\n return self._hydrations", "def get_free_games(self) -> List[Game]:", "def all(self) -> List[Optional[U]]:\n return list(self._store.values())", "def get_key_set():\n keys = [0] * n_families\n for i in range(n_families):\n keys[i] = get_key(i)\n\n return keys", "def known_mines(self):\n if len(self.cells)==self.count:\n return self.cells\n return set()\n #raise NotImplementedError", "def get_hull_loadouts():\n loadouts = {}\n loadout_table = db_parser.get_table_as_dict('loadout')\n for row in loadout_table:\n if row['hull_name'] not in loadouts.keys():\n loadouts[row['hull_name']] = []\n loadouts[row['hull_name']].append(row['part_name'])\n return loadouts", "def all_entity_lists(self) -> t.List[EntityList]:\n _all_entity_lists: t.List[EntityList] = []\n _all_entity_lists.extend(self.ensembles)\n\n db = self.db\n if db is not None:\n _all_entity_lists.append(db)\n\n return _all_entity_lists", "def form_cheapset_list_goods(self):\n \n self.database.truncate_all_tables()\n\n list_with_object_cheapset_test = [\n GoodInfo('макароны Макфа,рожки', '30', '7','2020-12-30', '360', '2020-12-30'),\n GoodInfo('макароны Макфа,спагетти', '30', '10','2020-12-30', '360', '2020-12-30')\n ]\n \n for good in list_with_object_cheapset_test:\n self.database.add(good)\n\n most_cheapset_test_list = self.database.get_all_goods()\n\n return most_cheapset_test_list", "def _computeunstableset(repo):\n # revset is not efficient enough here\n # we do (obsolete()::) - obsolete() by hand\n obs = getrevs(repo, 'obsolete')\n if not obs:\n return set()\n cl = repo.changelog\n return set(r for r in cl.descendants(obs) if r not in obs)", "def _get_ad_sets(self, params):\n return self._api.account.get_ad_sets(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def _eidset():\n\n dbrosters = set()\n with sqlite3.connect(DB) as db:\n cursor = db.cursor()\n cursor.execute(\"SELECT eid FROM players\")\n rows = cursor.fetchall()\n for row in rows:\n dbrosters.add(int(row[0]))\n # return set.\n return dbrosters", "def __removeDuplicateEntities(self, entities):\n\n seenEncodedEntities = set()\n uniqueEntitiesList = []\n\n for entity in entities:\n encodedEntity = EncodedEntity(entity).encoded\n if encodedEntity not in seenEncodedEntities:\n seenEncodedEntities.add(encodedEntity)\n uniqueEntitiesList.append(entity)\n\n 
return uniqueEntitiesList", "def get_items(self):\n\n self.__logger.info(\"Thermo Builder Started\")\n\n # All relevant materials that have been updated since thermo props were last calculated\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.thermo))\n comps = [m[\"elements\"] for m in self.materials().find(q, {\"elements\": 1})]\n\n self.__logger.info(\"Found {} compositions with new/updated materials\".format(len(comps)))\n\n # Only yields maximal super sets: e.g. if [\"A\",\"B\"] and [\"A\"] are both in the list, will only yield [\"A\",\"B\"]\n # as this will calculate thermo props for all [\"A\"] compounds\n processed = set()\n # Start with the largest set to ensure we don\"t miss superset/subset relations\n for chemsys in sorted(comps, key=lambda x: len(x), reverse=True):\n if \"-\".join(sorted(chemsys)) not in processed:\n processed |= self.chemsys_permutations(chemsys)\n yield self.get_entries(chemsys)", "def _storage_review(\n self,\n storage_key: Hash32,\n storage_root_hash: Hash32) -> Set[Hash32]:\n with self._trie_db.at_root(storage_root_hash) as snapshot:\n try:\n # request the data just to see which part is missing\n snapshot[storage_key]\n except MissingTrieNode as exc:\n return {exc.missing_node_hash}\n else:\n return set()", "def get_entities(self):\n return list(self._entities.values())", "def alt_clueset(self):\n sames = self.get_same_mapping()\n new_clues = []\n has_changes = False\n\n for clue in self.clueset:\n if (clue[\"type\"] != SAME and clue[\"type\"] != ISAT):\n alt = self.new_clue(sames, clue)\n if alt:\n new_clues.append(alt)\n has_changes = True\n else:\n new_clues.append(clue)\n\n return new_clues if has_changes else None", "def making_sets(lists):\n empty_set = []\n lists =lists.split()\n for elements in lists:\n if elements == \" \":\n next\n else:\n if elements not in empty_set:\n empty_set.append(elements) \n return empty_set", "def strict(cls):\n return frozenset()", "def all_hypernyms(self):\n hypernyms = []\n for path in self.hypernym_paths():\n for synset in path:\n if synset is not self:\n hypernyms.append(synset)\n return set(hypernyms)", "def entity_groups(self):\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups", "def entity_groups(self):\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups", "def potential_new_obs(self) -> Set[GriddedPerm]:\n subobs: Set[GriddedPerm] = set()\n for ob in self._tiling.obstructions:\n subobs.update(ob.all_subperms(proper=True))\n subobs.remove(GriddedPerm.empty_perm())\n return subobs", "def get_items(self):\n self.logger.info(\"Electrode Builder Started\")\n\n # All updated chemical systems that contain at least one redox active elements\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.thermo))\n q.update({\"chemsys\": 
{\"$in\": self.redox_els}})\n\n comps = self.materials.distinct(\"chemsys\", q)\n\n self.logger.info(\n \"Found {} compositions with new/updated materials\".format(len(comps)))\n\n # Only yields maximal super sets: e.g. if [\"A\",\"B\"] and [\"A\"] are both in the list, will only yield [\"A\",\"B\"]\n # as this will calculate thermo props for all [\"A\"] compounds\n processed = set()\n\n # Start with the largest set to ensure we don\"t miss superset/subset\n # relations\n for chemsys in sorted(comps, key=lambda x: len(x.split(\"-\")), reverse=True):\n if chemsys not in processed:\n processed |= chemsys_permutations(chemsys)\n yield self.get_entries(chemsys)", "def subsets(self):\n return set(self.subset_map.values())", "def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set", "def local_nonant_cache(self):\n if not self._ran:\n raise RuntimeError(\"Need to call WheelSpinner.run() before querying solutions.\")\n local_xhats = dict()\n for k,s in self.spcomm.opt.local_scenarios.items():\n for node in s._mpisppy_node_list:\n if node.name not in local_xhats:\n local_xhats[node.name] = [\n value(var) for var in node.nonant_vardata_list]\n return local_xhats", "def create_sets(self,FD_SET=[],VA_SET=[]):\n \n self.m.S = Set(initialize=self.sectors, doc='sectors')\n\n if self.EORA is True:\n self.m.rROW = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions')\n else:\n self.m.rROW = Set(initialize=self.countries,ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries,ordered=True, doc='regions')\n\n if self.EORA is True:\n self.m.fdemand = Set(initialize=['P3h', 'P3n','P3g', 'P51','P52','P53'], doc='Final Demand')\n else:\n self.m.fdemand = Set(initialize=self.fd_cat, doc='Final Demand')\n\n if self.EORA is True:\n self.m.VA = Set(initialize=['VA'], doc='value added')\n else:\n self.m.VA = Set(initialize=VA_SET, doc='value added')", "def gen_all_holds(hand):\r\n \r\n held_dice = [()]\r\n \r\n for dice in hand:\r\n for dummy_dice in held_dice:\r\n held_dice = held_dice + [tuple(dummy_dice) + (dice, )]\r\n \r\n\r\n return set(held_dice)", "def entities(self):\n return self._entities", "def get_exercises(self):\n exercises = set()\n for er in self.exercise_recordings:\n if er.exercise not in exercises:\n exercises.add(er.exercise)\n return list(exercises)", "def find_uses_paths_with_invalid_marketplaces(\n self, pack_ids: List[str]\n ) -> List[BaseContent]:\n with self.driver.session() as session:\n results: Dict[str, Neo4jRelationshipResult] = session.execute_read(\n validate_marketplaces, pack_ids\n )\n self._add_nodes_to_mapping(result.node_from for result in results.values())\n self._add_relationships_to_objects(session, results)\n return [self._id_to_obj[result] for result in results]", "def Inventory(cls):\r\n l = ServerSet()\r\n rs = cls.find()\r\n for server in rs:\r\n l.append(server)\r\n return l", "def listNonDegenerate(self):\n return arange(self.nelems())[~self.testDegenerate()]", "def get_non_ovf_disks():\n return [\n d.get_id() for d in get_all_disks() if (\n d.get_alias() != ENUMS['ovf_disk_alias']\n )\n ]", "def get_all_valid_deer_load_shapes(database_year):\n query = \"\"\"\n SELECT *\n FROM deer_load_shapes\n limit 1\n \"\"\"\n valid_deer_load_shapes = execute_query(database_year, query)\n all_columns_w_utilities = list(\n valid_deer_load_shapes.drop(\"hour_of_year\", 
axis=1).columns\n )\n # TODO (ssuffian): Reshape db so it is a query by utility column\n return list(\n set(\n [\n c.replace(\"PGE_\", \"\")\n .replace(\"SDGE_\", \"\")\n .replace(\"SCG_\", \"\")\n .replace(\"SCE_\", \"\")\n for c in all_columns_w_utilities\n ]\n )\n )", "def remove_cold_start(self, entities=None):\n entities = self.fguide.entities if entities is None else entities\n for key in entities:\n diff = np.setdiff1d(self.test[key], self.train[key])\n logging.info(\n 'removing %d %s ids from the test set.' % (len(diff), key))\n logging.debug(' '.join(map(str, diff)))\n self.test = self.test[~self.test[key].isin(diff)]", "def _get_underprivileged_groups(self):\n if self.underprivileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.underprivileged_groups)" ]
[ "0.6598351", "0.6436746", "0.6417409", "0.6331896", "0.6149638", "0.604962", "0.5946124", "0.58908427", "0.5782512", "0.57297885", "0.571627", "0.55856144", "0.55809915", "0.5561783", "0.5537913", "0.5536738", "0.5535733", "0.54536754", "0.5453462", "0.5442003", "0.53676707", "0.5353379", "0.5353379", "0.5353379", "0.53442293", "0.5311887", "0.529896", "0.5290701", "0.5278077", "0.5278077", "0.52736515", "0.5264885", "0.5258448", "0.52527565", "0.5243828", "0.5239732", "0.52395827", "0.5233683", "0.52267665", "0.52248317", "0.5222317", "0.52222365", "0.519935", "0.5178073", "0.5169151", "0.51514167", "0.5148702", "0.5144273", "0.513696", "0.51354945", "0.5103192", "0.50970227", "0.5095785", "0.5090605", "0.50756514", "0.5074773", "0.5072582", "0.50683814", "0.505512", "0.50534075", "0.5051628", "0.5050409", "0.50423646", "0.50418043", "0.50353765", "0.50338477", "0.5033242", "0.5027267", "0.50233936", "0.5023277", "0.5021538", "0.501533", "0.5011124", "0.5010508", "0.50058943", "0.5003057", "0.500006", "0.49986032", "0.4996758", "0.49949145", "0.49937966", "0.4982684", "0.4982532", "0.49750483", "0.49750483", "0.49733335", "0.49677414", "0.4967246", "0.49664176", "0.4965794", "0.49648055", "0.49647662", "0.4963907", "0.4959664", "0.49574107", "0.49417135", "0.49396643", "0.49384367", "0.49370718", "0.49359244", "0.49305803" ]
0.0
-1
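For readability, a minimal sketch of what a locate_graveyard(mb) helper could look like follows. This is an illustration only, not the implementation exercised by the test above: the mb.groups() helper is assumed here to yield (handle, group_name) pairs, which is a simplification of the real mesh-database interface, and the real code may differ.

def locate_graveyard(mb):
    # Split group entity sets into those to write out and the graveyard sets.
    # `mb.groups()` is an assumed helper yielding (handle, group_name) pairs.
    groups_to_write = []
    graveyard_sets = []
    for handle, name in mb.groups():
        if "graveyard" in name.lower():
            graveyard_sets.append(handle)
        else:
            groups_to_write.append(handle)
    return groups_to_write, graveyard_sets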
Ensure this function returns the correct default output file name.
def test_default_format_file_name():
    output_name = format_file_name(test_file)
    assert (output_name == test_file[:-4] + "_no_grave.h5m") == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def getDefaultOutputFileName(inputFileName):\n\tbaseName = os.path.basename(inputFileName)\n\trootName = os.path.splitext(baseName)[0]\n\treturn string.join([rootName, \"xls\"], '.')", "def getDefaultOutputFileName(inputFileName):\n baseName = os.path.basename(inputFileName)\n rootName = os.path.splitext(baseName)[0]\n return string.join([rootName, \"xls\"], '.')", "def defaultOutputFilepath(self):\n return self.outputFilepath('TulipOutput.txt')", "def AssignOutputFilename(self, output_file):\n if output_file is None:\n gmt = time.gmtime()\n ts = calendar.timegm(gmt)\n return f\"{self.input_file}.{ts}\"\n return output_file", "def get_default_filename() -> str:\n return datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")", "def GetOutputFilename(fname):\n return os.path.join(outdir, fname)", "def create_output_loc(self):\n self.output_name = [self.args.xml_out, 'gatk4_' + self.json_file['name'].lower().split(' ')[0] + '.xml']\n if not self.args.xml_out.endswith('/'):\n return '/'.join(self.output_name)\n else:\n return ''.join(self.output_name)", "def getOutputFilename(self, filename):\n return filename[:-4] + \".txt\"", "def outname(self, fileappend=''):\n if self._outname is None:\n self._outname = self.generate_outname(fileappend='')\n\n return self._outname", "def defaultFile(self):\n filename = _odb.getCurrentFrame().filename\n if filename == '<string>' and self.mainpyfile:\n filename = self.mainpyfile\n return filename", "def output_file_name_maker(args):\n log.debug(\"Entering output_file_name_maker()\")\n path = os.getcwd() + '/out_files/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n if args.output is None:\n out_file_name = path + args.input[:-4] + '_' + args.type + '_' + args.layer\n else:\n out_file_name = path + args.output\n\n log.debug(\"Exiting output_file_name_maker()\")\n return out_file_name", "def get_output_file_name(argn=2, std_name='output.txt'):\n try:\n name = sys.argv[argn]\n except IndexError:\n name = std_name\n print(\"Warning: no output file name received. 
Output will be\"\n \" written to '%s'.\" % name)\n return name", "def getOutputFile(fname):\n return os.path.join(Configurations.getOutputDir(), fname)", "def _build_output_file(self, output):\n\t\tif output is None:\n\t\t\tself.output_file_name = \"index.html\"\n\t\telse:\n\t\t\tself.output_file_name = output", "def default_output_path():\n\n documents = os.path.join(os.path.expanduser('~'))\n try:\n documents = _xdg_documents_path()\n except: pass\n if platform.system() == 'Windows':\n try:\n documents = _win_documents_path()\n except: pass\n\n return os.path.join(documents, 'Topographica')", "def full_name(self):\n return \"output.%s\" % self.name", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def outfile(self):\n\n return f\"{self.name}.run.out\"", "def output_filename(self, prefix, suffix):\n filename = \"%s%s%s\" % (prefix, _ExecutionWrapper._file_index, suffix)\n _ExecutionWrapper._file_index += 1\n return filename", "def output_name(self) -> str:\n return jsii.get(self, \"outputName\")", "def _make_output_path(self, filename):\n return os.path.join(self._output_directory, filename)", "def get_output_file_type(self):\n file_name = '.' + self.template_file_name.split('.')[-2]\n return file_name", "def get_output_raw_name(journal_file_name, output_type='txt'):\n dot_pos = journal_file_name.rfind('.')\n if dot_pos != -1:\n output_file_name = journal_file_name[0: dot_pos]\n else:\n output_file_name = journal_file_name\n num_of_output = 1\n if output_type == 'txt':\n while True:\n output_file = '%s_%d.txt'%(output_file_name,num_of_output)\n if not os.path.exists(output_file):\n break\n else:\n num_of_output += 1\n else:\n output_file = '%s.%s'%(output_file_name,output_type)\n return output_file", "def get_output_filename(self, gzip=False):\n if self.mode == 'light':\n suffix = '-light'\n else:\n suffix = ''\n\n destination = self.destination\n extension = 'fits'\n if gzip:\n destination += '-compressed'\n extension += '.gz'\n return os.path.join(destination,\n 'iphas-dr2-{0}{1}.{2}'.format(\n self.get_partname(),\n suffix,\n extension))", "def output_file_path(self):\n return self.__output_file_path", "def setOutputFileName(self, *args):\n return _libsbml.SBMLExternalValidator_setOutputFileName(self, *args)", "def _get_output_filename(dataset_dir, split_name):\n return '%s/%s*.tfrecord' % (dataset_dir, split_name)", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n # return Path(dir) / filename\n return filename", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename", "def get_output_file_name(run_parameters, dir_name_key, prefix_string, suffix_string='', type_suffix='tsv'):\n output_file_name = os.path.join(run_parameters[dir_name_key], prefix_string + '_' +\n run_parameters['method'] + '_' + run_parameters[\"correlation_measure\"])\n\n output_file_name = kn.create_timestamped_filename(output_file_name) + '_' + suffix_string + '.' 
+ type_suffix\n return output_file_name", "def comparison_outfile(self):\n\n return f\"{self.name}.compare.out\"", "def _logFile_default(self):\n print \"choosing default log file\"\n return os.path.join(self.rpiADCLogFolder,time.strftime(\"rpiADC-%Y-%m-%d.csv\", self.currentLocalTime))", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")", "def _get_output_filename(dataset_dir, split_name):\n return '%s/cifar100_%s.tfrecord' % (dataset_dir, split_name)", "def _set_output_dir(self):\n return os.path.join(self.outputDir,\n datetime.datetime.utcnow().strftime(\"%Y%m%d\"))", "def get_save_name():\n if ARGV.get(FILE_OPT):\n return ARGV.get(FILE_OPT)\n return FILE_DEFAULT", "def current_filename(self):\n if self.char_limit is not None:\n return '{}/{}.{}.n{}.maf'.format(self.out_dir, self.chrom,\n self.taxon, self.file_num)\n else:\n return '{}/{}.{}.maf'.format(self.out_dir, self.chrom, self.taxon)", "def get_output_name(input_path):\n file_name, file_ext = os.path.splitext(os.path.basename(input_path))\n return os.path.abspath(\"out\" + os.path.sep + file_name + \"_geo\" + file_ext)", "def output_file(self):\n if not self.output_file_generated():\n return None\n if self.output_static is False:\n return self._output_generated_file\n else:\n return self._output_uploaded_file", "def _out(self, *args):\n suffix = '_'.join(map(str, args))\n return os.path.join(self._out_folder, suffix )", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def GenerateOutputFilename(extension):\n\n # used for resolv.conf\n if '.' in extension:\n filename = extension\n else:\n output_base = 'namebench_%s' % datetime.datetime.strftime(datetime.datetime.now(),\n '%Y-%m-%d %H%M')\n output_base = output_base.replace(':', '').replace(' ', '_')\n filename = '.'.join((output_base, extension))\n\n output_dir = tempfile.gettempdir()\n return os.path.join(output_dir, filename)", "def output_filename(self, modifier):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n return fs.splitext(output_filename)[0] + modifier", "def _default_dump_dest(self) -> str:\n ... 
# pragma: no cover", "def _get_output_filename(dataset_dir):\n return os.path.join(dataset_dir, 'pokemon.tfrecord')", "def outpath(self):\n return None", "def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path", "def get_filename(output_dir, accountname):\n f_name = 'twitter_data_' + accountname + str(datetime.datetime.utcnow()) + '.csv'# start_time + '_' + end_time\n full_path = output_dir + '/' + f_name\n\n return full_path", "def set_default_output_settings(args):\n # TODO: shouldn't be setting args.X here as a side effect!\n stem_name, _ = os.path.splitext(os.path.basename(args.input))\n input_dir = helpr.get_directory(args.input)\n # Set default output format if there is an output filename specified\n if args.output:\n args.output = helpr.cleanup_filepath(args.output)\n if not args.outputFormat:\n args.outputFormat = os.path.splitext(args.output)[1][1:]\n log.info(\"You didn't specify an output format, \"\n \"assuming from output filename that it is %s\", args.outputFormat)\n # Set default output filename if not already done\n else:\n # Hmm default hidden here, not good\n if not args.outputFormat:\n args.outputFormat = printer_opts_checked[args.printer].default_output_fmt\n log.info(\"You didn't specify an output format, defaulted to %s\", args.outputFormat)\n filename = \"\".join([stem_name, \"_\", str(args.eventNumber), \".\", args.outputFormat])\n args.output = os.path.join(input_dir, filename)\n log.info(\"You didn't specify an output filename, setting it to %s\", args.output)", "def getOutputFileName(aDirectory, aSelenium, aSuite):\n\n name = \\\n aSelenium.mOperatingSystem + \"_\" + \\\n aSelenium.mBrowser + \"_\" + \\\n aSelenium.mBrowserMode + \"_\" + \\\n aSelenium.mFont + \"_\" + \\\n aSelenium.mOutputJax\n\n if resultsExist(aDirectory + name):\n i = 1\n while (resultsExist(aDirectory + name + \"-\" + str(i)) and\n i < MAX_TEST_RESULTS_WITH_SAME_NAME):\n i = i + 1\n name += \"-\" + str(i)\n\n if (aSuite.mTaskHandler != None):\n sendOutputFileName(name);\n\n return name", "def py_simple_output_filename(filename, tag, ending):\n\n py_simple_output_filename = (py_output_dir(tag, ending) + \"/\"\n + filename + \".\"\n + ending)\n\n return py_simple_output_filename", "def _gen_basename(param_dict, clargs):\n if param_dict['output_basename'] in ['', 'auto']:\n return clargs.input_fname.lower().split('.json')[0]\n\n else:\n return param_dict['output_basename']", "def getOutputFileName(self):\n return _libsbml.SBMLExternalValidator_getOutputFileName(self)", "def get_data_output_name(prefix: str):\n return settings.data_dir + 'standardized_data_' + prefix", "def file_name(product, ext='json'):\n return f\"./output/{product}_{datetime.now().strftime('%Y-%m-%d_%H%M%S')}_transformed_{version}.{ext}\"", "def generateFileName(self):\n return 'Covid' + self.map_type + '.html'", "def _getfilename(self):\n pass", "def _get_parameter_based_output_prefix(self):\n\n # As you can see the generation of the output filename prefix is\n # straigthforward but pretty tireingsome.\n filename_prefix = \"sequential_alignment_\"\n\n filename_prefix += \"s-%d_e-%d_r-%d_\" % tuple(self.options.sliceRange)\n\n try:\n filename_prefix += \"ROI-%s\" % \"x\".join(map(str, self.options.registrationROI))\n except:\n filename_prefix += \"ROI-None\"\n\n try:\n filename_prefix += \"_Resize-%s\" % \"x\".join(map(str, self.options.registrationResize))\n except:\n filename_prefix += \"_Resize-None\"\n\n filename_prefix += \"_Color-%s\" % 
self.options.registrationColor\n\n try:\n filename_prefix += \"_Median-%s\" % \"x\".join(map(str, self.options.medianFilterRadius))\n except:\n filename_prefix += \"_Median-None\"\n\n filename_prefix += \"_Metric-%s\" % self.options.antsImageMetric\n filename_prefix += \"_MetricOpt-%d\" % self.options.antsImageMetricOpt\n filename_prefix += \"_Affine-%s\" % str(self.options.useRigidAffine)\n\n filename_prefix += \"_eps-%d_lam%02.2f\" % \\\n (self.options.graphEdgeEpsilon, self.options.graphEdgeLambda)\n\n try:\n filename_prefix += \"outROI-%s\" % \"x\".join(map(str, self.options.outputVolumeROI))\n except:\n filename_prefix += \"outROI-None\"\n\n return filename_prefix", "def modpricesetter_get_file_name(self):\r\n year, month, day = self._get_market_year_month_day_as_str()\r\n interval_number = self._get_interval_number_as_str()\r\n base_name = \"NEMPriceSetter_{year}{month}{day}{interval_number}00.xml\"\r\n name = base_name.format(\r\n year=year, month=month, day=day, interval_number=interval_number\r\n )\r\n path_name = Path(self.cache_folder) / name\r\n name_OCD = name.replace(\".xml\", \"_OCD.xml\")\r\n path_name_OCD = Path(self.cache_folder) / name_OCD\r\n name_zero = name.replace(\".xml\", \"00.xml\")\r\n path_name_zero = Path(self.cache_folder) / name_zero\r\n if os.path.exists(path_name):\r\n return name\r\n elif os.path.exists(path_name_OCD):\r\n return name_OCD\r\n elif os.path.exists(path_name_zero):\r\n return name_zero\r\n else:\r\n return name", "def file_name(self):\n \n file_name = stringify(self._file_name)\n if file_name is None:\n file_name = stringify(self._project.file_name)\n if file_name is None:\n with current_context() as ctx:\n file_name = stringify(ctx.get('ninja.file_name', DEFAULT_NAME))\n if file_name is not None:\n file_name = '{}.ninja'.format(file_name)\n return file_name", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def _make_config_file_name(environment, out=False):\n return os.path.join(PH_HOME_DIR, \"etc/config\", \"%s.conf\" % environment) if out else \\\n os.path.join(PH_HOME_DIR, \"config\", \"%s.conf.in\" % environment)", "def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)", "def py_output_filename(tag, filename, spec, ending):\n\n py_output_filename = (py_output_dir(tag, ending) + \"/\"\n + filename + \"_\" + spec + \".\"\n + ending)\n\n return py_output_filename", "def name_final_path(out_img_folder):\n if out_img_folder == None:\n return \"./.out_hidden_images\"\n else:\n return out_img_folder", "def GetOutputFilename(self, directory=None):\n if self.forced_filename:\n logging.debug('Forced filename or pre-computed file name = %s', self.filename)\n return self.filename\n\n tags = dict()\n\n # Base tag\n tags['base'] = f\"{self['ARTIST']} - {self['DATE_RECORDED']} - {self['TITLE']}\"\n\n # Setup version subinfo\n tags['version'] = f\" ({self['VERSION']})\" if self[\"VERSION\"] else \"\"\n\n # Setup label / release subinfo\n channels = self.channels if self.channels != '2.0' else ''\n if self[\"ORIGINAL_MEDIUM\"] == \"CD\":\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {channels}\"\n 
else:\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {self['ORIGINAL_MEDIUM']} {channels}\"\n labeltag = labeltag.strip()\n tags['label'] = labeltag and f\" ({labeltag})\"\n\n # Setup disc tag\n if self[\"PART_NUMBER\"]:\n disctag = f\" (Disc {self['PART_NUMBER']}) {self['DISC_NAME']}\"\n else:\n disctag = f\" {self['DISC_NAME']}\"\n tags['disc'] = disctag.rstrip()\n\n # Merge into filename\n filename = f\"{tags['base']}{tags['version']}{tags['disc']}{tags['label']}{ext.WAV}\"\n # Replace invalid characters with either a dash or remove them\n filename = re.compile(\"[<>:/\\\\\\\\]\").sub(\"-\", filename)\n filename = re.compile(\"[|?*]\").sub(\"\", filename)\n # Replace invalid double quotes with valid single quotes\n filename = filename.replace('\"', \"'\")\n\n if directory:\n return os.path.join(directory, filename)\n return filename", "def name(self):\n return self._output_name", "def filename(self, url, default_file = \"index.html\"):\n purl = urlparse(url)\n file_name = purl[1] + purl[2] \n folder_name = (purl[1] + purl[2])\n \n if purl[2] == '':\n folder_name += ('/' + default_file)\n file_name += ('/' + default_file)\n elif purl[2] == '/':\n folder_name += default_file\n file_name += default_file\n elif (purl[2])[-1] == '/':\n file_name += ('/' + default_file)\n\n folder_path = dirname(folder_name)\n \n if not isdir(folder_path): # create archive dir if nec.\n if not exists(folder_path): \n makedirs(folder_path)\n return file_name", "def get_default_config_filename():\n if 'PYWREN_CONFIG_FILE' in os.environ:\n config_filename = os.environ['PYWREN_CONFIG_FILE']\n # FIXME log this\n\n elif os.path.exists(\".pywren_config\"):\n config_filename = os.path.abspath('.pywren_config')\n\n else:\n config_filename = get_default_home_filename()\n\n return config_filename", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")", "def get_out_file_path(self):\n dir_path = self._get_output_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.FOCUSED_IMAGE_NAME)", "def get_report_file_name(self):\n if os.path.isfile(self.REPORT_FILE_PATH):\n print(\"'{}' is already exist!\".format(self.REPORT_FILE_PATH))\n report_file = self.prompt_report_file_name()\n else:\n report_file = self.REPORT_FILE_PATH\n return report_file", "def _get_station_filename():\n output_dir = os.path.join(output, state, station)\n if not os.path.isdir(output_dir):\n logger.debug(\"Creating directory %s\", output_dir)\n os.makedirs(output_dir)\n return os.path.join(output_dir, \"%s.%s\" % (c_time, format))", "def _get_image_name(self) -> str:\n dirname = os.path.basename(os.getcwd())\n default_image_name = f\"{dirname}_{self.config_name}\"\n image_name = self.config_options.get(\"image\", default_image_name)\n return image_name", "def __add_output_file_location(self, filename):\n # Return the joined path of the output directory and the filename\n return os.path.join(self._output_file_location, filename)", "def outputFilepath(self, filename):\n return os.path.expanduser('~') + '/' + filename", "def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)", "def __diff_filename(self):\n diff_dir = 
os.path.join(self.__folder, Reference.DIFF_OUT)\n if not os.path.exists(diff_dir):\n os.makedirs(diff_dir)\n return os.path.join(diff_dir, self.__name +'.jpg')", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def output_filename(phase, debug=False, append=''):\n suffix = ''\n if append:\n suffix = '_{0}'.format(append)\n\n if debug:\n filename = os.path.abspath(config.output_path + tst_map[phase])\n else:\n filename = os.path.abspath(config.output_path + csv_map[phase])\n\n return filename + suffix + '.csv'", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def test_option_format_file_name():\n\toutput_name = format_file_name(test_file, 'test_output.h5m')\n\tassert (output_name == 'test_output.h5m') == True", "def getFileName(self, dialogTitle, defaultExt='txt'):\n filters = ';;'.join((globalref.fileFilters[defaultExt],\n globalref.fileFilters['all']))\n if os.path.basename(self.defaultFilePath):\n self.defaultFilePath = '{0}.{1}'.format(self.defaultFilePath,\n defaultExt)\n filePath = QtGui.QFileDialog.getSaveFileName(QtGui.QApplication.\n activeWindow(),\n dialogTitle,\n self.defaultFilePath, \n filters)\n if filePath and not os.path.splitext(filePath)[1]:\n filePath = '{0}.{1}'.format(filePath, defaultExt)\n return filePath", "def SetOutputFilename(self, filename, output_name=None, output_type=None):\n\n if self._outputs is not None:\n\n for o in self._outputs:\n\n if not output_type is None:\n\n if o.GetType() == output_type:\n\n if output_name is None:\n\n o.SetFilename(filename)\n\n elif o.GetName() == output_name:\n\n o.SetFilename(filename)\n \n elif not output_name is None:\n\n if o.GetName() == output_name:\n \n o.SetFilename(filename)\n \n else:\n\n o.SetFilename(filename)\n else:\n\n print \"No outputs have been loaded\"", "def get_dest_name ( self ):\n return self.filename", "def outputdir():\n return __OUTPUT_DIR__", "def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def filename(self):\r\n\t\treturn None", "def get_filename(out_dir, file_date, extension):\n return path.join(out_dir, f'CrossrefCitations_{file_date}.{extension}')", "def _get_filename():\n dirname = os.path.dirname(__file__)\n return os.path.join(dirname, 'occulttraining.txt')", "def outputNamingBase(video, audio):\n outbase = os.path.join(\n outfolder,\n \"{}_{}_{}_{}_{}\".format(\n os.path.splitext(basev)[0],\n '___',\n os.path.splitext(basea)[0],\n '___',\n now,\n ))\n return outbase", "def output_name(self, auxiliary_output=None):\n\n raise Exception(\"Child classes must override output_name()\")", "def get_default_file_path(file_name: str) -> str:\n return join(SOURCE_PATH, 'data', file_name)" ]
[ "0.7893717", "0.7893717", "0.7893717", "0.7893717", "0.7785941", "0.77704704", "0.7656715", "0.7550296", "0.7494922", "0.7245793", "0.71426344", "0.7105859", "0.707106", "0.70194244", "0.7014153", "0.6989266", "0.69176805", "0.69050795", "0.6865408", "0.68430066", "0.6822271", "0.6789395", "0.67867416", "0.6774877", "0.67630327", "0.67469585", "0.6740333", "0.6731416", "0.66897506", "0.6681157", "0.6679765", "0.66714835", "0.66707206", "0.6667888", "0.66504604", "0.6637022", "0.66290224", "0.6627278", "0.6591614", "0.6559124", "0.6557709", "0.65517163", "0.6548581", "0.6517655", "0.65174913", "0.6503199", "0.6497448", "0.649585", "0.64603484", "0.6453475", "0.64505553", "0.64491105", "0.64401984", "0.6425943", "0.642348", "0.64190936", "0.6410587", "0.64105487", "0.6409835", "0.63998836", "0.6374119", "0.63657993", "0.6362626", "0.6354382", "0.63502765", "0.6348109", "0.6343916", "0.63425314", "0.6333502", "0.6331299", "0.6328593", "0.6324529", "0.6306616", "0.6302525", "0.6289549", "0.627526", "0.6265033", "0.62602067", "0.62541366", "0.62477475", "0.62443453", "0.6243304", "0.6242524", "0.62339985", "0.6232365", "0.62303007", "0.6223618", "0.621678", "0.6200701", "0.6197141", "0.6195406", "0.61915946", "0.6187526", "0.617958", "0.6174786", "0.61671555", "0.61633676", "0.6160238", "0.61595047", "0.61539775" ]
0.69034684
18
Ensure this function returns the correct user input file name.
def test_option_format_file_name():
	output_name = format_file_name(test_file, 'test_output.h5m')
	assert (output_name == 'test_output.h5m') == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path():\n file_name = input(\"Enter the file name:\")\n return file_name", "def obtain_filename():\n file_wanted = input(\"Filename? \")\n return file_wanted", "def file_name_request(self):\n self.file_name = input(\"What is the name of the input file?\\n>>>\")", "def get_filename():\n filename = input(\"Filename? \")\n while not filename:\n filename = input(\"Filename? \")\n return filename", "def get_input_file():\n\n filename = input('Input the file name to save data to: ') + '.csv'\n return filename", "def get_valid_filename(msg):\r\n\r\n filename = input(msg)\r\n while not os.path.exists(filename):\r\n print(\"That file does not exist.\")\r\n filename = input(msg)\r\n return filename", "def AskForFileName():\n file_doesnot_exsit = True\n file_name = None\n while file_doesnot_exsit:\n try:\n file_name = input(\"What is the name of the input file?\")\n file = open(file_name, 'r')\n file_doesnot_exsit = False\n except FileNotFoundError:\n print(\"File is not found\")\n return file_name", "def get_fileName(path):\n fileName = input('Select data file from ' + ','.join(os.listdir(path)) + ' ')\n return fileName", "def _get_valid_filename(files=None):\n\tfilename = input(\"What would you like to name your new cluster?\\n\"\n\t\t\t\t\t\"[10 characters max, alphanumeric only]\\n\")\n\twhile True:\n\t\tvalid = False # set to fail break condition\n\t\texists = True # set to fail break condition\n\t\tif re.match(\"^[^\\W_]{1,10}$\", filename): # regex for 1 to 10 alphanumeric-only chars\n\t\t\tvalid = True\n\t\tif files: # previous save files exist. Test filename for duplication\n\t\t\texists = _test_filename_existence(filename, files)\n\t\telse: # files=None so no previous save files exist\n\t\t\texists = False\n\t\tif not valid:\n\t\t\tfilename = input(\"Invalid name. Please try another name:\\n\")\n\t\tif exists:\n\t\t\tfilename = input(\"Name already in use. 
Choose again:\\n\")\n\t\tif valid and not exists: # filename must match regex and must not already exist\n\t\t\tbreak\n\treturn filename", "def get_input_name():\n xlsTypes = [(\"Файлы Excel или csv\", \".xls .xlsx\")]\n return askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=xlsTypes, title=\"Выберите файлы Excel или CSV\")", "def input_name(inputname):\n\tpath= (os.path.abspath(inputname))\n\treturn (path.split(\"/\")[-1].split(\".\")[0])", "def get_custom_filename(existing_files: List[str]) -> str:\n questions = [\n {\n 'type': 'input',\n 'name': 'custom_filename',\n 'message': 'Name your new sample :',\n 'default': get_default_filename(),\n 'validate': lambda text: (\n (len(re.findall(r'^[A-Za-z0-9_\\-.]{3,40}$', text)) > 0\n and text+'.smp' not in existing_files\n ) or\n 'Typed file name contains illegal characters or already exist'\n )\n }\n ]\n return prompt(questions, style=custom_style_2)['custom_filename']+'.smp'", "def getInputFileBasename(inputFilename = None):\n\n curInputFilename = getInputFilename()\n\n if inputFilename :\n curInputFilename = inputFilename\n\n # print \"curInputFilename=%s\"%(curInputFilename)\n inputBasename = getBasename(curInputFilename)\n # print \"inputBasename=%s\"%(inputBasename)\n return inputBasename", "def prompt_report_file_name(self):\n while True:\n report_file = input(\"Enter name for your report file: \")\n if os.path.isfile(report_file):\n print(\"'{}' is already exist!\".format(report_file))\n else:\n break\n return report_file", "def askInputFile():\n while True:\n print(\"Enter a valid .txt file\")\n # Try until a plain-text file is provided.\n try:\n fileName = easygui.fileopenbox(\"Enter a .txt file\",\n \"Open file\",\n default=\"C:\\\\\",\n filetypes=[\"*.txt\"])\n if fileName == None:\n raise \n except :\n pass\n else:\n return fileName", "def getInputFilename():\n\n argvList = sys.argv\n # print \"argvList=%s\"%(argvList)\n return argvList[0]", "def get_existing_filename(existing_files: List[str]) -> str:\n\n # Ask user which file only if there are multiple files\n\n if len(existing_files) == 1:\n return existing_files[0]\n\n questions = [\n {\n 'type': 'list',\n 'name': 'target_filename',\n 'message': 'Which file do you want to load ?',\n 'choices': existing_files\n }\n ]\n return prompt(questions, style=custom_style_2)[\"target_filename\"]", "def get_new_user_fname():\n fname = input(\"First name: \")\n menu.option_to_exit(fname)\n try:\n if len(fname) < 2 or len(fname) > 25:\n raise ValueError\n except ValueError:\n print(\"First name must be 3–25 characters.\\n\"\n \"Please try again.\\n\")\n return get_new_user_fname()\n else:\n return fname", "def GetInputFilename(fname):\n if not indir or fname[:1] == '/':\n return fname\n for dirname in indir:\n pathname = os.path.join(dirname, fname)\n if os.path.exists(pathname):\n return pathname\n\n raise ValueError(\"Filename '%s' not found in input path (%s) (cwd='%s')\" %\n (fname, ','.join(indir), os.getcwd()))", "def askFilename():\n# print(\"\\nDo you have the file already?\"+\n# \"\\nYes - proceed\\t\\t No - go back to main menu\")\n# choice = input(\"(Y/N) \")\n# if choice.upper() == \"N\":\n# filename = None\n# elif choice.upper() == \"Y\": \n print(\"\\nInsert file name (without the filetype)\")\n print(\"(PRESS CTRL+C IF THERE IS NO FILE YET!!)\")\n fileOpt = input(\"or press enter if saved on default name: \") \n if fileOpt != \"\":\n filename = fileOpt+\".txt\"\n else:\n print(\"\\n\\nFinding file...\")\n print(\"\\n\\nWhich party is it for?\")\n print(\"A. 
Labor\\t\\t B. Liberal\")\n partyOpt = input(\"Selected party is (A/B): \")\n list1 = [\"A\", \"B\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A/B): \")\n marginOpt = input(\"\\nWhat was the margin used? (enter as int) \")\n if partyOpt.upper() == \"A\":\n filename = \"LaborParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n elif partyOpt.upper() == \"B\":\n filename = \"LiberalParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n return filename", "def _get_fname(self, input_fname):\n # Check whether input_fname exists.\n if not os.path.isfile(input_fname):\n # Check to see whether the uncompressed version is available instead\n if not os.path.isfile(input_fname[:-3]):\n msg = \"Input filename %s is not a file\"\n raise IOError(msg % input_fname)\n else:\n msg = (\n \"Input filename ``%s`` is not a file. \\n\"\n \"However, ``%s`` exists, so change your input_fname accordingly.\"\n )\n raise IOError(msg % (input_fname, input_fname[:-3]))\n\n return _passively_decode_string(os.path.abspath(input_fname))", "def reasonable_file_name(filename):\n\tif filename == '-':\n\t\traise ValueError(\"Not a valid file name: \" + filename)\n\treturn filename", "def filename(self) -> Optional[str]:\n ...", "def _getfilename(self):\n pass", "def get_filename():\n filename = None\n while filename is None:\n filename = input(\"Enter a data file name: \")\n if not os.path.isfile(filename): #if the file doesn't exist\n print(\"Invalid File Name Entered!\")\n filename = None\n \n infile = open(filename)\n lines = infile.readlines()\n infile.close()\n return (lines, filename)", "def _safe_file_name(self):\n FMT_STR = \"%s - %s - %s (%d) - %s%s\"\n return cleanse_filename(FMT_STR % (self.track,\n self.artist.replace(\"/\", \"\\\\\"),\n self.album.replace(\"/\", \"\\\\\"),\n self.year,\n self.title.replace(\"/\", \"\\\\\"),\n os.path.splitext(self.file_name)[1]))", "def get_valid_name(self, name):\n return get_valid_filename(name)", "def _get_file_name(self) -> str:\n headers = self._get_url_http_headers()\n file_type = self._check_url_file_type(headers)\n file_name = self._get_filename_from_headers(headers)\n\n if not file_name:\n file_name = self._get_filename_from_url()\n\n if file_name is None:\n raise FileNameCannotBeEvaluatedException\n\n if file_type:\n file_name = self._add_file_extension(file_name, file_type)\n\n return file_name", "def _input_path() -> str:\n path_string = input('Path:').strip()\n return path_string", "def setFinalFileName(self, offset=0):\n while True:\n tempFinalFileName = input(\n offset * \" \" + \"Specify a final file name [%s]: \" % self.finalFileName) or self.finalFileName\n # Check 'tempFinalFileName' for bad formatting\n if len(re.findall(r'[^A-Za-z0-9_-]', tempFinalFileName)) == 0:\n # If no illegal characters then change finalFileName\n self.finalFileName = tempFinalFileName\n break\n else:\n print(\"Invalid file name!\")", "def get_file():\n # Main Loop\n while True:\n filename = input(\"Please enter the name of the file you want to work on: \")\n # Check if file exists...\n if path.exists(filename):\n print(\"File sucessfully retrieved. Returning to previous menu...\")\n print()\n return filename\n \n print(\"That file does not exist in your current directroy. 
Try again.\")\n print()", "def get_filename(s):\n return(os.path.expanduser(s))", "def _get_filename():\n dirname = os.path.dirname(__file__)\n return os.path.join(dirname, 'occulttraining.txt')", "def _get_filename_from_dialog(file_type):\n\n if file_type is 'res':\n caption = 'Select a results file.'\n filter = 'Adams Results Files (*.res)'\n # Bring up a dialog for the user to select a results file\n filename = PyQt4.QtGui.QFileDialog.getOpenFileName(caption=caption, filter=filter)\n\n elif file_type is 'csv':\n caption='Select location to save the csv results file.'\n filter='CSV Files (*.csv)'\n # Bring up a dialog for the user to select a results file\n filename = PyQt4.QtGui.QFileDialog.getSaveFileName(caption=caption, filter=filter) \n\n return filename", "def askopenfilename():\r\n file_opt = options = {}\r\n options['defaultextension'] = '.csv'\r\n options['filetypes'] = [('all files', '.*'), ('csv files', '.csv')]\r\n options['initialdir'] = os.getcwd()\r\n options['initialfile'] = 'profile.csv'\r\n options['title'] = 'choose file'\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**file_opt)\r\n\r\n # open file on your own\r\n return filename", "def get_report_file_name(self):\n if os.path.isfile(self.REPORT_FILE_PATH):\n print(\"'{}' is already exist!\".format(self.REPORT_FILE_PATH))\n report_file = self.prompt_report_file_name()\n else:\n report_file = self.REPORT_FILE_PATH\n return report_file", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def get_filename_as_agrv_if_no_ask(prompt):\n Found = False\n ln = len(sys.argv)\n while not Found:\n if ln < 2:\n file = input( prompt)\n else:\n file = sys.argv[1]\n try:\n RFH = open(file)\n Found = True\n except FileNotFoundError:\n print(\"%%Error! File not found!\")\n ln = 1\n# break\n return RFH", "def display():\r\n name = input(\"Enter the filename:\\n\")\r\n if name==\"42.txt\":\r\n print(f42)\r\n elif name == \"1015.txt\":\r\n print(f1015)\r\n else:\r\n print(\"File not found\")", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def get_existing_filename_or_die(self, key) -> str:\n filename = self.get_or_default(key, None)\n if filename is None:\n print(\"Error, '\" + key + \"' is required.\")\n sys.exit(1)\n elif not os.path.isfile(filename):\n print(\"'\" + str(filename) + \"' is not a file.\")\n sys.exit(1)\n else:\n return filename", "def get_file_name(self):\n return str(self.get_file())", "def GetInputPath(self):\n self.inputDir = raw_input(\"Where should files be read from? This can be a file or a folder of files\\n\\r>>> \")\n if os.path.isabs(self.inputDir):\n if os.path.isdir(self.inputDir):\n self.isFolder = True\n self.inputDirs = os.listdir(self.inputDir)\n elif os.path.isfile(self.inputDir):\n self.isFolder = False\n self.inputDirs = [self.inputDir]\n else:\n print \"That path does not exist. Try again\"\n self.GetInputPath()\n else:\n print \"that was not an excepted path name. 
Try again.\"\n self.GetInputPath()", "def get_valid_path(file_path: Path, prompt_title: str=\"PATH TO FILE\") -> Path:\n\n print(f\"{Color.EMPHASIS}{prompt_title}{Color.END}\")\n while True:\n if file_path.exists() and file_path.is_file():\n return file_path\n else:\n file_path = Path(input(f\"{Color.INFORMATION}Enter the file's path: {Color.END}\"))", "def users_filename(self):\n pass", "def _filename(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilename(**self._kwargs)", "def AskSaveasFilename(self, title='Save File', filetypes=None, initialfile=''):\n if filetypes==None:\n filetypes = [\n ('CSV File','*.csv'),\n ('Any File','*.*')]\n fileName = FileDialog.asksaveasfilename(parent=self.tk_root, filetypes=filetypes, initialfile=initialfile ,title=title)\n return fileName", "def get_sample_name(self):\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['user_filename_input']\n\t\texcept Exception, e:\n\t\t\treturn None", "def input_name_from_func_name(func_name):\n\treturn os.path.join(INPUTS_DIR, ''.join(func_name.split('make_')[1:])) \\\n\t\t\t+ '.%s' % EXTENSION", "def generate_filename(self, filename):\n filename = str(filename).replace(\"\\\\\", \"/\")\n # `filename` may include a path as returned by FileField.upload_to.\n dirname, filename = os.path.split(filename)\n if \"..\" in pathlib.PurePath(dirname).parts:\n raise SuspiciousFileOperation(\n \"Detected path traversal attempt in '%s'\" % dirname\n )\n return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def get_output_file_name(argn=2, std_name='output.txt'):\n try:\n name = sys.argv[argn]\n except IndexError:\n name = std_name\n print(\"Warning: no output file name received. 
Output will be\"\n \" written to '%s'.\" % name)\n return name", "def test_default_format_file_name():\n\toutput_name = format_file_name(test_file)\n\tassert (output_name == test_file[:-4] + \"_no_grave.h5m\") == True", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def getFileName(self, dialogTitle, defaultExt='txt'):\n filters = ';;'.join((globalref.fileFilters[defaultExt],\n globalref.fileFilters['all']))\n if os.path.basename(self.defaultFilePath):\n self.defaultFilePath = '{0}.{1}'.format(self.defaultFilePath,\n defaultExt)\n filePath = QtGui.QFileDialog.getSaveFileName(QtGui.QApplication.\n activeWindow(),\n dialogTitle,\n self.defaultFilePath, \n filters)\n if filePath and not os.path.splitext(filePath)[1]:\n filePath = '{0}.{1}'.format(filePath, defaultExt)\n return filePath", "def get_local_filename_arg(self):\n\ttry:\n\t arg = sys.argv[2]\n\t local_filename = str(arg) \n\texcept IndexError:\n\t print \"Please provide the name under which the received file is to be stored locally.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\n\telse:\n\t return local_filename", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def is_valid_file(input_file):\n if not os.path.isfile(input_file):\n print('File \\'{}\\' not found.'.format(input_file))\n exit(1)\n return input_file", "def getDefaultOutputFileName(inputFileName):\n baseName = os.path.basename(inputFileName)\n rootName = os.path.splitext(baseName)[0]\n return string.join([rootName, \"xls\"], '.')", "def filename(self):\r\n\t\treturn None", "def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg", "def get_filename(self):\n \n for f in os.listdir(self.get_directory()):\n if os.path.isfile(os.path.join(self.get_directory(), f)):\n return f\n \n return None", "def get_file_name(self):\n return self.path.name[6:]", "def callback_Save():\n saving_msg = 'Save Bioprocess As:'\\\n '\\n(will save in processes/ by default)'\n fileName = sg.popup_get_text(saving_msg, 'File Saver')\n\n if fileName:\n # read filename and add default path\n fileName = fileName.strip(' ')\n\n # if user does not input a fileName\n elif fileName is None:\n fileName = 'cancel'\n elif fileName == '':\n fileName = 'exit'\n\n return fileName", "def input_file(path): \n if path is None : \n return None\n else : \n path = os.path.abspath(path)\n if not os.path.exists(path):\n raise IOError('File {} does not exists.'.format(path))\n return path", "def get_file_name(self):\n return self.upload.name[6:]", "def 
get_file_name(self):\n return self.file_name", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def get_filename(filename):\n \n return utilities.get_filename(filename)", "def get_rad_filename(input_file):\n # replace PSV with RAD\n (path, filename) = os.path.split(input_file)\n rad_filename = filename.replace('psv', 'rad')\n rad_filename = rad_filename.replace('PSV', 'RAD')\n rad_file = os.path.join(path, rad_filename)\n\n # rename to .TAB from .TXT in case of raw input\n rad_file = rad_file.replace('.TXT', '.tab')\n rad_file = rad_file.replace('.txt', '.tab')\n\n return rad_file", "def get_original_file_name(cleanup_event):\n original_name = None\n if not cleanup_event:\n return\n try:\n original_name = cleanup_event.event_outcome_detail.split(\";\")[0]\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n cleanup_event.event_outcome_detail,\n )\n return original_name", "def get_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n if not controls_utils.artwork_name_is_unique(artwork_name):\n return artwork_name\n else:\n print('artwork not found')", "def temp_name(self, filename):\n if self.params.get('nopart', False) or filename == '-' or \\\n (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):\n return filename\n return filename + '.part'", "def GetFileName():\r\n d = GetData()\r\n return d.filename", "def _what_is_filename(self):\n found = False\n\n while not found:\n prompt = \"-?- What file to send -> \"\n fn = self._input(prompt)\n found = FileTools().does_file_exist(fn)\n fs = FileTools().what_is_filesize(fn)\n return fn, fs", "def getDefaultOutputFileName(inputFileName):\n\tbaseName = os.path.basename(inputFileName)\n\trootName = os.path.splitext(baseName)[0]\n\treturn string.join([rootName, \"xls\"], '.')", "def getFileName(self, textEntry):\n textEntry.setText(QtGui.QFileDialog.getOpenFileName())\n textEntry.emit(QtCore.SIGNAL('FILE_SELECTED'))", "def clean_data_name(data_name, filename):\n if data_name is None or data_name == '':\n data_name = os.path.splitext(os.path.basename(filename))[0]\n return data_name", "def construct_path(self, str_input):\n\n str_result = str_input[len('\\input{'):-1]\n str_result = self.publication_path + str_result\n\n #print '[i] constructed path {}'.format(str_result)\n\n if str_result[-3:] != 'tex':\n str_result = str_result + '.tex'\n return str_result", "def input_read_filename(self, register: str, filename: str):\n full_path = _root_dir / filename\n if not full_path.is_file():\n print(f\"WARN: Input file {full_path} does not exist.\")\n self._input_filename(register, filename)", "def valid_file_name(s: str) -> str:\n global UNICODE_FILENAMES, FILESYSTEM_IS_LINUX\n if FILESYSTEM_IS_LINUX:\n s = fix_linux.sub(\"_\", s)\n else:\n s = fix_windows.sub(\"_\", s)\n s = fix_windows_ending.split(s)[0]\n\n if not UNICODE_FILENAMES:\n s = fix_unicode.sub(\"_\", s)\n return s", "def test_choose_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.title == 'test').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertEqual('aww/test - (testuser)', file, msg='Failed to convert basic Test post!')", "def get_path():\n\n output_path = None\n while output_path is None:\n print question + \"Please enter the directory where you would like the file saved?\"\n output_path = 
raw_input()\n if os.path.isdir(os.path.expanduser(output_path)):\n pass\n else:\n os.system('clear')\n print warn + \"%s is not valid, please try again: \" % str(output_path)\n output_path = None\n return os.path.expanduser(output_path)", "def test_sanitized_filename(self):\n value = \"/absolute/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"\n\n value = \"../relative/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"", "def get_fname(self):\n # When you call getOpenPathName, a path picker dialog is created\n # and if the user selects a path, it's path is returned, and if not\n # (ie, the user cancels the operation) None is returned\n fname = QtGui.QFileDialog.getExistingDirectory(self, \"Select Path\")\n if fname:\n self.lbl.setText(fname)\n else:\n self.lbl.setText('No path selected')", "def input_path(self, filename):\n\n return os.path.abspath(os.path.expanduser(os.path.join(self.input_dir, filename)))", "def filename(self):\n return valid_filename(\n \"%s%s by %s.epub\" %\n (self.prefix, self.title, self.author))", "def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)", "def extract_file_name(self, input_file):\n self.file_name_with_ext, self.file_name = extract_file_name(input_file)", "def _retrieve_filename(file: Union[str, FileStorage]) -> str:\n if isinstance(file, FileStorage):\n return file.filename\n return file", "def get_filename(self, base_filename: str) -> str:\n folder = self.prepare_folder()\n i = 0\n cartridge_number = self.config['info']['cartridge_number']\n while os.path.isfile(os.path.join(folder, base_filename.format(\n cartridge_number=cartridge_number,\n i=i))):\n i += 1\n\n return os.path.join(folder, base_filename.format(cartridge_number=cartridge_number, i=i))", "def uploaded_paper_name(paper, filename):\n initial = 'uploads/'\n name = paper.name + '-paperbank' + '.pdf'\n\n return initial + name", "def askOutputFile():\n while True:\n print(\"Save the final file\")\n # Try until the final file is saved.\n try:\n fileName = easygui.filesavebox(\"Save your file\",\n \"Save the file\",\n default=\"C:\\\\DefaultFile.txt\",\n filetypes=[\"*.txt\"])\n if fileName == None:\n raise\n except:\n pass\n else:\n return fileName", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def format_filename(self, s):\n valid_chars = \"-_ %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename", "def send_file_name():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n elif len(entries) != 0:\n messagebox.showinfo(\"Warning!\", \"You must first close the current file!\", parent=app_frame)\n return\n\n events = get_file(value.get())\n # Call display_lr_assignments() and send events file to be displayed in the application window\n display_lr_assignments(events)", "def getSafeFilename(untrustedFilename: unicode) -> unicode:\n ...", "def prompt_user():\n print('Please insert image direcory:')\n inpt = input('> ')\n return inpt", "def check_input_file(value):\n if not os.path.exists(value):\n raise argparse.ArgumentTypeError(f'Input file 
`{value}` does not exist')\n\n return value", "def get_date_input_file(file: str) -> str:\n # check format\n if not match_input_files(file):\n raise Exception(\"Not valid input file format\")\n\n else:\n date = result = re.search(r\"input_(.*)\\.feather\", file)\n return date.group(1)" ]
[ "0.8286607", "0.8252012", "0.80501723", "0.76167345", "0.7578909", "0.74168056", "0.73490226", "0.7260431", "0.72341657", "0.723208", "0.7181744", "0.71346575", "0.70963013", "0.7040597", "0.70244354", "0.7022156", "0.6870741", "0.68621856", "0.68099684", "0.6801889", "0.672843", "0.66927975", "0.66684014", "0.66645026", "0.6645343", "0.65987074", "0.658073", "0.65658194", "0.65625185", "0.65475535", "0.6508533", "0.6490562", "0.6487085", "0.6484191", "0.64804894", "0.64779687", "0.64777434", "0.6416631", "0.6409543", "0.6394178", "0.6386469", "0.63677526", "0.6343119", "0.6339003", "0.63363636", "0.63350827", "0.6316884", "0.63088834", "0.6304797", "0.6297282", "0.6287421", "0.6278721", "0.6268972", "0.6250107", "0.6249588", "0.6245491", "0.6244111", "0.6235244", "0.6222256", "0.62217325", "0.6207675", "0.61950856", "0.6193523", "0.6175095", "0.6172391", "0.61722434", "0.6171619", "0.6166221", "0.6164479", "0.61467445", "0.61440843", "0.6139694", "0.613781", "0.61251205", "0.6124222", "0.6122806", "0.6116967", "0.61148345", "0.61143035", "0.6112483", "0.61095357", "0.6109029", "0.60935235", "0.60914415", "0.60896343", "0.60883147", "0.6085378", "0.60837466", "0.6074994", "0.607165", "0.6070665", "0.6067293", "0.60647374", "0.60526097", "0.6044541", "0.6039441", "0.60288256", "0.6028268", "0.6025465", "0.6024851", "0.602244" ]
0.0
-1
Ensure that graveyard_removal.py correctly removes the graveyard from an h5m file.
def test_default_graveyard_removal():
    os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file)
    size = os.path.getsize(test_file[:-4] + "_no_grave.h5m")
    assert size == 5748780
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")", "def remove_group(self):\n try:\n with open_hdf5(self.file_name, mode=\"a\") as hdf_file:\n del hdf_file[self.h5_path]\n except KeyError:\n pass", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def test_print_graveyard_removal(capfd):\n os.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file + \" -p\")\n out, err = capfd.readouterr()\n assert (\"12682136550675318127\" in out) == True", "def __del__(self):\n self.h5file.close()", "def test_locate_graveyard():\n groups_to_write, graveyard_sets = locate_graveyard(mb)\n assert groups_to_write == [12682136550675318125, 12682136550675318126,\n 12682136550675318128, 12682136550675318129]", "def __delitem__(self, key):\n if self.file_exists:\n try:\n with open_hdf5(self.file_name, mode=\"a\") as store:\n del store[self._get_h5_path(key)]\n except (AttributeError, KeyError):\n pass", "def tearDown(self):\n\n self.h5file.close()\n self.h5file = None\n Path(self.h5fname).unlink() # comment this for debug only\n super().tearDown()", "def test_d_remove_database(self):\n\n if os.path.isfile(location):\n os.remove(location)\n\n assert(True)", "def remove_file_from_cache(self, md5_hash):\n self.used_space -= len(self.storage[md5_hash])\n self.storage.pop(md5_hash)\n self.remove_from_usage_queue(md5_hash)", "def test_exc(self):\n g = h5g.open(self.fid, '/')\n g._close()\n self.assertEqual(h5i.get_type(g), h5i.BADID)", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def remove():", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def clean_database(databasePathname):\n print '# loading database ' + databasePathname\n try:\n db = gdbm.open(databasePathname, 'w')\n except:\n print \"# \" + databasePathname + \" could not be loaded\"\n sys.exit(-1)\n\n # even though gdbm supports memory efficient iteration over\n # all keys, I want to order my traversal across similar\n # paths to leverage caching of directory files:\n allKeys=db.keys()\n print '# finished loaded keys from ' + databasePathname\n allKeys.sort()\n print '# finished sorting keys from ' + databasePathname\n print '# deleting dead nodes'\n count=0\n for currKey in allKeys:\n try:\n os.stat(currKey)\n sys.stdout.write('.')\n except OSError:\n del db[currKey]\n sys.stdout.write('*')\n count=count+1\n sys.stdout.flush()\n print \"\\n# reorganizing \" + databasePathname\n db.reorganize()\n 
db.sync()\n db.close()\n print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'", "def purge_outdated(self):\n todelete = []\n sql = \"select rowid, path, mtime from pictures\"\n cur = self.con.execute(sql)\n for rowid, path_str, mtime in cur:\n if mtime and op.exists(path_str):\n picture_mtime = os.stat(path_str).st_mtime\n if int(picture_mtime) <= mtime:\n # not outdated\n continue\n todelete.append(rowid)\n if todelete:\n sql = \"delete from pictures where rowid in (%s)\" % ','.join(map(str, todelete))\n self.con.execute(sql)", "def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )", "async def process_fixup(self, reference: Optional[str] = None) -> None:\n full_snapshots = [\n x for x in self.sys_snapshots.list_snapshots if x.sys_type == SNAPSHOT_FULL\n ]\n\n if len(full_snapshots) < MINIMUM_FULL_SNAPSHOTS:\n return\n\n _LOGGER.info(\"Starting removal of old full snapshots\")\n for snapshot in sorted(full_snapshots, key=lambda x: x.date)[:-1]:\n self.sys_snapshots.remove(snapshot)", "def test_removal_mount_dependency(self):\n from chroma_core.models import ManagedMgs\n\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"mounted\")\n try:\n # Make it so that the mount unconfigure operations will fail\n MockAgentRpc.succeed = False\n\n # -> the TargetMount removal parts of this operation will fail, we\n # want to make sure that this means that Target deletion part\n # fails as well\n self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\", check=False)\n\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertNotEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")\n finally:\n MockAgentRpc.succeed = True\n\n # Now let the op go through successfully\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\")\n with self.assertRaises(ManagedMgs.DoesNotExist):\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")", "def test_remove_file_group0(self):\n with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:\n mets = OcrdMets(filename=join(tempdir, 'mets.xml'))\n self.assertEqual(len(mets.file_groups), 17)\n self.assertEqual(len(mets.find_all_files()), 35)\n # print()\n # before = sorted([x.ID for x in mets.find_all_files()])\n with self.assertRaisesRegex(Exception, \"not empty\"):\n mets.remove_file_group('OCR-D-GT-ALTO')\n mets.remove_file_group('OCR-D-GT-PAGE', recursive=True)\n # print([x for x in before if x not in sorted([x.ID for x in mets.find_all_files()])])\n self.assertEqual(len(mets.file_groups), 16)\n self.assertEqual(len(mets.find_all_files()), 33)", "def remove():\n\n db_remove()", "def removeStaleLVM():\n log.debug(\"waiting 5s for activation of stale lvm on new md array %s\", self.path)\n time.sleep(5)\n udev.settle()\n\n try:\n pv_info = lvm.pvinfo(device=self.path)[self.path]\n except (errors.LVMError, KeyError) as e:\n return\n\n vg_uuid = None\n try:\n vg_uuid = udev.device_get_vg_uuid(pv_info)\n except KeyError:\n return\n\n if vg_uuid:\n log.info(\"removing stale LVM metadata found on %s\", self.name)\n try:\n lvm.vgremove(None, vg_uuid=vg_uuid)\n except errors.LVMError as e:\n log.error(\"Failed to remove stale 
volume group from newly-created md array %s: %s\",\n self.path, str(e))\n raise", "def test_removedFile(self):\n self.write(\"service1.json\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.pump()\n self.remove(\"service1.json\")\n self.pump()\n self.assertNodesEqual(knownNodes(self.disco, \"service1\", \"staging\"), [])", "def _clean_up(self):", "def removedb():\n\n try:\n os.remove(rebasedb)\n except OSError:\n pass", "def check_hdf5_files(database):\n\n logger.info(\" Checking dataset Integrity\")\n remove_file = []\n for fname in database:\n try:\n f = h5py.File(fname, 'r')\n mol_names = list(f.keys())\n if len(mol_names) == 0:\n warnings.warn(' -> %s is empty ' % fname)\n remove_file.append(fname)\n f.close()\n except BaseException:\n warnings.warn(' -> %s is corrputed ' % fname)\n remove_file.append(fname)\n\n for name in remove_file:\n database.remove(name)\n if remove_file:\n logger.info(f'\\t -> Empty or corrput databases are removed:\\n'\n f'{remove_file}')\n\n return database", "def tearDownClass(cls):\n path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),\n 'examples', 'arkane', 'species')\n cls.dump_path = os.path.join(path, 'C2H6')\n cls.load_path = os.path.join(path, 'C2H6_from_yaml')\n cls.extensions_to_delete = ['pdf', 'txt', 'inp', 'csv']\n cls.files_to_delete = ['arkane.log', 'output.py']\n cls.files_to_keep = ['C2H6.yml']\n for path in [cls.dump_path, cls.load_path]:\n for name in os.listdir(path):\n item_path = os.path.join(path, name)\n if os.path.isfile(item_path):\n extension = name.split('.')[-1]\n if name in cls.files_to_delete or \\\n (extension in cls.extensions_to_delete and name not in cls.files_to_keep):\n os.remove(item_path)\n else:\n # This is a sub-directory. 
remove.\n shutil.rmtree(item_path)", "def database_maintenance():\r\n\r\n logging.debug('database_maintenance()')\r\n\r\n # Check datgabase\r\n all_imagepaths = get_all_images_from_database()\r\n for imagepath in all_imagepaths:\r\n if not os.path.isfile(imagepath):\r\n delete_image_from_database(imagepath)\r\n logging.debug('database_maintenance() - image not in folder, deleted')\r\n\r\n # Check temporary folder\r\n all_imagepaths = get_all_images_from_filesystem()\r\n for imagepath in all_imagepaths:\r\n if not exists_image_in_database(imagepath):\r\n delete_image_from_database(imagepath)\r\n logging.debug('database_maintenance() - image not in database, deleted')", "def test_remove_orphaned_metadata(self):\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini'),\n '[orphaned.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini'),\n '[testdriver.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini'), 'expected: FAIL\\n')\n with self._patch_builtins():\n manifests = load_and_update_manifests(self.finder)\n self.command.remove_orphaned_metadata(manifests)\n self.assertFalse(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini')))", "def deleteShards():\n os.popen('rm *_shard')", "def clean_db():\n yest = datetime.utcnow() - timedelta(days=1)\n try:\n Load.query.filter(Load.time < yest).delete()\n db.session.commit()\n except:\n db.session.rollback()", "def remove_from_usage_queue(self, md5_hash):\n self.usage_queue.remove(md5_hash)", "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def _check_final_md5(self, key, etag):\r\n if key.bucket.connection.debug >= 1:\r\n print 'Checking md5 against etag.'\r\n if key.md5 != etag.strip('\"\\''):\r\n # Call key.open_read() before attempting to delete the\r\n # (incorrect-content) key, so we perform that request on a\r\n # different HTTP connection. 
This is neededb because httplib\r\n # will return a \"Response not ready\" error if you try to perform\r\n # a second transaction on the connection.\r\n key.open_read()\r\n key.close()\r\n key.delete()\r\n raise ResumableUploadException(\r\n 'File changed during upload: md5 signature doesn\\'t match etag '\r\n '(incorrect uploaded object deleted)',\r\n ResumableTransferDisposition.ABORT)", "def test_modify_storage_group_snapshot_unlink(self):\n if self.is_v4:\n self.skipTest(\n 'Modify storage group snapshot unlink by generation does '\n 'not work on the V4.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, link=True)\n linked_snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(linked_snap_details.get('isLinked'))\n self.replication.modify_storage_group_snapshot(\n src_storage_grp_id=sg_name, tgt_storage_grp_id=target_sg,\n snap_name=snap_name, gen_num=0, unlink=True)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def remove(self, egg):", "def teardown_class(self):\n\n file_list = \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*') + HEN_FILE_EXTENSION) + \\\n glob.glob(os.path.join(self.datadir,\n '*lcurve*') + HEN_FILE_EXTENSION) + \\\n glob.glob(os.path.join(self.datadir,\n '*lcurve*.txt')) + \\\n glob.glob(os.path.join(self.datadir,\n '*.log')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.dat')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.png')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.txt')) + \\\n glob.glob(os.path.join(self.datadir,\n 'monol_test_fake*.evt')) + \\\n glob.glob(os.path.join(self.datadir,\n 'bubu*'))\n for f in file_list:\n print(\"Removing \" + f)\n os.remove(f)", "def clean_up_old_exports(self):\n threshold = datetime.datetime.utcnow() - datetime.timedelta(days=30)\n self.session.query(Export).filter(Export.started_at < threshold).delete()", "def delete(self):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.delete()\n\t\telse:\n\t\t\tsuper( textureFile, self ).delete()", "def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)", "def delete(self, host, file):", "def death_check(self):\r\n chance = random.random()\r\n if decimal.Decimal(chance) < decimal.Decimal(self.death_rate):\r\n if self.unique_id in head_of_household_list:\r\n try:\r\n head_of_household_list[self.hh_id] = 0\r\n except TypeError: # head of household migrated\r\n head_of_household_list[self.past_hh_id] = 0\r\n self.model.number_of_humans -= 1\r\n if self.unique_id in labor_list:\r\n labor_list.remove(self.unique_id)\r\n if self.work_status == 1:\r\n try:\r\n num_labor_list[self.hh_id] -= 1\r\n except TypeError:\r\n num_labor_list[self.past_hh_id] -= 1\r\n if self.unique_id in former_hoh_list:\r\n try:\r\n former_hoh_list[self.hh_id] = 0\r\n except:\r\n former_hoh_list[self.past_hh_id] = 0\r\n if [self.unique_id, self.hh_id] in single_male_list:\r\n single_male_list.remove([self.unique_id, self.hh_id])\r\n if self.unique_id in 
married_male_list:\r\n married_male_list.remove(self.unique_id)\r\n human_death_list.append(self.unique_id)\r\n try:\r\n hh_size_list[self.hh_id] -= 1\r\n except:\r\n hh_size_list[self.past_hh_id] -= 1\r\n human_demographic_structure_list[self.age_category] -= 1\r\n\r\n self.model.schedule.remove(self)\r\n if self in self.model.grid:\r\n self.model.grid.remove_agent(self)", "def cleanUp(self, f):\n os.system('rm ' + f)", "def del_highscores(self):\n\t\ttry:\n\t\t\twith open(self.filename) as f_obj:\n\t\t\t\tcontents = f_obj.read()\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 733 7043 for assistance.')\n\t\telse:\n\t\t\tjson_contents = json.loads(contents)\n\t\t\tfor item in json_contents:\n\t\t\t\titem['player_name'] = 'EMPTY'\n\t\t\t\titem['player_score'] = 0\n\t\t\tself.save_highscores(json_contents)", "def test_remove_herbivore():\n savanna_cell = topo.Savanna()\n test_herbivore = animals.Herbivores()\n savanna_cell.add_animal(test_herbivore)\n assert test_herbivore in savanna_cell.herbivore_list\n assert test_herbivore in animals.Animals.instances\n savanna_cell.remove_animal(test_herbivore)\n assert test_herbivore not in savanna_cell.herbivore_list\n assert test_herbivore in animals.Animals.instances\n animals.Animals.instances.remove(test_herbivore)\n assert test_herbivore not in animals.Animals.instances", "def purge(self):\n pass", "def remove():\n pass", "def distributed_clean_db(empty_db):\n team.load_file(GOOD_TEST_TEAM_FILE, False)\n game.load_file(GOOD_TEST_GAME_FILE)\n game.load_file(join(TEST_DATA_DIR, \"distribution2.csv\"))\n game.load_file(join(TEST_DATA_DIR, \"distribution3.csv\"))\n service.set_player_codes()\n team.set_matches()\n return empty_db", "def unload_fmu(self):\n del self.fmu", "def _check_final_md5(self, key, file_name):\r\n fp = open(file_name, 'r')\r\n if key.bucket.connection.debug >= 1:\r\n print 'Checking md5 against etag.'\r\n hex_md5 = key.compute_md5(fp)[0]\r\n if hex_md5 != key.etag.strip('\"\\''):\r\n file_name = fp.name\r\n fp.close()\r\n os.unlink(file_name)\r\n raise ResumableDownloadException(\r\n 'File changed during download: md5 signature doesn\\'t match '\r\n 'etag (incorrect downloaded file deleted)',\r\n ResumableTransferDisposition.ABORT)", "def test_RemovalCandidate_instantiation():\n f = tempfile.mkstemp()\n #f[1] is the absolute pathname.\n rc = r.RemovalCandidate(f[1])\n assert_equal(rc.path,f[1])\n assert_is_instance(rc.mtime,float)\n assert_equal(rc.size,0)\n os.remove(f[1])", "def testBrokenLinks(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f[\"/Mars/BrokenSoftLink\"] = h5py.SoftLink(\"/Idontexists\")\n f[\"/Mars/BrokenExternalLink\"] = h5py.ExternalLink(\"notexistingfile.h5\", \"/Idontexists\")\n\n ddict = h5todict(self.h5_fname, path=\"/Mars\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=2):\n ddict = h5todict(self.h5_fname, path=\"/Mars\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/Mars\", errors='raise')", "def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass", "def remove(self, name):\n # Remove any DUMP-IDS 1st in case the user wants to move a mesh\n if 'mesh' in self.pargs:\n if name in self.pargs['mesh']:\n # must delete all meshes / dumps in order to re-import remaining meshes\n for dump in 
self.pargs['traj']['dump_mname']:\n self.lmp.command('undump {}'.format(dump))\n\n self.lmp.command('unfix walls')\n\n for i, mesh in enumerate(self.pargs['mesh'].keys()):\n self.lmp.command('unfix {}'.format(mesh))\n\n if 'mfile' in self.pargs['traj']:\n if isinstance(self.pargs['traj']['mfile'], list):\n raise RuntimeError('mfile cannot be a list. Something is not setup correctly.')\n elif self.pargs['traj']['mfile']: # the user has requested all mesh(es) be written as one file\n pass\n else: # self.pargs['traj']['mfile'] had better be None\n assert(self.pargs['traj']['mfile'] is None)\n\n del self.pargs['mesh'][name]\n\n # Re-import any remaining meshes\n self.importMeshes()\n\n # Create new dump setups, leaving particle dumps intact\n self.writeSetup(only_mesh=True)\n\n return 0\n \n # Otherwise, we are just unfixing a non-mesh fix\n self.lmp.command('unfix {}'.format(name))", "def fix(self):\n\n pm.delete(self.errorNodes)\n\n self.run()", "def test_unlink_gen_snapshot(self):\n if self.is_v4:\n self.skipTest('Getting storage group list by generation does '\n 'not work on the V4. Will need logic in this test '\n 'based on uCode.')\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = \"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n self.replication.link_gen_snapshot(\n sg_id=sg_name, link_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_linked=True)\n self.assertTrue(snap_details.get('isLinked'))\n self.replication.unlink_gen_snapshot(\n sg_id=sg_name, unlink_sg_name=target_sg, snap_name=snap_name,\n gen_num=0)\n snap_details = self._test_get_ss_gen_detail(\n sg_name, snap_name, gen_num=0, check_unlinked=True)\n\n self.assertFalse(snap_details.get('isLinked'))\n self.provisioning.delete_storage_group(storage_group_id=target_sg)", "def clean_gem5(c):\n _delete_file(f'{ROOT_DIR}/gem5/build/')", "def test_kyc_delete_legal_board_member(self):\n pass", "def purge() -> None:\r\n _purge_func(False)", "def test_snapshot_and_restore_drop_table_remove_dropped_column(self):\n cluster = self.cluster\n cluster.populate(1).start()\n node1, = cluster.nodelist()\n session = self.patient_cql_connection(node1)\n\n # Create schema and insert some data\n create_ks(session, 'ks', 1)\n session.execute(\"CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)\")\n session.execute(\"INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')\")\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\", \"b\"])\n\n # Take a snapshot and drop the column and then drop table\n snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')\n session.execute(\"ALTER TABLE ks.cf DROP b\")\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\"])\n session.execute(\"DROP TABLE ks.cf\")\n\n # Restore schema and data from snapshot, data should be the same as input\n self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')\n self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')\n node1.nodetool('refresh ks cf')\n assert_one(session, \"SELECT * FROM ks.cf\", [1, \"a\", \"b\"])\n\n # Clean up\n logger.debug(\"removing snapshot_dir: \" + snapshot_dir)\n shutil.rmtree(snapshot_dir)", "def close_file(self, data_set):\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')", "def hfp_delete(handle, org_dn, name):\r\n\r\n dn = org_dn + 
\"/fw-host-pack-\" + name\r\n mo = handle.query_dn(dn)\r\n if mo is None:\r\n raise ValueError(\"HFP '%s' does not exist\" % dn)\r\n\r\n handle.remove_mo(mo)\r\n handle.commit()", "def check_for_unload(self):\n flag = False\n for i, layer in enumerate(self.chunk_loaded_list):\n for ticket, *data in layer[:]:\n if ticket.value in (0, 1):\n continue\n if (\n ticket == ChunkLoadTicketType.PLAYER_LOADED\n ): # check if player in range, if not, remove ticket\n pass\n else:\n layer.remove(ticket)\n if i != 15:\n self.chunk_loaded_list[i + 1].append(ticket)\n flag = flag or len(layer)\n\n if not flag:\n print(\"unloading chunk\", self)\n self.hide_all(True)\n self.get_dimension().unload_chunk(self)", "def unloaded():\n pass", "def _snapshot_destroy(self, dir_path, snapshot_name):\n try:\n self.fs.rmdir(self._snapshot_path(dir_path, snapshot_name))\n except cephfs.ObjectNotFound:\n log.warn(\"Snapshot was already gone: {0}\".format(snapshot_name))", "def test_exc(self):\n g = h5g.open(self.fid, '/')\n g._close()\n self.assertRaises(ValueError, h5i.get_file_id, g)", "def test_remove_absent_host(self, inventoryloader):\n inventoryloader.del_host('localhost3')", "def removed(self, filename):\r\n self.__close_and_reload(filename)", "def destroy(self):\n self.close()\n if self.wantAnyDbm:\n lt = time.asctime(time.localtime())\n trans = maketrans(': ','__')\n t = lt.translate(trans)\n head, tail = os.path.split(self.filepath)\n newFileName = 'UDStoreBak'+t\n if os.path.exists(self.filepath):\n try:\n os.rename(tail, newFileName)\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Creating backup of file: %s saving as: %s' %(tail, newFileName))\n except:\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Unable to create backup of file: %s ' %tail)\n else:\n # Remove the filename with all sufix's\n # .bak, .dir, .dat\n files = os.listdir(head)\n for file in files:\n if file.find(tail)>-1:\n filename, ext = os.path.splitext(file)\n try:\n os.rename(file, newFileName+ext)\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Creating backup of file: %s saving as: %s' %(file,newFileName+ext))\n except:\n uber.air.writeServerEvent('Uberdog data store Info', 0 \\\n , 'Unable to create backup of file: %s ' %newFileName+ext)\n else:\n if os.path.exists(self.filepath + '.bu'):\n os.remove(self.filepath + '.bu')\n if os.path.exists(self.filepath):\n os.remove(self.filepath)", "def removeIfDead(self):\n if self.y < 0:\n del projectiles[findPlace(self, projectiles)]", "def test_topo_remove_herbivore():\n cell = topo.Topography()\n testherbi = animals.Herbivores()\n testlist = [animals.Herbivores() for _ in range(10)]\n cell.herbivore_list = testlist\n cell.add_animal(testherbi)\n cell.remove_animal(testherbi)\n assert testherbi not in cell.herbivore_list", "def remove(self):\n\n try:\n os.remove(self.fullpath)\n os.remove(self.fullpath + \".pub\")\n except OSError:\n e = get_exception()\n if e.errno != errno.ENOENT:\n raise HostkeyError(e)\n else:\n self.changed = False", "def test_unpacker_delete_manifest_metadata_unknown(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n mock_os_remove = mocker.patch(\"os.remove\")\n mock_os_remove.side_effect = [NameError, NameError]\n with pytest.raises(NameError):\n p._delete_manifest_metadata(\"0869ea50-e437-443f-8cdb-31a350f88e57\")\n mock_os_remove.assert_called_with(\"/tmp/lta/testing/unpacker/outbox/0869ea50-e437-443f-8cdb-31a350f88e57.metadata.ndjson\")", "def delete(self, 
name):\n path = self.directory / f\"{name}.yaml\"\n if path.exists():\n path.unlink()", "def _purge_mlrun_db(self):\n self._delete_mlrun_db()\n self._scale_down_mlrun_deployments()", "def test_delete_voltage_maps(self):\n pass", "def drop(self):\n pass", "def drop(self):\n pass", "def testNotExistingPath(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f['data'] = 1\n\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=1):\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='raise')", "def die(self):\n self.pjs.bombermen.remove(self)\n for block in self.physics.blocks[self.stype]:\n if block == self.rects[0]:\n self.physics.blocks[self.stype].remove(block)", "def removeCoordinatesDumpFile(self):\n os.remove(self.COORDINATES_DUMP_FNAME)", "def cleanup():", "def remove_stuff_post_error(self):\n os.system('rm %s' % self.destination)", "def close_hdf_file(self):\n\t\tself.h5f.close()", "def delete_thumbnail(self, thumbnail_name):", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def close(self):\n path_lst = self.h5_path.split(\"/\")\n last = self.history[-1].strip()\n if len(last) > 0:\n hist_lst = last.split(\"/\")\n self.h5_path = \"/\".join(path_lst[: -len(hist_lst)])\n if len(self.h5_path.strip()) == 0:\n self.h5_path = \"/\"\n del self.history[-1]", "def test_rebuild_with_wrong_shared_storage(self, mock_remove_allocs):\n with mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True) as mock_inst:\n self.assertRaises(exception.InvalidSharedStorage,\n lambda: self._rebuild(on_shared_storage=False))\n\n # Should remain on original host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], 'fake_host_2')\n self.assertTrue(mock_inst.called)\n mock_remove_allocs.assert_called_once_with(\n mock.ANY, instance.uuid, self.rt.compute_nodes[NODENAME].uuid)", "def test_remove_vm(self, instance_name):\n self.instances.pop(instance_name)", "def test_collisions_file_path(self):\n self.assertRaises(ValueError, collisions_clean, \"not_a_file_path\")", "def test_aws_service_api_snapshot_delete(self):\n pass", "def tearDown(self):\n os.remove(self._dbfile)", "def cleanup_footprint(self, fpname):\n logging.debug(\"Environment: %s entered\" % __name__)\n fp = self.get_footprint(fpname, start=False)\n fp.cleanup_old_images()\n fp.save()", "def test_duplicate_heartbeats_are_deleted(self):\n self._assert_duplicates_are_deleted(HeartBeat)", "def test_unpacker_delete_manifest_metadata_v3(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n mock_os_remove = mocker.patch(\"os.remove\")\n mock_os_remove.side_effect = [NameError, None]\n p._delete_manifest_metadata(\"0869ea50-e437-443f-8cdb-31a350f88e57\")\n mock_os_remove.assert_called_with(\"/tmp/lta/testing/unpacker/outbox/0869ea50-e437-443f-8cdb-31a350f88e57.metadata.ndjson\")", "def _unloadone(self):\n self.logger.log('attempting to unload some containers [stub]')\n if not len(self.containers):\n self.logger.log('no %s loaded to unload!' 
% (self.cname))\n containers = sorted(self.containers, key=lambda container: container.usefulness)\n tounload = containers[0]\n self.containers.remove(tounload)\n tounload.unload()\n self.logger.log('unloaded %s %r' % (self.cname, (tounload,)))", "def check_datanodes(self):\n for nodename in self.datanodes.keys():\n datanode = self.datanodes[nodename]\n try:\n datanode.heartbeat()\n except Exception as e:\n del self.datanodes[nodename]\n logging.info('data node %s died: %s', nodename, e.message)\n for fname in self.files.keys():\n replica_location = self.files[fname]\n if nodename in replica_location:\n replica_location.remove(nodename)\n if len(replica_location) == 0:\n logging.info('The last replica of %s lost' % fname)\n del self.files[fname]", "def tearDown(self):\n try:\n remove(\"file.json\")\n except:\n pass", "def test_exif_data_removed_from_added_thumbnail(self):\n\n # The image that has GPS data:\n path = \"tests/core/fixtures/images/tester_exif_gps.jpg\"\n\n # Double-check the original image does have some GPS data:\n exif_dict = piexif.load(path)\n self.assertEqual(len(exif_dict[\"GPS\"].keys()), 15)\n\n pub = PublicationFactory(thumbnail__from_path=path)\n\n exif_dict = piexif.load(pub.thumbnail.path)\n self.assertEqual(exif_dict[\"GPS\"], {})\n\n # Tidy up:\n pub.thumbnail.delete()", "def clean_data_fragments(self) -> None:\n read_path: Path = Path(os.environ[\"DATA_PATH\"]) / \"fragments\"\n try:\n shutil.rmtree(read_path / \"__MACOSX\")\n except FileNotFoundError:\n print('Folder \"__MACOSX\" already removed.')\n\n # delete non-binarized images\n frag_paths: list = list((read_path / \"image-data\").iterdir())\n frags_binarized: list = [fp for fp in frag_paths if \"binarized\" in fp.name]\n frags_delete: set = set(frag_paths).difference(set(frags_binarized))\n for frag in frags_delete:\n frag.unlink()\n frag_paths = frags_binarized\n for frag_path in frag_paths:\n # Python 3.8 hack, seems to be supported without str() on 3.9\n shutil.move(str(frag_path.resolve()), str(read_path.resolve()))\n\n (read_path / \"image-data\").rmdir() # delete empty folder" ]
[ "0.64098233", "0.6114747", "0.58413917", "0.582247", "0.57496554", "0.56727177", "0.5573327", "0.5490028", "0.54383737", "0.53650856", "0.53048605", "0.5302251", "0.5285933", "0.5256595", "0.52538306", "0.5244757", "0.5230618", "0.5228572", "0.5218584", "0.5204441", "0.51699096", "0.5156562", "0.51382935", "0.5113371", "0.5105278", "0.5100508", "0.5095089", "0.5079535", "0.5077172", "0.506946", "0.50613075", "0.5050016", "0.5046539", "0.5046411", "0.50426745", "0.5032318", "0.50296885", "0.50192064", "0.5012713", "0.50069094", "0.4993304", "0.4987221", "0.49752077", "0.49693698", "0.4967471", "0.49601606", "0.49572754", "0.49544126", "0.49503598", "0.4949714", "0.49465775", "0.49397144", "0.4928647", "0.49269953", "0.4925788", "0.49234596", "0.49147063", "0.49146417", "0.49138153", "0.49072942", "0.48976365", "0.48934093", "0.48927465", "0.48882833", "0.48838744", "0.48836204", "0.48773316", "0.4876339", "0.48757955", "0.4865728", "0.48557115", "0.4846073", "0.48459077", "0.48420274", "0.484121", "0.48359293", "0.48357633", "0.48357633", "0.48281896", "0.4826869", "0.48254627", "0.48253587", "0.48182636", "0.48181656", "0.48169747", "0.48156515", "0.48111635", "0.48103797", "0.4808305", "0.48030674", "0.48016715", "0.48014358", "0.4796323", "0.4791618", "0.4788761", "0.4787681", "0.47863093", "0.47860715", "0.47814262", "0.47792614" ]
0.7458633
0
Ensure that graveyard_removal.py prints the correct entity handle for the graveyard volume.
def test_print_graveyard_removal(capfd):\n\tos.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file + " -p")\n\tout, err = capfd.readouterr()\n\tassert ("12682136550675318127" in out) == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def drop(self):\n Game.instance.area_map.entities.append(self.owner)\n Game.instance.inventory.remove(self.owner)\n self.owner.x = Game.instance.player.x\n self.owner.y = Game.instance.player.y\n message('You dropped a ' + self.owner.name + '.', palette.yellow)", "def test_default_graveyard_removal():\n\tos.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file)\n\tsize = os.path.getsize(test_file[:-4] + \"_no_grave.h5m\")\n\tassert size == 5748780", "def test_delete_volume_failure_modes(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self._fail_space_delete = True\n # This should not throw an exception, space-delete failure not problem\n self.driver.delete_volume(volume)\n self._fail_space_delete = False\n volume['provider_id'] = None\n # This should also not throw an exception\n self.driver.delete_volume(volume)", "def _delete_disk(self, volume):\n\n # We only do this when we know it's not exported\n # anywhere in the gateway\n lun_name = self._lun_name(volume.name)\n config = self._get_config()\n\n # Now look for the disk on any exported target\n found = False\n for target_iqn in config['targets']:\n # Do we have the volume we are looking for?\n target = config['targets'][target_iqn]\n for client_iqn in target['clients'].keys():\n if lun_name in target['clients'][client_iqn]['luns']:\n found = True\n\n if not found:\n # we can delete the disk definition\n LOG.info(\"Deleting volume definition in iscsi gateway for %s\",\n lun_name)\n self.client.delete_disk(self.configuration.rbd_pool, volume.name,\n preserve_image=True)", "def dropObject(player):\n for treasure in Treasure.List:\n if player.treasureCaptured:\n player.treasureCaptured = False\n treasure.x = player.x\n treasure.y = player.y\n treasure.img = pygame.image.load(Treasure.treasure_img[0])", "def event11510870():\n header(11510870, 1)\n npc, = define_args('i')\n skip_if_this_event_slot_off(2)\n chr.drop_mandatory_treasure(npc)\n end()\n\n if_entity_dead(0, npc)\n end()", "def find_graveyard_inner_box():\n volumes = get_volume_list()\n graveyard = 0\n for v in volumes:\n if volume_is_graveyard( v ): \n graveyard = v\n break\n if graveyard == 0:\n raise DagmcError( 'Could not find a graveyard volume' )\n\n xyz_lo, xyz_hi = volume_boundary( graveyard )\n xyz_mid = numpy.array( [ (hi+lo)/2.0 for (hi,lo) in zip( xyz_hi, xyz_lo) ], dtype=numpy.float64 )\n\n result_lo = numpy.array( [0]*3, dtype=numpy.float64 )\n result_hi = numpy.array( [0]*3, dtype=numpy.float64 )\n\n for i 
in range(0,3):\n uvw = [0,0,0]\n uvw[i] = 1\n lo_mid = xyz_mid.copy()\n lo_mid[i] = xyz_lo[i]\n _, dist = fire_one_ray( graveyard, lo_mid, uvw )\n result_lo[i] = lo_mid[i] + dist\n uvw[i] = -1\n hi_mid = xyz_mid.copy()\n hi_mid[i] = xyz_hi[i]\n _, dist = fire_one_ray( graveyard, hi_mid, uvw )\n result_hi[i] = hi_mid[i] - dist\n \n return result_lo, result_hi", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)", "def test_bad_uuid_blockdev(self):\n command_line = [\"blockdev\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_hotplug_storage(self):\n target_vols = [\n {\n \"type\": \"FCP\",\n \"volume_id\": \"1024400000000000\",\n \"boot_device\": True,\n \"specs\": {\n \"multipath\": True,\n \"adapters\": [{\n \"devno\": \"0.0.1800\",\n \"wwpns\": ['300607630503c1ae', '300607630503c1af']\n }, {\n \"devno\": \"0.0.1801\",\n \"wwpns\": ['300607630503c1ae', '300607630503c1af']\n }]\n }\n }\n ]\n # set response from storage pool object\n pool_resp = {target_vols[0]['volume_id']: '/dev/mapper/mpatha'}\n self._mock_pool.return_value.activate.return_value = pool_resp\n\n guest_obj = self._check_init()\n guest_obj.login()\n # validate response\n self.assertEqual(guest_obj.hotplug(vols=target_vols),\n {'vols': pool_resp})", "def test_removal_mount_dependency(self):\n from chroma_core.models import ManagedMgs\n\n self.mgt.managedtarget_ptr = 
self.set_and_assert_state(self.mgt.managedtarget_ptr, \"mounted\")\n try:\n # Make it so that the mount unconfigure operations will fail\n MockAgentRpc.succeed = False\n\n # -> the TargetMount removal parts of this operation will fail, we\n # want to make sure that this means that Target deletion part\n # fails as well\n self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\", check=False)\n\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertNotEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")\n finally:\n MockAgentRpc.succeed = True\n\n # Now let the op go through successfully\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\")\n with self.assertRaises(ManagedMgs.DoesNotExist):\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")", "def dispense_full_plate(self, ref, reagent, volume):\n columns = []\n for col in range(0,ref.container_type.col_count):\n columns.append({\"column\": col, \"volume\": volume})\n self.instructions.append(Dispense(ref, reagent, columns))", "def database_volume_snapshot_delete(volume_snapshot_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query.filter(model.VolumeSnapshot.uuid == volume_snapshot_uuid).delete()\n session.commit()", "def test_aws_service_api_volume_delete(self):\n pass", "def test_bad_uuid_filesystem(self):\n command_line = [\"filesystem\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def drop(self, command):\n \n for item in self.inventory:\n if item.name == command[1]:\n self.location.inventory.append(item)\n self.inventory.remove(item)\n print(\"You dropped a\", item.name)\n return \n print(command[1] + \" is not here!\")", "def help_drop(self):\n print(DROP)", "def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()", "def test_delete_voltage_map_item(self):\n pass", "def process_IN_UNMOUNT(self, event):", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def remove_export(self, context, volume):\n pass", "def unmanage(self, volume):\n LOG.debug(\"Unmanaging Cinder volume %s. 
Changing name to %s\",\n volume['id'], _get_unmanaged(volume['id']))\n data = {'name': _get_unmanaged(volume['id'])}\n self._issue_api_request(URL_TEMPLATES['ai_inst']().format(\n _get_name(volume['id'])), method='put', body=data)", "def volume_down(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)", "def test_pvremove():\n pvdisplay = MagicMock(return_value=False)\n with patch(\"salt.modules.linux_lvm.pvdisplay\", pvdisplay):\n mock = MagicMock(return_value=True)\n with patch.dict(linux_lvm.__salt__, {\"lvm.pvdisplay\": mock}):\n ret = {\n \"stdout\": \"saltines\",\n \"stderr\": \"cheese\",\n \"retcode\": 0,\n \"pid\": \"1337\",\n }\n mock = MagicMock(return_value=ret)\n with patch.dict(linux_lvm.__salt__, {\"cmd.run_all\": mock}):\n assert linux_lvm.pvremove(\"A\") is True", "def test_bad_uuid_pool(self):\n command_line = [\"pool\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])", "def _assert_source_detached(self, volume):\n if (volume['status'] != \"available\" or\n volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED):\n LOG.error(\"Volume must be detached for clone operation.\")\n raise exception.VolumeAttached(volume_id=volume['id'])", "def test_delete_destination_volume_in_migration(self):\n self._test_delete_volume_in_migration('target:vol-id')", "def test_dumpling_with_missing_chef(self, packet_dumpling_dict):\n del packet_dumpling_dict['metadata']['chef']\n\n with pytest.raises(InvalidDumpling):\n validate_dumpling(json.dumps(packet_dumpling_dict))", "async def test_deleting_entity(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n client,\n sensor_entities,\n climate_entities,\n) -> None:\n data = {**sensor_entities, **climate_entities}\n body = await generate_latest_metrics(client)\n\n assert (\n 'sensor_temperature_celsius{domain=\"sensor\",'\n 'entity=\"sensor.outside_temperature\",'\n 'friendly_name=\"Outside Temperature\"} 15.6' in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.outside_temperature\",'\n 'friendly_name=\"Outside Temperature\"} 1.0' in body\n )\n\n assert (\n 'sensor_humidity_percent{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 54.0' in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 1.0' in body\n )\n\n assert (\n 'climate_action{action=\"heating\",'\n 'domain=\"climate\",'\n 'entity=\"climate.heatpump\",'\n 'friendly_name=\"HeatPump\"} 1.0' in body\n )\n\n assert (\n 'climate_action{action=\"cooling\",'\n 'domain=\"climate\",'\n 'entity=\"climate.heatpump\",'\n 'friendly_name=\"HeatPump\"} 0.0' in body\n )\n\n assert \"sensor.outside_temperature\" in entity_registry.entities\n assert \"climate.heatpump\" in entity_registry.entities\n entity_registry.async_remove(data[\"sensor_1\"].entity_id)\n entity_registry.async_remove(data[\"climate_1\"].entity_id)\n\n await hass.async_block_till_done()\n body = await generate_latest_metrics(client)\n\n # Check if old metrics deleted\n body_line = \"\\n\".join(body)\n assert 
'entity=\"sensor.outside_temperature\"' not in body_line\n assert 'friendly_name=\"Outside Temperature\"' not in body_line\n assert 'entity=\"climate.heatpump\"' not in body_line\n assert 'friendly_name=\"HeatPump\"' not in body_line\n\n # Keep other sensors\n assert (\n 'sensor_humidity_percent{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 54.0' in body\n )\n\n assert (\n 'entity_available{domain=\"sensor\",'\n 'entity=\"sensor.outside_humidity\",'\n 'friendly_name=\"Outside Humidity\"} 1.0' in body\n )", "def test_clone_delete_snap(self):\n\n # pylint: disable=too-many-statements, too-many-locals\n # Enabling Volume options on the volume and validating\n g.log.info(\"Enabling volume options for volume %s \", self.volname)\n options = {\" features.uss\": \"enable\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, (\"Failed to set volume options for volume %s\"\n % self.volname))\n g.log.info(\"Successfully set volume options\"\n \"for volume %s\", self.volname)\n\n # Validate feature.uss enabled or not\n g.log.info(\"Validating feature.uss is enabled\")\n option = \"features.uss\"\n vol_option = get_volume_options(self.mnode, self.volname, option)\n self.assertEqual(vol_option['features.uss'], 'enable', \"Failed\"\n \" to validate \"\n \"volume options\")\n g.log.info(\"Successfully validated volume options\"\n \"for volume %s\", self.volname)\n\n # Creating snapshot\n g.log.info(\"Starting to Create snapshot\")\n for snap_count in range(0, 2):\n ret, _, _ = snap_create(self.mnode, self.volname,\n \"snap%s\" % snap_count)\n self.assertEqual(ret, 0, (\"Failed to create snapshot for volume %s\"\n % self.volname))\n g.log.info(\"Snapshot snap%s created successfully\"\n \"for volume %s\", snap_count, self.volname)\n\n # Activating snapshot\n g.log.info(\"Starting to Activate Snapshot\")\n for snap_count in range(0, 2):\n ret, _, _ = snap_activate(self.mnode, \"snap%s\" % snap_count)\n self.assertEqual(ret, 0, (\"Failed to Activate snapshot snap%s\"\n % snap_count))\n g.log.info(\"Snapshot snap%s activated successfully\", snap_count)\n\n # Reset volume:\n g.log.info(\"Starting to Reset Volume\")\n ret, _, _ = volume_reset(self.mnode, self.volname, force=False)\n self.assertEqual(ret, 0, (\"Failed to reset volume %s\" % self.volname))\n g.log.info(\"Reset Volume on volume %s is Successful\", self.volname)\n\n # Validate feature.uss enabled or not\n g.log.info(\"Validating feature.uss is enabled\")\n option = \"features.uss\"\n vol_option = get_volume_options(self.mnode, self.volname, option)\n self.assertEqual(vol_option['features.uss'], 'off', \"Failed\"\n \" to validate \"\n \"volume options\")\n g.log.info(\"Successfully validated volume options\"\n \"for volume %s\", self.volname)\n\n # Verify volume's all process are online\n g.log.info(\"Starting to Verify volume's all process are online\")\n ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)\n self.assertTrue(ret, (\"Volume %s : All process are\"\n \"not online\" % self.volname))\n g.log.info(\"Volume %s : All process are online\", self.volname)\n\n # Creating and starting a Clone of snapshot\n g.log.info(\"Starting to Clone Snapshot\")\n for clone_count in range(0, 2):\n ret, _, _ = snap_clone(self.mnode, \"snap%s\" % clone_count,\n \"clone%s\" % clone_count)\n self.assertEqual(ret, 0, (\"Failed to clone clone%s volume\"\n % clone_count))\n g.log.info(\"clone%s volume created successfully\", clone_count)\n\n # Start Cloned 
volume\n g.log.info(\"starting to Validate clone volumes are started\")\n for clone_count in range(0, 2):\n ret, _, _ = volume_start(self.mnode, \"clone%s\" % clone_count)\n self.assertEqual(ret, 0, (\"Failed to start clone%s\"\n % clone_count))\n g.log.info(\"clone%s started successfully\", clone_count)\n g.log.info(\"All the clone volumes are started Successfully\")\n\n # Validate Volume start of cloned volume\n g.log.info(\"Starting to Validate Volume start\")\n for clone_count in range(0, 2):\n vol_info = get_volume_info(self.mnode, \"clone%s\" % clone_count)\n if vol_info[\"clone%s\" % clone_count]['statusStr'] != 'Started':\n raise ExecutionError(\"Failed to get volume info for clone%s\"\n % clone_count)\n g.log.info(\"Volume clone%s is in Started state\", clone_count)\n\n # Validate feature.uss enabled or not\n g.log.info(\"Validating feature.uss is enabled\")\n option = \"features.uss\"\n for clone_count in range(0, 2):\n vol_option = get_volume_options(self.mnode, \"clone%s\"\n % clone_count, option)\n self.assertEqual(vol_option['features.uss'], 'enable', \"Failed\"\n \" to validate\"\n \"volume options\")\n g.log.info(\"Successfully validated volume options\"\n \"for volume clone%s\", clone_count)\n\n # Mount both the cloned volumes\n g.log.info(\"Mounting Cloned Volumes\")\n for mount_obj in range(0, 2):\n self.mpoint = \"/mnt/clone%s\" % mount_obj\n cmd = \"mkdir -p %s\" % self.mpoint\n ret, _, _ = g.run(self.clients[0], cmd)\n self.assertEqual(ret, 0, (\"Creation of directory %s\"\n \"for mounting\"\n \"volume %s failed: Directory already\"\n \"present\"\n % (self.mpoint, \"clone%s\" % mount_obj)))\n g.log.info(\"Creation of directory %s for mounting volume %s \"\n \"success\", self.mpoint, (\"clone%s\" % mount_obj))\n ret, _, _ = mount_volume(\"clone%s\" % mount_obj, self.mount_type,\n self.mpoint, self.mnode, self.clients[0])\n self.assertEqual(ret, 0, (\"clone%s is not mounted\"\n % mount_obj))\n g.log.info(\"clone%s is mounted Successfully\", mount_obj)\n\n # Perform I/O on mount\n # Start I/O on all mounts\n g.log.info(\"Starting to Perform I/O on Mountpoint\")\n all_mounts_procs = []\n for mount_obj in range(0, 2):\n cmd = (\"cd /mnt/clone%s/; for i in {1..10};\"\n \"do touch file$i; done; cd;\") % mount_obj\n proc = g.run(self.clients[0], cmd)\n all_mounts_procs.append(proc)\n g.log.info(\"I/O on mountpoint is successful\")\n\n # create snapshot\n g.log.info(\"Starting to Create snapshot of clone volume\")\n ret0, _, _ = snap_create(self.mnode, \"clone0\", \"snap2\")\n self.assertEqual(ret0, 0, \"Failed to create the snapshot\"\n \"snap2 from clone0\")\n g.log.info(\"Snapshots snap2 created successfully from clone0\")\n ret1, _, _ = snap_create(self.mnode, \"clone1\", \"snap3\")\n self.assertEqual(ret1, 0, \"Failed to create the snapshot snap3\"\n \"from clone1\")\n g.log.info(\"Snapshots snap3 created successfully from clone1\")\n\n # Listing all Snapshots present\n g.log.info(\"Starting to list all snapshots\")\n ret, _, _ = snap_list(self.mnode)\n self.assertEqual(ret, 0, (\"Failed to list snapshots present\"))\n g.log.info(\"Snapshots successfully listed\")", "def cli(env, snapshot_id):\n block_manager = SoftLayer.BlockStorageManager(env.client)\n deleted = block_manager.delete_snapshot(snapshot_id)\n\n if deleted:\n click.echo('Snapshot %s deleted' % snapshot_id)", "def delete_volume_record( volume ):\n \n logger.info( \"Delete Volume =%s\\n\\n\" % volume.name )\n \n volume_name = volume.name \n config = observer_core.get_config()\n \n # delete the Volume on 
Syndicate.\n try:\n rc = observer_core.ensure_volume_absent( volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to delete volume %s\", volume_name )\n raise e\n \n return rc", "def test_pvremove_not_pv():\n pvdisplay = MagicMock(return_value=False)\n with patch(\"salt.modules.linux_lvm.pvdisplay\", pvdisplay):\n assert linux_lvm.pvremove(\"A\", override=False) == \"A is not a physical volume\"\n\n pvdisplay = MagicMock(return_value=False)\n with patch(\"salt.modules.linux_lvm.pvdisplay\", pvdisplay):\n assert linux_lvm.pvremove(\"A\") is True", "def test_create_volume_name_creation_fail(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n self._fail_space_list = True\n self.assertRaises(exception.VolumeDriverException,\n self.driver.create_volume, volume)", "def test_main_exit_absent(self, mock_delete_volume, mock_module, mock_client):\n PARAMS = {\n 'storage_system_ip': '192.168.0.1',\n 'storage_system_name': '3PAR',\n 'storage_system_username': 'USER',\n 'storage_system_password': 'PASS',\n 'volume_name': 'test_volume',\n 'cpg': None,\n 'size': None,\n 'size_unit': None,\n 'snap_cpg': None,\n 'wait_for_task_to_end': None,\n 'new_name': None,\n 'expiration_hours': None,\n 'retention_hours': None,\n 'ss_spc_alloc_warning_pct': None,\n 'ss_spc_alloc_limit_pct': None,\n 'usr_spc_alloc_warning_pct': None,\n 'usr_spc_alloc_limit_pct': None,\n 'rm_ss_spc_alloc_warning': None,\n 'rm_usr_spc_alloc_warning': None,\n 'rm_exp_time': None,\n 'rm_usr_spc_alloc_limit': None,\n 'rm_ss_spc_alloc_limit': None,\n 'compression': False,\n 'type': 'thin',\n 'keep_vv': None,\n 'state': 'absent'\n }\n # This creates a instance of the AnsibleModule mock.\n mock_module.params = PARAMS\n mock_module.return_value = mock_module\n instance = mock_module.return_value\n mock_delete_volume.return_value = (True, True, \"Deleted volume successfully.\", {})\n hpe3par_volume.main()\n # AnsibleModule.exit_json should be called\n instance.exit_json.assert_called_with(\n changed=True, msg=\"Deleted volume successfully.\")\n # AnsibleModule.fail_json should not be called\n self.assertEqual(instance.fail_json.call_count, 0)", "def do_drop(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToDrop = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n\r\n # find out if the player doesn't have that item\r\n if itemToDrop not in invDescWords:\r\n print('You do not have \"%s\" in your inventory.' % (itemToDrop))\r\n return\r\n\r\n # get the item name that the player's command describes\r\n item = getFirstItemMatchingDesc(itemToDrop, inventory)\r\n if item != None:\r\n print('You drop %s.' 
% (worldItems[item][SHORTDESC]))\r\n inventory.remove(item) # remove from inventory\r\n worldRooms[location][GROUND].append(item) # add to the ground\r", "def on_drop(self):\n print(\"You have dropped\", self.name)", "def _test_delete_volume_in_migration(self, migration_status):\n volume = tests_utils.create_volume(self.context, host=CONF.host,\n migration_status=migration_status)\n self.volume.delete_volume(self.context, volume=volume)\n\n # The volume is successfully removed during the volume delete\n # and won't exist in the database any more.\n self.assertRaises(exception.VolumeNotFound, volume.refresh)", "def ensure_export(self, context, volume):\n pass", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def test_vgremove():\n mock = MagicMock(\n return_value={\n \"retcode\": 0,\n \"stdout\": ' Volume group \"fakevg\" successfully removed',\n }\n )\n with patch.dict(linux_lvm.__salt__, {\"cmd.run_all\": mock}):\n assert (\n linux_lvm.vgremove(\"fakevg\") == 'Volume group \"fakevg\" successfully removed'\n )", "def play_Gravecrawler_from_graveyard(hand, battlefield, graveyard, library):\n\tgraveyard['Gravecrawler'] -= 1\n\tbattlefield['Gravecrawler'] += 1\n\tlog(\"We play a Gravecrawler from graveyard.\")\n\tdescribe_game_state(hand, battlefield, graveyard, library)", "def test_reserve_reserve_delete(self, mock_allowed):\n mock_allowed.return_value = None\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n\n self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.volume_api.attachment_delete(self.context,\n aref)\n mock_allowed.assert_called_once_with(self.context, aref)\n vref = objects.Volume.get_by_id(self.context,\n vref.id)\n self.assertEqual('reserved', vref.status)\n self.assertEqual(1, len(vref.volume_attachment))", "def drop_inventory(self):\n header = \"Choose item to drop:\\n\"\n def drop(get_gameworld_cell, x, y, item):\n item_entity = ItemPickup([item], x, y, get_gameworld_cell)\n events.trigger_event(\"world_add_entity\", item_entity)\n self.inventory.remove(item)\n action_list = [(item, functools.partial(drop, get_gameworld_cell=self.get_gameworld_cell, x=self.x, y=self.y, item=item)) for item in self.inventory]\n if len(action_list) == 0:\n header += \"You hold nothing!\"\n events.trigger_event(\"print_list\", action_list, header=header)", "def die(self):\n self.pjs.bombermen.remove(self)\n for block in self.physics.blocks[self.stype]:\n if block == self.rects[0]:\n self.physics.blocks[self.stype].remove(block)", "def test_delete_source_volume_in_migration(self):\n self._test_delete_volume_in_migration('migrating')", "def test_space_list_fails(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.extended = {'name': '', 'size': '0',\n 'storageserver': ''}\n self._fail_space_list = True\n self.assertRaises(exception.VolumeDriverException,\n self.driver.extend_volume, volume, 12)", "def on_item_dropped(self, url):\n print 'Weld.on_item_dropped:', url\n #make 
sure all struct are present\n if not(self.project and self.project.level):\n print >> sys.stderr, 'it\\'s too early to drop stuff: '\\\n 'create a project and a level first !'\n return\n\n #retrieve data if it comes from weld\n if url in self.resMan:\n props = self.resMan.file_props(url)\n if props is None:\n print >> sys.stderr, curr_f(), ': url(\\'%s\\') in self.resMan '\\\n 'but can\\'t retrieve props.' % (url)\n return\n props = self.project.level.resMan.add_resource(self.resMan.base_path,\n props)\n url = props['url']\n if props == {} or url not in self.project.level.resMan:\n print >> sys.stderr, curr_f(), 'could not retrieve file and/or '\\\n 'dependencies for props:', pp(props)\n return\n\n #instanciate it\n if url in self.project.level.resMan:\n props = self.project.level.resMan.file_props(url)\n dtp = self.project.level.qsteelwidget.dropTargetPosition(Config.instance().drop_target_vec)\n props['position'] = dtp\n props['rotation'] = self.project.level.qsteelwidget.dropTargetRotation()\n if props['resource_type'] == 'meshes':\n props['meshName'] = props['name']\n self.project.level.instanciate(props)\n s = 'dropped agent \\'%s\\' with id %i' % (props['name'], props['agentId'])\n print s\n Ui.instance().show_status(s)\n else:\n Ui.instance().show_status('can only drop meshes so far')", "def test_attachment_deletion_allowed_attachment_from_volume(\n self, mock_get_server):\n mock_get_server.side_effect = nova.API.NotFound(404)\n attachment = self._get_attachment()\n volume = attachment.volume\n volume.volume_attachment = objects.VolumeAttachmentList(\n objects=[attachment])\n self.volume_api.attachment_deletion_allowed(self.context, None, volume)\n\n mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID,\n volume.id)", "def deregister_volume(VolumeId=None):\n pass", "def test_detach_volume_force(self, remove_mock, terminate_mock, exc_mock):\n connector = mock.Mock()\n connector.disconnect_volume.side_effect = Exception\n # TODO(geguileo): Remove this ExceptionChainer simulation once we\n # release OS-Brick version with it and bump min version.\n exc = exc_mock.ExceptionChainer.return_value\n exc.context.return_value.__enter__.return_value = exc\n exc.context.return_value.__exit__.return_value = True\n\n volume = {'id': fake.VOLUME_ID}\n attach_info = {'device': {},\n 'connector': connector,\n 'conn': {'data': {}, }}\n\n # TODO(geguileo): Change TypeError to ExceptionChainer once we release\n # OS-Brick version with it and bump min version.\n self.assertRaises(TypeError,\n self.volume.driver._detach_volume, self.context,\n attach_info, volume, {}, force=True)\n\n self.assertTrue(connector.disconnect_volume.called)\n self.assertTrue(remove_mock.called)\n self.assertTrue(terminate_mock.called)\n self.assertEqual(3, exc.context.call_count)", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, 
s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def delete_entity(self, context, hm):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, MONITORS_RESOURCE,\n hm.id)\n msg = _(\"NetScaler driver healthmonitor removal: %s\") % hm.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)", "def test_sg_delete_non_associated(self):\n\n # Add a faked storage group to be tested and another one\n faked_storage_group = self.add_storage_group1()\n self.add_storage_group2()\n\n storage_group_mgr = self.console.storage_groups\n\n storage_group = storage_group_mgr.find(name=faked_storage_group.name)\n\n # Execute the code to be tested.\n storage_group.delete()\n\n # Check that the storage group no longer exists\n with pytest.raises(NotFound):\n storage_group_mgr.find(name=faked_storage_group.name)", "def test_run_terminate_with_vol_attached(self):\n instance = self._create_fake_instance_obj()\n\n self.compute.build_and_run_instance(self.context, instance, {}, {}, {},\n [], block_device_mapping=[])\n\n instances = db.instance_get_all(self.context)\n LOG.info(\"Running instances: %s\", instances)\n self.assertEqual(len(instances), 1)\n\n def fake_check_availability_zone(*args, **kwargs):\n pass\n\n def fake_attachment_create(*args, **kwargs):\n return {'id': uuids.attachment_id}\n\n def fake_volume_get(self, context, volume_id):\n return {'id': volume_id,\n 'attach_status': 'attached',\n 'attachments': {instance.uuid: {\n 'attachment_id': uuids.attachment_id\n }\n },\n 'multiattach': False\n }\n\n def fake_terminate_connection(self, context, volume_id, connector):\n pass\n\n def fake_detach(self, context, volume_id, instance_uuid):\n pass\n\n bdms = []\n\n def fake_rpc_reserve_block_device_name(self, context, instance, device,\n volume_id, **kwargs):\n bdm = objects.BlockDeviceMapping(\n **{'context': context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': uuids.volume_id,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n bdms.append(bdm)\n return bdm\n\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_check_availability_zone)\n self.stub_out('nova.volume.cinder.API.attachment_create',\n fake_attachment_create)\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n self.stub_out('nova.compute.rpcapi.ComputeAPI.'\n 'reserve_block_device_name',\n fake_rpc_reserve_block_device_name)\n\n self.compute_api.attach_volume(self.context, instance, 1,\n '/dev/vdc')\n\n self.compute.terminate_instance(self.context,\n instance, bdms)\n\n instances = db.instance_get_all(self.context)\n LOG.info(\"After terminating instances: %s\", instances)\n self.assertEqual(len(instances), 0)\n bdms = db.block_device_mapping_get_all_by_instance(self.context,\n instance['uuid'])\n self.assertEqual(len(bdms), 0)", "def detach_volume(self, context, volume_id):\n # TODO(vish): refactor this into a more general \"unreserve\"\n # TODO(sleepsonthefloor): Is this 'elevated' appropriate?\n # self.db.volume_detached(context.elevated(), volume_id)\n self.db.volume_admin_metadata_delete(context.elevated(), volume_id,\n 'attached_mode')", "def test_power_removed_control():\n f = Level3File(get_test_data('nids/KGJX_NXF_20200817_0600.nids'))\n assert f.prod_desc.prod_code == 113\n assert f.metadata['rpg_cut_num'] == 1\n assert f.metadata['cmd_generated'] == 0\n 
assert f.metadata['el_angle'] == -0.2\n assert f.metadata['clutter_filter_map_dt'] == datetime(2020, 8, 17, 4, 16)\n assert f.metadata['compression'] == 1\n assert f.sym_block[0][0]", "def test_normal_flow_generic_distro(self):\n # set a simple log configuration to catch messages from all levels\n # and output to a file which we will check later\n log_file = NamedTemporaryFile() # pylint: disable=consider-using-with\n logging.basicConfig(filename=log_file.name, filemode='w',\n level=logging.DEBUG)\n\n # validate the constructor of the guest class\n guest_obj = self._check_init()\n\n # validate login() method\n self._check_login(\n guest_obj, 'DistroGeneric', 'uname -a'\n )\n\n # validate open_session() method\n self._check_open_session(guest_obj)\n\n # validate stop() method\n self._check_stop(guest_obj, 'nohup halt &')\n\n # validate logoff() method\n self._check_logoff(guest_obj)\n\n # validate the logging was correct\n # define the content we expect to see in the log file\n log_prefix = 'DEBUG:tessia.baselib.guests.linux.linux'\n expected_log = (\n \"{0}:create GuestLinux: name='{1.name}' host_name='{1.host_name}' \"\n \"user='{1.user}' extensions='{1.extensions}'\\n\".format(\n log_prefix, guest_obj)\n )\n expected_log += (\n \"{}:create distro system_name='{}' _distro_obj='DistroGeneric'\\n\"\n .format(log_prefix, guest_obj.name)\n )\n expected_log += (\n \"{}:logoff system_name='{}'\\n\".format(log_prefix, guest_obj.name)\n )\n # retrieve the content written in the log file\n with open(log_file.name, 'r') as log_fd:\n actual_log = log_fd.read()\n\n # allow unittest class to show the full diff in case of error\n # pylint: disable=invalid-name\n self.maxDiff = None\n # perform the comparison to validate the content\n self.assertEqual(expected_log, actual_log)", "def do_destroy(self, line):\n args = line.split()\n\n if not args:\n print(\"** class name missing **\")\n elif args[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n elif len(args) < 2:\n print(\"** instance id missing **\")\n else:\n key = args[0] + \".\" + args[1]\n dict_objects = storage.all()\n obj = dict_objects.get(key)\n if obj:\n dict_objects.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True", "def test_update_volume_stats_error(self):\n self._fail_host_storage = True\n actual = self.driver.get_volume_stats(True)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual('unknown', actual['total_capacity_gb'])\n self.assertEqual('unknown', actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])", "def _detach_external_volume_or_instance():\n\n if not utils.use_external_resource(ctx.source.node.properties) \\\n or not utils.use_external_resource(\n ctx.target.node.properties):\n return False\n\n utils.unassign_runtime_property_from_resource(\n 'instance_id', ctx.source.instance)\n ctx.logger.info(\n 'Either instance or EBS volume is an external resource so not '\n 'performing detach operation.')\n return True", "def openinv(cls): #THIS DOESN'T NEED TO BE MODIFIED!\n\n while True:\n inventory_items = {thing.id: thing.name 
for thing in cls.inventory}\n inventory_items[\"exit\"] = \"Exit Inventory\"\n inventory_items[\"newln\"] = \"\"\n inventory_items[\"playername\"] = str(gray('\"{}\"'.format(cls.name)))\n inventory_items[\"lv\"] = str(gray(\"LV: {}\".format(cls.lv)))\n inventory_items[\"hp\"] = str(gray(\"HP: {}/{}\".format(cls.hp, cls.max_hp)))\n inventory_items[\"exp\"] = str(gray(\"EXP: {}/40\".format(cls.exp)))\n\n choice = Menu.menu(\n title = \"Inventory\",\n contents = inventory_items \n )\n if choice == \"exit\":\n Terminal.clear_all()\n return\n while True:\n displayed_item = next((thing for thing in cls.inventory if thing.id == choice), None)\n final_choice = Menu.menu(\n title = displayed_item.name,\n contents = {\n \"interact\":displayed_item.interact_label,\n \"inspect\":\"Inspect\",\n \"drop\":\"Drop\",\n \"back\":\"Back\"\n }\n )\n if final_choice == \"back\":\n break\n if final_choice == \"interact\":\n use = displayed_item.interact()\n Terminal.clear_all()\n print(use[\"message\"])\n if \"heal_\" in use[\"action\"]:\n cls.hp += int(use[\"action\"].replace(\"heal_\", ''))\n if cls.hp > cls.max_hp:\n cls.hp = cls.max_hp\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break\n if final_choice == \"inspect\":\n Terminal.clear_all()\n print(displayed_item)\n Game.standard_wait()\n continue\n if final_choice == \"drop\":\n Terminal.clear_all()\n print(\"You dropped the {}\".format(displayed_item.name))\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break", "def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201\n try:\n selection_pattern = r'\\[([0-9]+)\\] *'\n\n try:\n target.expect(re.compile(selection_pattern + entry), timeout=5)\n wl_menu_item = target.match.group(1)\n except pexpect.TIMEOUT:\n return # Entry does not exist, nothing to delete here...\n\n # Identify and select boot manager menu item\n target.expect(selection_pattern + 'Boot Manager', timeout=15)\n bootmanager_item = target.match.group(1)\n target.sendline(bootmanager_item)\n\n # Identify and select 'Remove entry'\n target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)\n new_entry_item = target.match.group(1)\n target.sendline(new_entry_item)\n\n # Delete entry\n target.expect(re.compile(selection_pattern + entry), timeout=5)\n wl_menu_item = target.match.group(1)\n target.sendline(wl_menu_item)\n\n # Return to main manu\n target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)\n return_to_main_menu_item = target.match.group(1)\n target.sendline(return_to_main_menu_item)\n except pexpect.TIMEOUT:\n raise DeviceError('Timed out while deleting UEFI entry.')", "def volume_up(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)", "def test_delete_system_asset(self):\n pass", "def delete_volume(self, context, volume_id, unmanage_only=False):\n context = context.elevated()\n\n volume_ref = self.db.volume_get(context, volume_id)\n\n if context.project_id != volume_ref['project_id']:\n project_id = volume_ref['project_id']\n else:\n project_id = context.project_id\n\n LOG.info(_(\"volume %s: deleting\"), volume_ref['id'])\n if volume_ref['attach_status'] == \"attached\":\n # Volume is still attached, need to detach first\n raise exception.VolumeAttached(volume_id=volume_id)\n\n self._notify_about_volume_usage(context, volume_ref, \"delete.start\")\n self._reset_stats()\n\n try:\n self._delete_cascaded_volume(context, volume_id)\n except Exception:\n 
LOG.exception(_(\"Failed to deleting volume\"))\n # Get reservations\n try:\n reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting volume\"))\n\n # Delete glance metadata if it exists\n try:\n self.db.volume_glance_metadata_delete_by_volume(context, volume_id)\n LOG.debug(_(\"volume %s: glance metadata deleted\"),\n volume_ref['id'])\n except exception.GlanceMetadataNotFound:\n LOG.debug(_(\"no glance metadata found for volume %s\"),\n volume_ref['id'])\n\n self.db.volume_destroy(context, volume_id)\n LOG.info(_(\"volume %s: deleted successfully\"), volume_ref['id'])\n self._notify_about_volume_usage(context, volume_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n\n self.publish_service_capabilities(context)\n\n return True", "def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()", "def snap_delete_by_volumename(mnode, volname):\n\n cmd = \"gluster snapshot delete volume %s --mode=script\" % volname\n return g.run(mnode, cmd)", "async def test_entity_device_info_remove(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_entity_device_info_remove(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def do_command(self, args):\n vendorops = dbops.Vendors()\n vendorops.delete(args)", "def main():\n dump(inventory(), fp=stdout, indent=4)", "def test_aws_service_api_snapshot_delete(self):\n pass", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "async def test_protein_deletion(test_handler, protein_deletion_np_range):\n resp = await test_handler.normalize(\"NP_004439.2:p.Leu755_Thr759del\")\n assertion_checks(resp.variation_descriptor, protein_deletion_np_range,\n \"NP_004439.2:p.Leu755_Thr759del\")\n\n resp = await test_handler.normalize(\"ERBB2 p.Leu755_Thr759del\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:ERBB2%20p.Leu755_Thr759del\"\n resp.variation_descriptor.id = \\\n \"normalize.variation:NP_004439.2%3Ap.Leu755_Thr759del\"\n assertion_checks(resp.variation_descriptor, protein_deletion_np_range,\n \"ERBB2 p.Leu755_Thr759del\")\n\n resp = await test_handler.normalize(\"ERBB2 Leu755_Thr759del\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:ERBB2%20Leu755_Thr759del\"\n resp.variation_descriptor.id = \\\n \"normalize.variation:NP_004439.2%3Ap.Leu755_Thr759del\"\n assertion_checks(resp.variation_descriptor, protein_deletion_np_range,\n \"ERBB2 Leu755_Thr759del\")\n\n resp1 = await test_handler.normalize(\"EGFR L747_T751del\")\n resp2 = await test_handler.normalize(\"EGFR L747_T751delLREAT\")\n assert resp1.variation_descriptor.variation.id == \\\n 
resp2.variation_descriptor.variation.id\n\n # incorrect deleted sequence\n resp = await test_handler.normalize(\"EGFR L747_T751delLREA\")\n assert not resp.variation_descriptor", "def remove_export(self, context, volume):\n if not self.share_targets:\n self._locked_unmap_volume(volume)\n LOG.info('Volume %s is no longer exported', volume.id)", "def update_volumes():\n print 'do something useful here'", "def player_death(game_event):\n print(\"Player Death\")\n # Get the userid from the event\n victim = game_event.get_int('userid')\n attacker = game_event.get_int('attacker')\n print(\"victim: %s\" % victim)\n print(\"attacker: %s\" % attacker)\n \n #victim_edict = edict_from_userid(victim)\n #attacker_edict = edict_from_userid(attacker)\n #print(\"victim_edict: %s\" % victim_edict)\n #print(\"attacker_edict: %s\" % attacker_edict)\n \n # Get the CPlayerInfo instance from the userid\n victimplayerinfo = playerinfo_from_userid(victim)\n attackerplayerinfo = playerinfo_from_userid(attacker)\n print(\"victimplayerinfo: %s\" % victimplayerinfo)\n print(\"attackerplayerinfo: %s\" % attackerplayerinfo)\n # And finally get the player's name \n #victimname = victimplayerinfo.get_name()\n #attackername = attackerplayerinfo.get_name()\n #print(\"victimname: %s\" % victimname)\n #print(\"attackername: %s\" % attackername)\n \n # Get the index of the player\n victimindex = index_from_userid(victim)\n attackerindex = index_from_userid(attacker)\n print(\"victimindex: %s\" % victimindex)\n print(\"attackerindex: %s\" % attackerindex)\n \n print(\"victim_is_fake_client: %s\" % victimplayerinfo.is_fake_client())\n print(\"attacker_is_fake_client: %s\" % attackerplayerinfo.is_fake_client())\n \n victim_steamid = victimplayerinfo.get_networkid_string()\n attacker_steamid = attackerplayerinfo.get_networkid_string()\n \n if not victimplayerinfo.is_fake_client() and not attackerplayerinfo.is_fake_client():\n \n print(\"victim_steamid: %s\" % victim_steamid)\n print(\"attacker_steamid: %s\" % attacker_steamid)\n \n victim_64 = convertSteamIDToCommunityID(victim_steamid)\n attacker_64 = convertSteamIDToCommunityID(attacker_steamid)\n \n kick_player, v_balance, a_balance = leetcoin_client.recordKill(victim_64, attacker_64)\n if v_balance == \"noreg\":\n SayText2(message=\"Unregistered kill/death. Win free bitcoin by registering at leet.gg! (if you haven't already)\").send(victimindex)\n SayText2(message=\"Unregistered kill/death. Win free bitcoin by registering at leet.gg! 
(if you haven't already)\").send(attackerindex)\n vbalance = leetcoin_client.getPlayerBalance(convertSteamIDToCommunityID(victimplayerinfo.get_networkid_string()))\n SayText2(message=\"Updated \" + vbalance + \"\").send(victimindex)\n if victim_steamid != attacker_steamid:\n abalance = leetcoin_client.getPlayerBalance(convertSteamIDToCommunityID(attackerplayerinfo.get_networkid_string()))\n SayText2(message=\"Updated \" + abalance + \"\").send(attackerindex) \t\n\n return", "def test_shd_should_not_crash_executed_heal_info(self):\n # pylint: disable=too-many-statements\n bricks_list = get_all_bricks(self.mnode, self.volname)\n # Setting options\n g.log.info('Setting options...')\n options = {\"metadata-self-heal\": \"off\",\n \"entry-self-heal\": \"off\",\n \"data-self-heal\": \"off\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files -f 10 --fixed-file-size 1M %s\"\n % (self.script_upload_path, mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Bring brick0 offline\n g.log.info('Bringing bricks %s offline', bricks_list[0])\n ret = bring_bricks_offline(self.volname, bricks_list[0])\n self.assertTrue(ret, 'Failed to bring bricks %s offline'\n % bricks_list[0])\n\n ret = are_bricks_offline(self.mnode, self.volname,\n [bricks_list[0]])\n self.assertTrue(ret, 'Bricks %s are not offline'\n % bricks_list[0])\n g.log.info('Bringing bricks %s offline is successful',\n bricks_list[0])\n\n # Creating files on client side\n number_of_files_one_brick_off = '1000'\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files \"\n \"-f %s \"\n \"--fixed-file-size 1k \"\n \"--base-file-name new_file \"\n \"%s\"\n % (self.script_upload_path,\n number_of_files_one_brick_off,\n mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Get heal info\n g.log.info(\"Getting heal info...\")\n heal_info_data = get_heal_info_summary(self.mnode, self.volname)\n self.assertIsNotNone(heal_info_data, 'Failed to get heal info.')\n g.log.info('Success in getting heal info')\n\n # Check quantity of file pending heal\n for brick in bricks_list[1:]:\n self.assertEqual(heal_info_data[brick]['numberOfEntries'],\n str(int(number_of_files_one_brick_off)+1),\n 'Number of files pending heal is not correct')\n\n # Setting options\n g.log.info('Setting options...')\n options = {\"performance.enable-least-priority\": \"enable\"}\n ret = 
set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Bring brick1 offline\n g.log.info('Bringing bricks %s offline', bricks_list[1])\n ret = bring_bricks_offline(self.volname, bricks_list[1])\n self.assertTrue(ret, 'Failed to bring bricks %s offline'\n % bricks_list[1])\n\n ret = are_bricks_offline(self.mnode, self.volname,\n [bricks_list[1]])\n self.assertTrue(ret, 'Bricks %s are not offline'\n % bricks_list[1])\n g.log.info('Bringing bricks %s offline is successful',\n bricks_list[1])\n\n # Setting options\n g.log.info('Setting options...')\n options = {\"quorum-type\": \"fixed\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n number_of_files_two_brick_off = '100'\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files \"\n \"-f %s \"\n \"--fixed-file-size 1k \"\n \"--base-file-name new_new_file \"\n \"%s\"\n % (self.script_upload_path,\n number_of_files_two_brick_off,\n mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Get heal info\n g.log.info(\"Getting heal info...\")\n heal_info_data = get_heal_info_summary(self.mnode, self.volname)\n self.assertIsNotNone(heal_info_data, 'Failed to get heal info.')\n g.log.info('Success in getting heal info')\n\n # Check quantity of file pending heal\n number_of_files_to_check = str(int(number_of_files_one_brick_off) +\n int(number_of_files_two_brick_off) + 1)\n self.assertEqual(heal_info_data[bricks_list[-1]]['numberOfEntries'],\n number_of_files_to_check,\n 'Number of files pending heal is not correct')", "def test_persistent_group_peer_dropped(dev):\n form(dev[0], dev[1], reverse_init=True)\n invite_from_cli(dev[0], dev[1])\n\n logger.info(\"Remove group on the GO and try to invite from the client\")\n dev[0].global_request(\"REMOVE_NETWORK all\")\n invite(dev[1], dev[0])\n ev = dev[1].wait_global_event([\"P2P-INVITATION-RESULT\"], timeout=10)\n if ev is None:\n raise Exception(\"No invitation result seen\")\n if \"status=8\" not in ev:\n raise Exception(\"Unexpected invitation result: \" + ev)\n networks = dev[1].list_networks(p2p=True)\n if len(networks) > 0:\n raise Exception(\"Unexpected network block on client\")\n\n logger.info(\"Verify that a new group can be formed\")\n form(dev[0], dev[1], reverse_init=True)", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def execute_drop(item_id):\r\n if (item_id in inventory):\r\n current_room[\"items\"][item_id] = inventory[item_id]\r\n del 
inventory[item_id]\r\n wrap_print(\"You dropped \" + items[item_id][\"name\"] + \".\")\r\n global valid_move\r\n valid_move = True\r\n else:\r\n wrap_print(\"You cannot drop that.\")", "def grab(self):\n if len(self.location.contents) == 0:\n print('Hate to break it to you, but there\\'s nothing to grab.')\n elif random() >= .75:\n item = self.location.contents[\n randrange(len(self.location.contents))]\n self.inventory.append(item)\n self.location.remove(item)\n print('Nice one, you actually managed to grab the {}! '\n 'I\\'m not even angry, I\\'m impressed.'.format(item))\n else:\n print('Well, at least you flailed in an impressive fashion.')", "def handle(self, *args, **options):\n print(\"\\n\", \"* \"*30)\n self._drop_db() \n print(\"\\n\", \"* \"*30, \"\\n\")", "def test_locate_graveyard():\n groups_to_write, graveyard_sets = locate_graveyard(mb)\n assert groups_to_write == [12682136550675318125, 12682136550675318126,\n 12682136550675318128, 12682136550675318129]", "def print_player_error(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n item = self._get_item_on_floor(source_entity)\n if (not item is None and\n not self.parent.inventory.has_room_for_item(item)):\n message = \"Could not pick up: \" + item.description.name + \\\n \", the inventory is full.\"\n msg.send_visual_message(message, source_entity.position.value)", "def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))", "def _delete_image_volume(self,\n context: context.RequestContext,\n cache_entry: dict) -> None:\n volume = objects.Volume.get_by_id(context, cache_entry['volume_id'])\n\n # Delete will evict the cache entry.\n self.volume_api.delete(context, volume)", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. 
You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def test_instance_type_create_then_delete(self):\n name = 'Small Flavor'\n flavorid = 'flavor1'\n\n original_list = instance_types.get_all_types()\n\n # create new type and make sure values stick\n inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)\n inst_type_id = inst_type['id']\n self.assertEqual(inst_type['flavorid'], flavorid)\n self.assertEqual(inst_type['name'], name)\n self.assertEqual(inst_type['memory_mb'], 256)\n self.assertEqual(inst_type['vcpus'], 1)\n self.assertEqual(inst_type['root_gb'], 120)\n self.assertEqual(inst_type['ephemeral_gb'], 100)\n self.assertEqual(inst_type['swap'], 0)\n self.assertEqual(inst_type['rxtx_factor'], 1)\n\n # make sure new type shows up in list\n new_list = instance_types.get_all_types()\n self.assertNotEqual(len(original_list), len(new_list),\n 'instance type was not created')\n\n instance_types.destroy(name)\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, inst_type_id)\n\n # deleted instance should not be in list anymoer\n new_list = instance_types.get_all_types()\n self.assertEqual(original_list, new_list)", "def die(self):\n events.trigger_event(\"on_entity_death\", self)", "def test_create_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider, note that provider_id is hashed\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)", "def _handle_kernel_died(self, kernel_id):\n self.log.warning(\"Kernel %s died, removing from map.\", kernel_id)\n self.remove_kernel(kernel_id)", "async def test_genomic_deletion(test_handler, genomic_deletion):\n # CA915940709\n q = \"NC_000003.12:g.10146527_10146528del\"\n resp1 = await test_handler.normalize(q)\n assertion_checks(resp1.variation_descriptor, genomic_deletion, q)\n\n resp2 = await test_handler.normalize(\"NC_000003.12:g.10146527_10146528delCT\")\n assert resp1.variation_descriptor.variation.id == \\\n resp2.variation_descriptor.variation.id\n\n # incorrect deleted sequence\n resp = await test_handler.normalize(\"NC_000003.12:g.10146527_10146528delCC\")\n assert not resp.variation_descriptor" ]
[ "0.5670078", "0.5624108", "0.554459", "0.5481827", "0.5419034", "0.53412575", "0.5311965", "0.52677137", "0.518722", "0.51854277", "0.5156141", "0.5146773", "0.5126611", "0.5102824", "0.50757784", "0.5075659", "0.5067923", "0.50451326", "0.49824572", "0.49812868", "0.4970936", "0.4936046", "0.49182272", "0.48876503", "0.48804823", "0.4878058", "0.48513", "0.48507753", "0.4845009", "0.48424056", "0.4839381", "0.48270935", "0.4815666", "0.47895297", "0.4788043", "0.47876328", "0.47613657", "0.476088", "0.47604397", "0.4758788", "0.474997", "0.4748707", "0.4744826", "0.47390378", "0.472142", "0.4720467", "0.47039637", "0.47031388", "0.47004658", "0.46918774", "0.46868345", "0.46857527", "0.46832806", "0.4681993", "0.46732268", "0.46721455", "0.4663775", "0.46569902", "0.46509257", "0.46479076", "0.4646928", "0.46399885", "0.4637373", "0.4635646", "0.46313798", "0.46209928", "0.46094656", "0.46078074", "0.46025488", "0.45932603", "0.45888817", "0.4583545", "0.45802262", "0.45782626", "0.45744085", "0.45737678", "0.45683357", "0.45652777", "0.45594534", "0.45568874", "0.45462397", "0.45444778", "0.45435503", "0.45423776", "0.45419654", "0.45374927", "0.45266828", "0.45264363", "0.45254636", "0.4525098", "0.4516973", "0.45110527", "0.4509386", "0.4508491", "0.4508491", "0.4508265", "0.4504469", "0.45038113", "0.4503224", "0.45030838" ]
0.6111419
0
Remove the files written to disk by this class of tests.
def test_cleanup(): os.remove(test_file[:-4] + "_no_grave.h5m")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep', f)]\n for file in filtered:\n try:\n if Path(file).is_dir():\n shutil.rmtree(file)\n else:\n os.remove(file)\n except PermissionError as pe:\n # We don't necessarily care that much\n continue", "def tearDown(self):\n testing_dir = os.path.split(os.path.realpath(__file__))[0]\n for f in glob.glob(os.path.join(testing_dir, \"*\")):\n if f.split(\".\")[-1] in [\"o\", \"out\", \"pyc\", \"log\"]:\n subprocess.call(['rm', f])", "def tearDown(self):\n self.remove_test_files()", "def tearDown(self):\n\n for fname in self.fnames:\n os.remove(fname)", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def tearDown(self):\n\n for fname in self.fnames:\n FileSystem.unlink(fname)", "def tearDownClass(cls):\n\n to_delete = [\"test.mtx.zip\", \"test.csv.zip\", \"test.loom\",\n \"test.mtx\", \"test.csv\", \".loom_parts\"]\n\n for path in to_delete:\n if os.path.isdir(path):\n shutil.rmtree(path)\n elif os.path.isfile(path):\n os.remove(path)", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def tearDown(self):\n for root, dirs, files in os.walk(TEMPDIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.rmdir(root)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def tearDown(self):\n try:\n os.remove(self.filename)\n except:\n pass", "def tearDown(self):\n try:\n os.remove(self.filename)\n except:\n pass", "def tearDown(self):\n try:\n os.remove(self.filename)\n except:\n pass", "def tearDown(self):\n os.remove(self._file)", "def tearDownClass(cls):\n file_paths = [os.path.join(ARC_PATH, 'nul'),\n os.path.join(ARC_PATH, 'run.out'),\n os.path.join(ARC_PATH, 'arc', 'species', 'nul'),\n os.path.join(ARC_PATH, 'arc', 'species', 'run.out'),\n os.path.join(ARC_PATH, 'arc', 'testing', 'mol.sdf'),\n ]\n for file_path in file_paths:\n if os.path.isfile(file_path):\n os.remove(file_path)", "def tearDown(self):\n\t\ttry:\n\t\t\tos.remove(self.filename)\n\t\texcept:\n\t\t\tpass", "def tearDown(self):\n os.remove(self.testfilename)", "def tearDown(self):\r\n remove_files(self.files_to_remove)\r\n\r\n # Remove directories last, so we don't get errors trying to remove\r\n # files which may be in the directories.\r\n for d in self.dirs_to_remove:\r\n if exists(d):\r\n rmtree(d)", "def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)", "def tearDown(self):\n for d in os.listdir(tmp_dir_path):\n d_path = os.path.join(tmp_dir_path,d)\n try:\n os.remove(d_path)\n except:\n for f in os.listdir(d_path):\n f_path = os.path.join(d_path,f)\n os.remove(f_path)\n os.rmdir(d_path)\n assert os.listdir(tmp_dir_path) == []", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def clean_files(self):\n self.filenames.clear()", "def teardown_class(self):\n\n file_list = \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*') + 
HEN_FILE_EXTENSION) + \\\n glob.glob(os.path.join(self.datadir,\n '*lcurve*') + HEN_FILE_EXTENSION) + \\\n glob.glob(os.path.join(self.datadir,\n '*lcurve*.txt')) + \\\n glob.glob(os.path.join(self.datadir,\n '*.log')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.dat')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.png')) + \\\n glob.glob(os.path.join(self.datadir,\n '*monol_test*.txt')) + \\\n glob.glob(os.path.join(self.datadir,\n 'monol_test_fake*.evt')) + \\\n glob.glob(os.path.join(self.datadir,\n 'bubu*'))\n for f in file_list:\n print(\"Removing \" + f)\n os.remove(f)", "def tearDownClass(cls):\n\n os.remove(cls.temp_file_input_csv.name)\n os.remove(cls.temp_file_input_csv_larger.name)\n os.remove(cls.temp_file_input_csv_confusion.name)\n os.remove(cls.temp_file_output_series.name)\n os.remove(cls.temp_file_output_seriessuggest.name)\n os.remove(cls.temp_file_output_seriessuggest2.name)\n os.remove(cls.temp_file_output_autosearch.name)", "def tearDown(self):\n # Empty objects in engine\n FileStorage._FileStorage__objects = {}\n # Remove file.json if exists\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")", "def NOtearDown(self):\n\n for f in self.testoutput:\n if os.path.exists(f):\n os.remove(f)", "def tearDown(self):\n try:\n os.remove(self.fixtureFile)\n except OSError:\n pass", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def tearDown(self):\n try:\n remove(\"file.json\")\n except:\n pass", "def tearDown(self):\r\n # Change back to the start dir.\r\n chdir(self.start_dir)\r\n remove_files(self.files_to_remove)\r\n\r\n # Remove directories last, so we don't get errors trying to remove\r\n # files which may be in the directories.\r\n for d in self.dirs_to_remove:\r\n if exists(d):\r\n rmtree(d)", "def tearDown(self):\n try:\n os.remove(self.fixture_file)\n except OSError:\n pass", "def teardown(self):\n self.file_comm.remove_file()\n super(TestCisAsciiFileOutput, self).teardown()", "def test_999_remove_testfiles(self):\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __dir_game_testfile = os.path.join(__dir_game_saves, __test_filename)\n __test_filename_append1 = __test_filename + \"__1\"\n __dir_game_testfile_append1 = os.path.join(__dir_game_saves, __test_filename_append1)\n __test_filename_append2 = __test_filename + \"__2\"\n __dir_game_testfile_append2 = os.path.join(__dir_game_saves, __test_filename_append2)\n __test_filename_append3 = __test_filename + \"__3\"\n __dir_game_testfile_append3 = os.path.join(__dir_game_saves, __test_filename_append3)\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n os.remove(__dir_game_logfile)\n self.assertFalse(os.path.isfile(__dir_game_logfile))\n __list_files = os.listdir(__dir_game_log)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_log)\n os.remove(__dir_game_testfile)\n self.assertFalse(os.path.isfile(__dir_game_testfile))\n os.remove(__dir_game_testfile_append1)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append1))\n os.remove(__dir_game_testfile_append2)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append2))\n 
os.remove(__dir_game_testfile_append3)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append3))\n __list_files = os.listdir(__dir_game_saves)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_saves)", "def teardown_method(self,method):\n filenames = ['poisson_bdm1_test.h5', 'poisson_bdm1_test.xmf','reference_triangle.ele',\n 'reference_triangle.node', 'reference_triangle.poly','proteus.log']\n for file in filenames:\n if os.path.exists(file):\n try:\n os.remove(file)\n except OSError as e:\n print (\"Error: %s - %s.\" %(e.filename, e.strerror ))\n else:\n pass", "def tearDown(self):\n rmtree(self.output_path)\n rmtree(self.content_path)", "def tearDown(self):\n\n rmtree(self.test_output_dir)\n\n return", "def classCleanup(cls):\n cls.RemoveTempFile(\"child_send1.txt\")\n cls.RemoveTempFile(\"child_read1.txt\")\n cls.RemoveTempFile(\"child_send2.txt\")\n cls.RemoveTempFile(\"child_read2.txt\")", "def clean():\n clean_files()", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def tearDown(self):\n try:\n os.remove(self.targets_filename)\n os.remove(self.headers_filename)\n except:\n raise", "def tearDown(self):\n print(\n \"\\nDeleting temporary files...\\n\")\n try:\n shutil.rmtree(TEST_DIR)\n except OSError:\n pass", "def tearDown(self):\n\n self.file_instance.delete()\n\n StdoutBase.tearDown(self)", "def teardown_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()", "def teardown_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()", "def __del__(self):\n for filename in self.files:\n unlink(filename)", "def tearDown(self):\n for base_path in self.feature_paths:\n # Remove the feature files\n for feature in self.features:\n feature = os.path.join(base_path, feature)\n os.remove(feature)\n\n # Attempt to remove all the directories we created\n os.removedirs(os.path.join(base_path, 'subdir'))", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def delete(self):\n if os.path.isfile(TESTS_PATH + \"/\" + self.name):\n os.remove(TESTS_PATH + \"/\" + self.name)", "def vtest_ut_cleanup(self):\n shutil.rmtree(self.get_test_file_path(), ignore_errors=True)", "def tearDown(self):\n utils.rm_rf(TMP_DIR_PATH)", "def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)", "def tearDown(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def classCleanup(cls):\n cls.RemoveTempFile(SettingsCommandTestCase.output_file_name)", "def teardown(self):\n super(TestCisObjOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def teardown(self):\n super(TestCisPickleOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing 
existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def tearDown(self):\n shutil.rmtree(self.test_pic_folder)", "def tearDown():\n for output_file_path in Path(output_dir).glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n for output_file_path in Path(\".\").glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n config_file_path = Path(config_dir) / \"test_voting_learner_cross_validate.cfg\"\n config_file_path.unlink()\n\n remove_jsonlines_feature_files(train_dir)", "def tearDown(self):\n # unittest.TestCase.tearDown(self)\n\n root = os.path.join(\".\", \"files\")\n endingList = os.listdir(root)\n rmList = [fn for fn in endingList if fn not in self.startingList]\n\n if self.oldRoot == root:\n for fn in rmList:\n fnFullPath = os.path.join(root, fn)\n if os.path.isdir(fnFullPath):\n os.rmdir(fnFullPath)\n else:\n os.remove(fnFullPath)\n\n os.chdir(self.oldRoot)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def test_final_cleanup():\n cleanup_file(\"tfsaves\")", "def tearDown(self):\n if not self.test_manager.leave_output:\n shutil.rmtree(self.directory)", "def tearDown(self):\r\n shutil.rmtree(self.working_directory)", "def tearDown(self):\n rmtree(self.out_dir, ignore_errors=True)", "def tearDown(cls):\n\n # cls.test_mmp_series_object.clean_out_data_seriesobj()\n # reusable data struct\n cls.test_mmp_series_object.clean_out_data_seriesobj()\n cls.test_dataset_testresults.clear()\n # reusable results file\n # os.remove(cls.temp_file_output_series.name)", "def tearDown(self):\n storage = FileStorage()\n for obj in storage.all().values():\n storage.delete(obj)\n storage.save()", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def tearDown(self):\n if os.path.exists(self.temp):\n shutil.rmtree(self.temp)", "def cleanup_test(self):\n test_variables = _get_test_variables()\n test_status = test_variables['${TEST STATUS}']\n if test_status == 'FAIL':\n # Test failed: keep directory for later inspection\n return\n\n # Test passed: remove the execution directory but preserve all\n # important log files, if any (valgrind, gcov, ...)\n\n if len(self._preserve_files) == 0:\n shutil.rmtree(self._execdir, True)\n return\n\n # Move all the files to preserve to a temporary directory\n\n backup_dir = self._execdir + '.preserve'\n os.makedirs(backup_dir)\n for file in self._preserve_files:\n shutil.move(file, backup_dir)\n\n # Delete the execution directory and rename the temporary directory\n\n shutil.rmtree(self._execdir, True)\n os.rename(backup_dir, self._execdir)", "def __del__(self):\n for f in self._files:\n f.close()", "def tearDown(self):\n if os.path.exists('file.json'):\n os.remove(\"file.json\")", "def teardown(self):\n super(TestCisPlyOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def tearDown(self):\n shutil.rmtree(self.working_directory)", "def tearDown(self):\n if self.workdir and os.path.exists(self.workdir):\n shutil.rmtree(self.workdir)", "def tearDown(self) -> None:\n self.directory.cleanup()", "def tearDown(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")", "def tearDown(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: 
Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def cleanup(self):\n if os.path.exists(f\"{self.save_path}{self.name}\"):\n shutil.rmtree(f\"{self.save_path}{self.name}\")", "def tearDown(self):\n\n PyFunceble.helpers.File(self.storage_file).delete()", "def tearDownClass(cls):\n path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),\n 'examples', 'arkane', 'species')\n cls.dump_path = os.path.join(path, 'C2H6')\n cls.load_path = os.path.join(path, 'C2H6_from_yaml')\n cls.extensions_to_delete = ['pdf', 'txt', 'inp', 'csv']\n cls.files_to_delete = ['arkane.log', 'output.py']\n cls.files_to_keep = ['C2H6.yml']\n for path in [cls.dump_path, cls.load_path]:\n for name in os.listdir(path):\n item_path = os.path.join(path, name)\n if os.path.isfile(item_path):\n extension = name.split('.')[-1]\n if name in cls.files_to_delete or \\\n (extension in cls.extensions_to_delete and name not in cls.files_to_keep):\n os.remove(item_path)\n else:\n # This is a sub-directory. remove.\n shutil.rmtree(item_path)", "def tearDownClass(self):\n remove('temp_mol_file.csv')", "def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)", "def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)", "def tearDownClass(self):\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")", "def tearDownClass(cls):\n for testfile in [cls.testfile, cls.testyfile, cls.testbrfile, cls.testlog]:\n if os.path.exists(testfile):\n os.remove(testfile)\n\n for e in cls.origEnv:\n if cls.origEnv[e] is None:\n del os.environ[e]\n else:\n os.environ[e] = cls.origEnv[e]\n\n if os.path.exists(cls.testDir):\n rmtree(cls.testDir)", "def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)", "def tearDown(self):\n try:\n os.remove(self.junk_file)\n except OSError as doh:\n if doh.errno == 2:\n # No such File, ignore\n pass\n else:\n raise", "def tearDown(self):\r\n shutil.rmtree(self.temp_dir_path)", "def tearDown(self):\n shutil.rmtree(self._data_dir_path)", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def tearDown(self):\n \tshutil.rmtree(self.tempdir)", "def clear_files_paths(self):\n del self.__files_paths[:]", "def tearDownClass(cls):\n os.removedirs(cls.test_dir)\n del cls.checkpoint\n del cls.dataset\n del cls.experiment\n del cls.test_dir\n del cls.tokenizer_parameters\n gc.collect()", "def tearDown(self):\n sys.stdout = sys.__stdout__\n os.remove(\"file.json\")", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def tearDown(self):\n with contextlib.suppress(FileNotFoundError):\n Path(\"test.xlsx\").absolute().unlink()", "def tearDown(self):\n if self.workspace is not None:\n rmtree(self.workspace.workspace, ignore_errors=True)\n Path.cwd().joinpath(\"workspace.tar.gz\").unlink(missing_ok=True)\n for item in self.items:\n if item.is_dir():\n rmtree(item)\n elif item.is_file():\n item.unlink()\n self.workspace = None", "def tearDown(self):\n if os.path.isfile(TestConsole.jsfile_test):\n os.remove(TestConsole.jsfile_test)" ]
[ "0.8038646", "0.80053955", "0.79976135", "0.7974729", "0.7966363", "0.78566366", "0.7773938", "0.7761133", "0.77407455", "0.7704826", "0.76752174", "0.76329154", "0.76329154", "0.76329154", "0.7632071", "0.7624899", "0.761122", "0.76048136", "0.7599777", "0.7578465", "0.7522839", "0.75016505", "0.7480962", "0.7475709", "0.744732", "0.7410407", "0.7407539", "0.74008465", "0.73942304", "0.7383741", "0.7373901", "0.73590696", "0.7334493", "0.7333375", "0.73288053", "0.7316756", "0.7303792", "0.73019993", "0.7300765", "0.72865677", "0.72814363", "0.72754", "0.72679764", "0.72647184", "0.72647184", "0.7264687", "0.7258506", "0.7253885", "0.72343254", "0.72185165", "0.7213685", "0.7208504", "0.72063816", "0.71919763", "0.7189614", "0.71876365", "0.71635795", "0.7155935", "0.7154771", "0.7152638", "0.71506107", "0.714949", "0.7142696", "0.71383667", "0.7137032", "0.7131639", "0.7125749", "0.7125279", "0.7124298", "0.7123749", "0.71125084", "0.711051", "0.7108695", "0.70927715", "0.7090611", "0.7090407", "0.7090129", "0.7090129", "0.7089294", "0.7089211", "0.70798373", "0.70678216", "0.70622504", "0.7059626", "0.7057756", "0.7057756", "0.705744", "0.7057044", "0.7030979", "0.7030709", "0.70284265", "0.7019991", "0.70100623", "0.69947845", "0.69924784", "0.69817", "0.69710326", "0.69689775", "0.6954413", "0.6948023", "0.69419277" ]
0.0
-1
Validate the regex patterns, but only partially while the user is still typing. Because the 'from' pattern will be where the user specifies captures, changing it also requires revalidating the substitution pattern. However, if the user is still typing (as opposed to hitting Enter to complete the input), we do the minimal amount of work necessary, i.e. we just set the colors back to neutral and disable the Apply button.
def validateRegexFields(self, complete=False):
    # Assume the patterns aren't valid.
    self.m_validFromRe = False
    self.m_validPatterns = False

    ### Validate the 'from' pattern
    #
    regexCtl = self.m_reFromCtl
    subsCtl = self.m_reToCtl
    regex, subs = regexCtl.Value, subsCtl.Value
    regColor, subColor = wx.NullColour, wx.NullColour
    if complete and regex:
        regColor = subColor = wx.BLUE
        try:
            re.sub(regex, subs, '')
        except re.error as e:
            subColor = wx.RED
            try:
                re.compile(regex)
            except re.error as e:
                regColor = wx.RED
            else:
                self.m_validFromRe = True
        else:
            self.m_validFromRe = True
            self.m_validPatterns = bool(subs)
    self.setTextColor(regexCtl, regColor)
    self.setTextColor(subsCtl, subColor)
    if complete:
        self.populateFileList()
    else:
        self.m_applyBtn.Enabled = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onTextChange(self, event):\n\n self.validateRegexFields(complete=False)\n event.Skip()", "def onHitEnterInFrom(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validFromRe:\n self.m_reToCtl.SetFocus()", "def __checkForPattern(self):\n if self._keyCode in self._patterns:\n assert(self.notify.debug(\"Pattern Match: \" + self._keyCode))\n messenger.send(KeyCodes.PATTERN_MATCH_EVENT, [self._keyCode])\n self.reset()\n \n # If the key code is longer than the longest pattern possible,\n # Then reset! \n elif self._keyCodeCount == self._patternLimit or len(self.getPossibleMatchesList()) == 0:\n assert(self.notify.debug(\"No pattern match!\"))\n messenger.send(KeyCodes.PATTERN_NO_MATCH_EVENT)\n self.reset()", "def check_match_pattern(self):\n text = self.ui.plainTextEdit.toPlainText()\n pattern = self.ui.textPattern.text()\n result = re.search(pattern, text)\n group = int(self.ui.spinGroup.text())\n if result:\n self.ui.textMatch.setText(result.group(group))", "def onHitEnterInTo(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validPatterns:\n self.m_fileList.SetFocus()", "def _validate_pattern_fields(self):\n # TO ADD:\n ## check pattern is dict ??\n ## A1 ! check if all vars used in sprintf are declared\n ## check for quoted '%s' in sprintf text (not allowed).\n ## check that only all subs are %s and that the number of %s matches the length of the var list.\n ## re.search(r\"\\%(.)\", , ) => from this get list to check all %s and length to check against var list.\n ## Given these checks - makes more sense to hardwire sprintf subfield names than use config approach.\n \n for field, field_content in self.pattern.items():\n if field not in self.pkey_dict:\n warnings.warn(\"Pattern has unknown field: %s !\" % field)\n \n # The following is quote ugly and hard to follow. Should probably be refactored\n oneOf = False\n oneOf_list = []\n for field, field_spec in self.pkey_dict.items():\n if field_spec['compulsory']:\n if field not in self.pattern:\n warnings.warn(\"Pattern is missing compulsory field: %s !\" % field)\n elif field_spec['OneOf']:\n oneOf_list.append(field)\n if field in self.pattern:\n oneOf = True \n if field_spec['sprintf']:\n if field in self.pattern:\n for subfield in self.pattern[field]:\n if subfield not in self.sprintf_keys:\n warnings.warn(\"The field %s has an unknown subfield %s.\" % (field, subfield))\n for subfield in self.sprintf_keys:\n if subfield not in self.pattern[field]:\n warnings.warn(\"The field %s lacks the compulsory subfield %s.\" % (field, subfield))\n # Check that number of vars matches number %s in text field\n if not len(re.findall('%s', self.pattern[field]['text'])) == len(self.pattern[field]['vars']):\n warnings.warn(\"Wrong number of vars in field '%s' of %s\" % (field, self.pattern['pattern_name']))\n for v in self.pattern[field]['vars']:\n if v not in self.pattern['vars']:\n warnings.warn(\"%s not in varlist %s\" % (v, str(self.pattern['vars'])))\n# Move spec checks down: \n# if field_spec['msExpression']:\n# self._validate_quoted(field['text'])\n# self._validate_ms\n \n if not oneOf:\n warnings.warn(\"Pattern must have at least one of: \" + str(oneOf_list))\n\n # Poss to add: validate number of vars for sprintf subs", "def validate_data(self):\n for pattern in self.patterns:\n if pattern == \"\":\n self.patterns.remove(\"\")\n\n if not self.patterns:\n print(\"WARNING! 
Missing pattern or empty string!\")\n sys.exit()", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def prepare_regexps(self):\r\n print(\"Preparing regular expressions for this session.\")\r\n privmsg_parse = re.compile(\"\")", "def test_grammar_rules_regex(self) -> None:\n for rule in self.rules.grammar_regex:\n positions: List[Tuple[int, int]] = self.report.get_regex_postions(\n rule[\"regex\"], ignore_case=True\n )\n for position in positions:\n self.add_error(rule[\"message\"], position=position)", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def _source_matchpattern_field_string_is_valid_as_regex(self):\n if self.source_matchpattern is None:\n raise RuleError(\"'source_matchpattern' must be a valid regex.\")\n if not regex_is_valid(self.source_matchpattern):\n # print(f\"{self}\")\n raise SourceMatchpatternError(\n \"Value for 'source_matchpattern' must be a valid regex.\"\n )\n return True", "def validate(self):\n stguess = self.text.toPlainText()\n if not self.pkfitdlg.checkUserInput(stguess):\n return\n self.stguess = stguess\n self.accept()", "def validate_search_inputs(self):\r\n\r\n debug(\"validate\")\r\n fail = False\r\n msg = \"\"\r\n if self.m_regex_search_checkbox.GetValue():\r\n if self.m_searchfor_textbox.GetValue() == \"\" or self.validate_search_regex():\r\n msg = _(\"Please enter a valid search regex!\")\r\n fail = True\r\n elif self.m_searchfor_textbox.GetValue() == \"\":\r\n msg = _(\"Please enter a valid search!\")\r\n fail = True\r\n if not fail and self.m_fileregex_checkbox.GetValue():\r\n if self.m_filematch_textbox.GetValue().strip() == \"\" or self.validate_regex(self.m_filematch_textbox.Value):\r\n msg = \"Please enter a valid file regex!\"\r\n fail = True\r\n elif self.m_filematch_textbox.GetValue().strip() == \"\":\r\n msg = _(\"Please enter a valid file pattern!\")\r\n fail = True\r\n if not fail and self.m_dirregex_checkbox.GetValue():\r\n if self.validate_regex(self.m_exclude_textbox.Value):\r\n msg = _(\"Please enter a valid exlcude directory regex!\")\r\n fail = True\r\n if not fail and not exists(self.m_searchin_text.GetValue()):\r\n msg = _(\"Please enter a valid search path!\")\r\n fail = True\r\n if (\r\n not fail and\r\n self.m_logic_choice.GetStringSelection() != \"any\" and\r\n re.match(r\"[1-9]+[\\d]*\", self.m_size_text.GetValue()) is None\r\n ):\r\n msg = _(\"Please enter a valid size!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_modified_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a modified date!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_created_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a created date!\")\r\n fail = True\r\n if fail:\r\n errormsg(msg)\r\n return fail", "def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be 
tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False", "def constraint_clause_pattern_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n value = getattr(presentation, field.name)\n if value is not None:\n try:\n # From TOSCA 1.0 3.5.2.1:\n #\n # \"Note: Future drafts of this specification will detail the use of regular expressions\n # and reference an appropriate standardized grammar.\"\n #\n # So we will just use Python's.\n re.compile(value)\n except re.error as e:\n context.validation.report(\n u'constraint \"{0}\" is not a valid regular expression in \"{1}\": {2}'\n .format(field.name, presentation._fullname, safe_repr(value)),\n locator=presentation._get_child_locator(field.name), level=Issue.FIELD, exception=e)", "def pre_search(self):\n self.update_status(\"Edit pattern filter\")\n self.patternEditor.show()", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def generate_regex_from_string(self):\n tries = 0\n while tries < self.max_tries:\n try:\n tries += 1\n if tries % 100 == 0:\n print(f\"Tries: {tries}\", end=\"\\r\")\n patterns_to_try = self.generate_regex_pattern()\n for _, pattern in patterns_to_try:\n if re.fullmatch(pattern, self.string):\n self.found_patterns.add(pattern)\n else:\n print(f\"Doesn't Match! {pattern} -> {self.string}\")\n except Exception as e:\n pass\n if self.negative_string:\n self.found_patterns = self.best_pattern()", "def on_test_regex(self, event):\r\n\r\n self.m_regex_test_button.Enable(False)\r\n self.tester = RegexTestDialog(\r\n self,\r\n self.m_case_checkbox.GetValue(),\r\n self.m_dotmatch_checkbox.GetValue(),\r\n self.m_searchfor_textbox.GetValue()\r\n )\r\n self.tester.Show()", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def find_by_pattern(self):\n while True: \n word = input(\"Enter a regular expression ex: \\d\\d\\w+. 
Press Q to \"\n \"quit to the main screen: \")\n if word.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n return self.dict_list\n self.find_by_pattern_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(word, value):\n self.find_by_pattern_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_pattern_list)\n break\n self.del_or_edit()", "def __validate_conn_pattern(conns:str)->str:\n pattern1 = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n # pattern2 = re.compile(r'^\\w+:\\w+@\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n\n for conn in conns.split(\",\"):\n if not pattern1.match(conn) and not pattern2.match(conn):\n raise argparse.ArgumentTypeError(f'Invalid connection format: {conn}. Supported formats: 127.0.0.1:32049 or user:[email protected]:32049')\n\n return conns", "def _config_regex(self):", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def build_match_and_apply_functions(pattern, search, replace):\n\n def matches_rule(word):\n \"\"\" Check if word contains pattern.\n \"\"\"\n return re.search(pattern, word)\n\n def apply_rule(word):\n \"\"\" Replace text with replacement in word.\n \"\"\"\n return re.sub(search, replace, word)\n\n return (matches_rule, apply_rule)", "def test_patterns(self):\n tests = (\n (\"https://youtu.be/OQwD0QCbxaA\", \"https://www.youtube.com/watch?v=OQwD0QCbxaA&feature=my_favorites\"),\n (\"https://smile.amazon.com/Simons-Cat-Simon-Tofield/dp/0446560065\",\n \"http://www.amazon.com/Simons-Cat-Simon-Tofield/dp/0446560065/ref=sr_1_1?ie=UTF8&qid=1346302386&sr=\"),\n (\"http://example.com/\", \"http://example.com/?feat=directlink\"),\n (\"http://example.com/\", \"http://example.com/?\"),\n (\"http://example.com/?foo=1\", \"http://example.com/?foo=1&\"),\n\n )\n\n config = copyclipper.LoadConfig()\n for test in tests:\n result = copyclipper.ProcessValue(config, test[1])\n self.assertEquals(result, test[0],\n msg=\"Expected\\n%r\\ngot\\n%r\\nfrom\\n%r\\n\" % (test[0], test[1], result))", "def register_patterns(self) -> None:\n\n if (patterns := getattr(self, \"WORDS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(rf\"\\b{k}\\b\", v))\n\n if (patterns := getattr(self, \"PATTERNS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(k, v))\n\n if (replacements := getattr(self, \"REPLACEMENTS\", None)) is not None:\n for replacement in replacements:\n self.register_replacement(replacement)", "def validate(self, document) -> None:\n\n # document.text will have value in two cases, after we pressed enter in the prompt or when navigating down\n # the autocomplete commands list. 
In the second case there is no need to press enter to trigger this method,\n # but in those cases self.validation_type == ''\n typed = document.text\n\n if typed:\n if self.validation_type == \"number\":\n regex = r\"^-?\\d+$\"\n\n if not re.search(regex, typed):\n\n raise ValidationError(\n message=\"Please input a positive or negative number.\"\n )\n elif self.validation_type == \"yes_no\":\n regex = r\"^[yYnN]$\"\n\n if not re.search(regex, typed):\n raise ValidationError(message=\"Please type y, n, Y or N.\")\n elif self.validation_type == \"text_max_len\":\n if len(typed) > 100:\n raise ValidationError(message=\"La oración debe tener menos de 100 caracteres.\")\n else:\n raise ValidationError(message=\"Internal Error: Wrong validation type\")", "def enterGuess(self):\n validPattern = False\n while not validPattern:\n print # intentional blank line\n prompt = 'Enter a guess (colors are '\n prompt += self._palette[:self._numColorsInUse] + '): '\n patternString = raw_input(prompt)\n \n validPattern = True\n if len(patternString) != self._lengthOfPattern:\n print 'The pattern must have', self._lengthOfPattern, 'pegs'\n validPattern = False\n else:\n for i in range(self._lengthOfPattern):\n if patternString[i].upper() not in self._palette[:self._numColorsInUse]:\n validPattern = False\n if not validPattern:\n print 'The color options are', self._palette[:self._numColorsInUse]\n \n if validPattern:\n pattern = Pattern(self._lengthOfPattern)\n for i in range(self._lengthOfPattern):\n pattern.setPegColor(i, self._palette.index(patternString[i].upper()))\n\n return pattern", "def validate_regexp(self, regexp):\n if (not self.ui.regexCheckBox.isChecked()) or regexp.size() == 0:\n self.ui.errorLabel.setText(\"\")\n\n self.regexp = QtCore.QRegExp(regexp,\n QtCore.Qt.CaseSensitive\n if self.ui.caseCheckBox.isChecked() else QtCore.Qt.CaseInsensitive)\n\n if self.regexp.isValid():\n self.show_error(\"\")\n else:\n self.show_error(unicode(regexp.errorString()))", "def validate_input(self, *args):\n return", "def validate_regex(self, pattern, flags=0):\r\n try:\r\n re.compile(pattern, flags)\r\n return False\r\n except:\r\n errormsg(_(\"Invalid Regular Expression!\"))\r\n error(traceback.format_exc())\r\n return True", "def test_regex_onlyfullmatch(self):\n val = DwcaValidator(yaml.load(self.yaml_regexfullmatch, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n\n document = {'occurrenceid': \"123:12345678\"}\n self.assertTrue(val.validate(document))\n document = {'occurrenceid': \"123:123456789\"}\n self.assertFalse(val.validate(document))", "def integrated_address_regex(self) -> Any:", "def validator(self):\n if not hasattr(self, 'pbApply'): # True at initalisation\n return\n\n pbApply_enabled = True\n\n # Validate the hostname\n val = self.leHostname.text()\n valid_chars = all([c in VALID_HN_CHARS for c in val])\n if (val and valid_chars and not val.startswith('-')):\n color = GREEN\n else:\n color = RED\n pbApply_enabled = False\n self.leHostname.setStyleSheet('QLineEdit { background-color: %s }'\n % color)\n\n # Validate networking elements\n for item in (self.leIP, self.leGateway,\n self.leNetmask, self.leBroadcast):\n try:\n IPv4Address(item.text())\n color = GREEN\n except AddressValueError:\n pbApply_enabled = False\n color = RED\n item.setStyleSheet('QLineEdit { background-color: %s }' % color)\n\n # Validate commands\n if any([self.cbReboot.isChecked(),\n self.cbDynamic.isChecked(),\n self.cbFlash.isChecked()]):\n pbApply_enabled &= True\n else:\n pbApply_enabled = False\n\n # Set 
pbApply after having validated all other fields\n self.pbApply.setEnabled(pbApply_enabled)", "def pattern_with_digits_and_delimiter_validate_regular_expression(cls, value):\n if value is None:\n return value\n\n if not re.match(r\"^image_\\d{1,3}$\", value ,re.IGNORECASE):\n raise ValueError(r\"must validate the regular expression /^image_\\d{1,3}$/i\")\n return value", "def toClean(self, *patterns):\n self.cleanables.extend([*patterns])", "def test_regex_constraint(self):\n from petstore_api.model import apple\n\n # Test with valid regex pattern.\n inst = apple.Apple(\n cultivar=\"Akane\"\n )\n assert isinstance(inst, apple.Apple)\n\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"cHiLe\"\n )\n assert isinstance(inst, apple.Apple)\n\n # Test with invalid regex pattern.\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'cultivar'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"!@#%@$#Akane\"\n )\n\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'origin'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"!@#%@$#Chile\"\n )", "def check_input_by_regex(message, regex):\n while True:\n try:\n input_str = str(input(message)).capitalize()\n except ValueError:\n # input incorrect retry\n continue\n if not re.fullmatch(regex, input_str):\n # Value input incorrect\n continue\n else:\n return input_str", "def address_regex(self) -> Any:", "def process(patterns, text):\n\n for i, p in enumerate(patterns):\n pattern = _fix_pattern(p)\n\n found = []\n for grammar, replace in pattern:\n\n find_and_replace = create_find_and_replace(grammar, replace)\n results = parse_grammar(find_and_replace, text)\n if not results:\n break\n else:\n found.append(len(results))\n text = _transform_results(results, text)\n\n if found:\n log.info('=> pattern {} found {} time(s) in {} pass(es)'\n .format(i + 1, sum(found), len(found)))\n else:\n log.info('__ pattern {} not found'\n .format(i + 1))\n\n return text", "def _validate(self, *_):\n provider = self.provider_entry.get_text()\n username = self.account_name_entry.get_text()\n token = \"\".join(self.token_entry.get_text().split())\n\n if not username:\n self.account_name_entry.get_style_context().add_class(\"error\")\n valid_name = False\n else:\n self.account_name_entry.get_style_context().remove_class(\"error\")\n valid_name = True\n\n if not provider:\n self.provider_combobox.get_style_context().add_class(\"error\")\n valid_provider = False\n else:\n self.provider_combobox.get_style_context().remove_class(\"error\")\n valid_provider = True\n\n if (not token or not OTP.is_valid(token)) and not self.props.is_edit:\n self.token_entry.get_style_context().add_class(\"error\")\n valid_token = False\n else:\n self.token_entry.get_style_context().remove_class(\"error\")\n valid_token = True\n\n self.emit(\"changed\", all([valid_name, valid_provider, valid_token]))", "def match(pattern: List[str], source: List[str]) -> List[str]:\n sind = 0 # current index we are looking at in the source list\n pind = 0 # current index we are looking at in the pattern list\n result: List[str] = [] # to store the substitutions that we will return if matched\n acc = ''\n\n # keep checking as long as we haven't hit the end of both pattern and source\n while sind != len(source) or pind != len(pattern): \n # Your job is to 
fill out the body fo this loop\n # 1) if we reached the end of the pattern but not source \n if pind == len(pattern):\n return None\n # 2) if the current thing in the pattern is a %\n elif pattern[pind] == '%':\n pind += 1 # moving from % to next word \n while sind != len(source):\n if pind != len(pattern) and pattern[pind] == source[sind]:\n break \n else: \n if acc == \"\": \n acc += source[sind] # if it is the first character do not add a space \n else: \n acc += \" \"\n acc += source[sind]\n sind += 1\n result.append(acc)\n acc = ''\n # 3) if we reached the end of the source but not the pattern\n elif sind == len(source):\n return None \n # 4) if the current thing in the pattern is an _\n elif pattern[pind] == '_':\n result.append(source[sind])\n sind += 1\n pind += 1\n #appending is for lists and adding is for strings\n # 5) if the current thing in the pattern is the same as the current thing \n # in the source\n elif pattern[pind] == source[sind]:\n sind += 1\n pind += 1\n # 6) else : this will happen if none of the other conditions are met\n # it indicates the current thing it pattern doesn't match the current\n # thing in source\n else: \n return None\n return result", "def long_suggestion_grammar_check(text):\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(text)\n for i, match in enumerate(matches):\n fromy = match.fromy + 1\n fromx = match.fromx + 1\n ruleId = match.ruleId\n replacement = match.replacements[0]\n matches[i] = \"Line {}, column {}, Rule ID: {}[{}]\\nMessage: Did you mean '{}'?\\nSuggestion: {}\".format(fromy, fromx, ruleId, i, replacement, replacement)\n return matches", "def tok_full_regexp(self, case=False):\n\t\tre_str=\"\"\n\t\t\n\t\t# => cas normal : une seule chaîne dans self.xtexts\n\t\tif not self.multimode:\n\t\t\t# récup d'une seule chaîne échappée\n\t\t\tre_str = self.str_pre_regexp(self.xtexts)\n\t\t\n\t\t# => plusieurs chaînes matchables à alimenter avec:\n\t\t# - permuts de 2 elts + BLANK (DIY) quand\n\t\t# les XML n'ont pas préservé l'ordre\n\t\t# - listes de possibilité (à préparer avant)\n\t\t# quand variantes multiples\n\t\telif self.multimode:\n\t\t\talternatives = []\n\t\t\t# ex: ['nom prénom', 'prénom nom'] => /((?:nom\\W*prénom)|(?:prénom\\W*nom))/\n\t\t\t# ex: ['PP1-PP2', 'PP1-P2', 'PP1-2'] => /((?:PP1-PP2)|(?:PP1-P2)|(?:PP1-2))/\n\t\t\tfor single_text in self.xtexts:\n\t\t\t\t# pre_regexp ajoute les interpolations\n\t\t\t\t# INTERWORD et INTERCHAR pour ch. chaîne\n\t\t\t\tre_single = self.str_pre_regexp(single_text)\n\t\t\t\t\n\t\t\t\t# capsule \"non capturing\"\n\t\t\t\talternatives.append(\"(?:\"+re_single+\")\")\n\t\t\t\n\t\t\t# combi1 -OR- combi2... (using regex pipe)\n\t\t\tre_str = \"|\".join(alternatives)\n\t\t\n\t\t# enfin ajout de balises de capture extérieures\n\t\t# et compilation (en case insensitive sauf exceptions)\n\t\t# -----------------------------------------------------\n\t\t# 2 possibilités capture: en début ligne ou dans le milieu\n\t\t# mais alors pas à l'intérieur des renvois #(#..#)#\n\t\tif not case:\n\t\t\tmy_regexp_object = re.compile(\"(?:^(\"+re_str+\"))|(?:(?<!#\\(#)(\"+re_str+\"))\", re.IGNORECASE)\n\t\telse:\n\t\t\tmy_regexp_object = re.compile(\"(?:^(\"+re_str+\"))|(?:(?<!#\\(#)(\"+re_str+\"))\")\n\t\treturn my_regexp_object", "def _validator_regex(self, field, value):\n try:\n re.compile(value)\n except re.error:\n self._error(field, \"{} is not a valid regex\".format(value))", "def regex():\n #ttttttccccccppppppvvvvvvvvvvvvoooooo\n pattern = r'#? 
*' # patter may or may not start with a '\n pattern += r'([0-9A-F]{6})' # temperature\n pattern += r'([0-9A-F]{6})' # conductivity\n pattern += r'([0-9A-F]{6})' # pressure\n pattern += r'([0-9A-F]{4})' # pressure temp\n pattern += r'([0-9A-F]{0,14})' # volt0, volt1, oxygen\n pattern += NEWLINE\n return pattern", "def validate_long_url(form, field):\n for regex in LinkForm.rejected_regexes:\n if regex.search(field.data):\n raise ValidationError(\"That URL is not allowed.\")", "def str_pre_regexp(self, anystring, debug_lvl = 0):\n\t\t\n\t\tstrlen = len(anystring)\n\t\t\n\t\t# A) préparation du contenu\n\t\t# --------------------------\n\t\tsubtokens = re_TOUS.findall(anystring)\n\t\t\n\t\t# £TODO now read those params in conf\n\t\tdo_cesure=True\n\t\tdo_espace=True\n\t\tdo_newline=True\n\t\tdo_char_classes=True\n\t\t\n\t\t\n\t\tif do_espace and do_newline:\n\t\t # autorise 1 saut de ligne, 2 espaces et 1x toutes poncts\n\t\t # (ex: ',' entre nom et prénom) -------------\n\t\t r_INTER_WORD = '[¤ \\W]{0,4}'\n\t\telif do_espace:\n\t\t r_INTER_WORD = '[ \\W]{0,3}'\n\t\telif do_newline:\n\t\t r_INTER_WORD = '[¤\\W]{0,2}'\n\t\telse:\n\t\t r_INTER_WORD = '[\\W]?'\n\t\t\n\t\t# autorise césure, saut de ligne, espace\n\t\tr_INTER_CHAR = '[-¤ ]{0,3}'\n\t\t\n\t\t# on ne fait pas la césure pour les locutions courtes\n\t\tif (not do_cesure or strlen < 6):\n\t\t\t# re: chaîne de base \n\t\t\t# ------------------\n\t\t\t# autorisant un ou des passage-s à la ligne à chaque limite \n\t\t\t# limites selon re_FINDALL = (\\b et/ou bords de ch. ponct)\n\t\t\tmy_re_str = r_INTER_WORD.join(r\"%s\" % re.escape(u) for u in subtokens)\n\t\t\n\t\t# expression + sioux pour permettre césure inattendue et erreurs OCR\n\t\telse:\n\t\t\tminlen = strlen\n\t\t\t# on permet 3 caractères en plus tous les 80 caractères\n\t\t\tmaxlen = strlen + ((strlen // 80)+1) * 3\n\t\t\t\n\t\t\t# lookahead sur /./ ==> exprime la contrainte de longueur de la regex qui suivra\n\t\t\tre_length_prefix = r\"(?=.{%i,%i})\" % (minlen, maxlen)\n\t\t\t\n\t\t\tinterpolated_tokens = []\n\t\t\t\n\t\t\t# interpolations dans chaque token...\n\t\t\tfor u in subtokens:\n\t\t\t\tinterpold_word=\"\"\n\t\t\t\tarray_c_re = []\n\t\t\t\t\n\t\t\t\t# ... donc pour chaque **caractère**\n\t\t\t\t# =========================\n\t\t\t\tfor c in u:\n\t\t\t\t\t# each character regexp\n\t\t\t\t\tc_re = \"\"\n\t\t\t\t\t\n\t\t\t\t\t# cas simple sans traitement OCR\n\t\t\t\t\t# ----------\n\t\t\t\t\tif not do_char_classes or (c not in XTokinfo.OCR_CLASSES):\n\t\t\t\t\t\t# esc !\n\t\t\t\t\t\tc_re = re.escape(c)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# store\n\t\t\t\t\t\tarray_c_re.append(c_re)\n\t\t\t\t\t\n\t\t\t\t\t# cas avec OCR: sub/caractère/groupe de caractères 'semblables'/g\n\t\t\t\t\t# -------------\n\t\t\t\t\telse:\n\t\t\t\t\t\tc_matchables = XTokinfo.OCR_SIMILAR_CHARACTER[c]\n\t\t\t\t\t\t# esc + joined alternatives\n\t\t\t\t\t\tc_alter = '|'.join(map(re.escape,c_matchables))\n\t\t\t\t\t\t\n\t\t\t\t\t\t# ex : regexp = '(?:i|l)'\n\t\t\t\t\t\tc_re += '(?:' + c_alter + ')'\n\t\t\t\t\t\t\n\t\t\t\t\t\t# store\n\t\t\t\t\t\tarray_c_re.append(c_re)\n\t\t\t\t\t\n\t\t\t\t\t# dans les 2 cas: césure\n\t\t\t\t\t# ----------------------\n\t\t\t\t\t# on va ajouter /-?/ entre ch. 
\"regexp caractère\" (ou re_INTER_CHAR)\n\t\t\t\t\tinterpold_word = r_INTER_CHAR.join(array_c_re)\n\t\t\t\t\n\t\t\t\tinterpolated_tokens.append(interpold_word)\n\t\t\t\t\n\t\t\t\tmy_re_str = re_length_prefix + r_INTER_WORD.join(r\"%s\" % u \n\t\t\t\t for u in interpolated_tokens)\n\t\t\t\t\n\t\t\t\t# exemple\n\t\t\t\t# ====x_str==== Oxidation of Metals\n\t\t\t\t# ====re_str==== (?=.{19,22})(?:O|0))[-¤ ]{0,3}x[-¤ ]{0,3}(?:i|\\;|l)[-¤ ]{0,3}(?:d|cl)[-¤ ]{0,3}(?:a|u|n)[-¤ ]{0,3}t[-¤ ]{0,3}(?:i|\\;|l)[-¤ ]{0,3}(?:o|c)[-¤ ]{0,3}n[¤ ]{0,2}(?:o|c)[-¤ ]{0,3}(?:f|t)[¤ ]{0,2}M[-¤ ]{0,3}(?:e|c)[-¤ ]{0,3}t[-¤ ]{0,3}(?:a|u|n)[-¤ ]{0,3}(?:1|l|i|I|\\]|\\/|Z)[-¤ ]{0,3}s\n\t\t\t\n\t\t\n\t\t\n\t\tif debug_lvl >= 2 :\n\t\t\tprint(\"SUBTOKS\", subtokens)\n\t\t\n\t\tif debug_lvl >= 3 :\n\t\t\tprint(\"pre_regexp:\", file=sys.stderr)\n\t\t\tprint(\"\\t=x_str=\", anystring, file=sys.stderr)\n\t\t\tprint(\"\\t=re_str=\", my_re_str, file=sys.stderr)\n\t\t\n\t\t\n\t\t# B) Décision du format des limites gauche et droite pour les \\b\n\t\t# --------------------------------------------------\n\t\t# test si commence par une ponctuation\n\t\tif re.search(r'^\\W', subtokens[0]):\n\t\t\tre_boundary_prefix = \"\"\n\t\telse:\n\t\t\tre_boundary_prefix = \"\\\\b\"\n\t\t# idem à la fin\n\t\tif re.search(r'\\W$', subtokens[-1]):\n\t\t\tre_boundary_postfix = \"\"\n\t\telse:\n\t\t\tre_boundary_postfix = \"\\\\b\"\n\t\t\n\t\t# voilà\n\t\treturn re_boundary_prefix + my_re_str + re_boundary_postfix", "def _validate_from_and_to_time(from_time, to_time):\n # Validate From Time.\n from_datetime = datetime.datetime.strptime(from_time, '%Y-%m-%d %H:%M:%S')\n datetime_today = datetime.datetime.today()\n from_datetime_limit = datetime_today - datetime.timedelta(days=7)\n if from_datetime < from_datetime_limit:\n raise ValueError(\"The from time may not be earlier than '{from_datetime_limit}'.\".format(\n from_datetime_limit=from_datetime_limit\n ))\n if from_datetime > datetime_today:\n raise ValueError(\"The from time may not be in the future.\")\n # Validate To Time.\n to_datetime = datetime.datetime.strptime(to_time, '%Y-%m-%d %H:%M:%S')\n if to_datetime > datetime_today:\n raise ValueError(\"The to time may not be in the future.\")\n if to_datetime <= from_datetime:\n raise ValueError(\"The to time must after the from time.\")", "def buffer_build_regex(buffer):\n\n\thdata = weechat.hdata_get(\"buffer\")\n\tinput = weechat.hdata_string(hdata, buffer, \"input_buffer\")\n\texact = weechat.hdata_integer(hdata, buffer, \"text_search_exact\")\n\twhere = weechat.hdata_integer(hdata, buffer, \"text_search_where\")\n\tregex = weechat.hdata_integer(hdata, buffer, \"text_search_regex\")\n\n\tif not regex:\n\t\tinput = re.escape(input)\n\n\tif exact:\n\t\tinput = \"(?-i)%s\" % input\n\n\tfilter_regex = None\n\tif where == 1: # message\n\t\tfilter_regex = input\n\telif where == 2: # prefix\n\t\tfilter_regex = \"%s\\\\t\" % input\n\telse: # prefix | message\n\t\tfilter_regex = input # TODO: impossible with current filter regex\n\n\treturn \"!%s\" % filter_regex", "def clean_errors(self):\n self._vim.eval('clearmatches()')\n self._errors = []\n self._matches = []\n # Reset Syntastic notes - TODO: bufdo?\n self._vim.current.buffer.vars['ensime_notes'] = []", "def listenForPatterns(self, patterns):\n self._patterns = patterns\n for pattern in self._patterns:\n if len(pattern) > self._patternLimit:\n self._patternLimit = len(pattern)\n \n if self._enabled:\n self.disable()\n self.enable()", "def valid_from(self, valid_from):\n\n self._valid_from = 
valid_from", "def isValid(text):\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))", "def get_regex_mismatch_error_text(field_name, source_regex):\n\n\treturn(\"Value entered for '{0}' does not match regex '{1}'\"\n\t\t .format(field_name, source_regex.pattern))", "def on_regex_search_toggle(self, event):\r\n\r\n if self.m_regex_search_checkbox.GetValue():\r\n update_autocomplete(self.m_searchfor_textbox, \"regex_search\")\r\n else:\r\n update_autocomplete(self.m_searchfor_textbox, \"literal_search\")\r\n event.Skip()", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def clean(self):\n start_time = self.cleaned_data.get('start_time', None)\n end_time = self.cleaned_data.get('end_time', None)\n\n if start_time is None:\n self._errors['start_time'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('Please choose a start time')\n\n if end_time is None:\n self._errors['end_time'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('Please choose an end time')\n\n if end_time <= start_time:\n self._errors['end_time'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('The end time must be AFTER the start time')\n \n conflict_checker = ConflictChecker()\n if conflict_checker.does_timeslot_conflict(TimeSlot(start_time, end_time)):\n self._errors['end_time'] = self.error_class(['Please choose a different time.'])\n raise forms.ValidationError('Sorry! That time conflicts with another event reservation or message! Please choose another one.')\n \n return self.cleaned_data", "def _init_good_paper(self):\n self.good_paper = [False] * len(self.src_raw)\n for i in range(len(self.src_raw)):\n # if any(pattern in ' '.join(self.src_raw[i]) for pattern in self.good_patterns):\n if any(pattern.lower() in ' '.join(self.src_raw[i]).lower() for pattern in self.good_patterns):\n self.good_paper[i] = True", "def Validate(self):\n \n hklmin = self.hklmin_txtCtrl.GetValue()\n hklmax = self.hklmax_txtCtrl.GetValue()\n hklsteps = self.hkl_steps_ctrl.GetValue()\n \n wmin = self.wmin_txtCtrl.GetValue()\n wmax = self.wmax_txtCtrl.GetValue()\n wsteps = self.w_steps_ctrl.GetValue()\n \n kx = self.kx_txtCtrl.GetValue()\n ky = self.ky_txtCtrl.GetValue()\n kz = self.kz_txtCtrl.GetValue()\n \n zmin = self.zmin_ctrl.GetValue()\n zmax = self.zmax_ctrl.GetValue()\n colorbar_bool = self.color_bar_box.GetValue()\n \n temp = self.temp_ctrl.GetValue()\n sphavg_bool = self.spherical_avg_box.GetValue()\n \n bgColor = \"pink\"\n failed = False\n \n #Validate hkl values\n num_hklmin = None\n num_hklmax = None\n try:\n num_hklmin = float(hklmin)*np.pi\n self.hklmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_hklmax = float(hklmax)*np.pi\n self.hklmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate w values\n num_wmin = None\n num_wmax = None\n try:\n num_wmin = float(wmin)\n self.wmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_wmax = float(wmax)\n self.wmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate kx,ky,kz,temp,zmin,zmax values\n num_kx = None\n num_ky = None\n num_kz = None\n num_temp = 
None\n num_zmin = None\n num_zmax = None\n try:\n num_kx = float(kx)\n self.kx_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kx_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_ky = float(ky)\n self.ky_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.ky_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n try:\n num_kz = float(kz)\n self.kz_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kz_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_temp = float(temp)\n self.temp_ctrl.SetBackgroundColour(\"white\")\n except:\n self.temp_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmin = float(zmin)\n self.zmin_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmin_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmax = float(zmax)\n self.zmax_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmax_ctrl.SetBackgroundColour(bgColor)\n failed = True\n \n #Validate File Fields\n int_str = self.int_file_txtCtrl.GetValue()\n spin_str = self.spin_file_txtCtrl.GetValue()\n tau_str = self.tau_file_txtCtrl.GetValue()\n out_str = self.output_file_txtCtrl.GetValue()\n if int_str:\n self.int_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.int_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if spin_str:\n self.spin_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.spin_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if tau_str:\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if out_str:\n self.output_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.output_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n \n direction = {}\n direction['kx'] = num_kx\n direction['ky'] = num_ky\n direction['kz'] = num_kz\n hkl_interval = [num_hklmin, num_hklmax, int(self.hkl_steps_ctrl.GetValue())]\n w_interval = [num_wmin, num_wmax, int(self.w_steps_ctrl.GetValue())]\n \n tau_text = ''\n try:\n tau_file = open(tau_str,'r')\n tau_text = tau_file.read()\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n\n items = tau_text.split()\n if len(items)%3 and not len(items):\n failed = True\n\n tau_list = []\n i = 0\n while not failed and i < len(items)-3:\n tau1, tau2, tau3 = None, None, None\n try:\n tau1 = float(items[i])\n tau2 = float(items[i+1])\n tau3 = float(items[i+2])\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n tau_list.append([tau1,tau2,tau3])\n i+=3\n \n self.Refresh()\n# self.window.Show(True,True)\n \n plotstats = [zmin, zmax, colorbar_bool]\n \n return failed, hkl_interval, w_interval, tau_list, direction, num_temp, sphavg_bool, plotstats", "def test_match_validate_any(self):\n self.analyzer = IBANAnalyzer(None, validate=True)\n self.paste.body = \"Mutlipe IBANS: DE89 3704 0044 0532 0130 00 and FR14 2004 1010 0505 0001 3 should not match\"\n match = self.analyzer.match(self.paste)\n self.assertTrue(match)\n\n # The validate method should filter the wrong FR IBAN out\n self.assertEqual(1, len(match))\n self.assertEqual(\"DE89 3704 0044 0532 0130 00\", match[0])", "def check_entries():\n\tfor i, entry in enumerate(frame.entries):\n\t\tif i==0 or i==5:\n\t\t\tif entry.get().isalnum():\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tframe.entries[i].config(bg=ERROR_COLOR)\n\t\t\t\traise ValueError(\"user must be 
alphanumeric\")\n\t\telif i==(len(frame.entries)-1):\n\t\t\tif not entry.get().isdigit():\n\t\t\t\tframe.entries[i].config(bg=ERROR_COLOR)\n\t\t\t\traise ValueError(\"duration should be a positive digit\")\n\t\telse:\n\t\t\tif entry.get().isdigit():\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tframe.entries[i].config(bg=ERROR_COLOR)\n\t\t\t\traise ValueError(\"ip field \"+str(i+1)+\" type is incorrect\")\n\treturn True", "def validate(self):\n super().validate()\n frame = getattr(self, 'frame', None)\n if frame is None:\n raise ValueError('Missing columns %s since no frame' % ', '.join(\n [c[0] for c in self.col_regexps]))\n for col_name, c_re in self.col_regexps:\n if col_name not in self.frame:\n raise ValueError('Missing required column %s' % col_name)\n if c_re is None:\n continue # Just verified that column is present\n c_re_c = re.compile(c_re)\n for date, item in self.frame[col_name].iteritems():\n if not c_re_c.match(item):\n raise ValueError(\n 'In column %s, index %s, item %s fails regexp %s' % (\n col_name, date, item, c_re))", "def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True", "def validate_syntax(self):\n self._validate_network_prefix()\n self._validate_zero_network()\n self._validate_families()\n self._validate_unicast_addresses()\n self._validate_addresses()\n self._validate_gateway()\n self._validate_metric()", "def validate():", "def on_matching_rules(self, matching_rules):\n pass", "def pattern_with_digits_validate_regular_expression(cls, value):\n if value is None:\n return value\n\n if not re.match(r\"^\\d{10}$\", value):\n raise ValueError(r\"must validate the regular expression /^\\d{10}$/\")\n return value", "def test_email_against_pattern_with_asterix_prefix(create_user):\n emails = [\"[email protected]\"]\n patterns = [\"*bar.com\"]\n assert create_user.preprocess_pattern(emails, patterns) == True", "def construct_variable_regex(before, after, mismatches):\n\tif mismatches == 0:\n\t\treturn f\"{before}(.*){after}\"\n\t\n\t# get a regex for a mismatch in every place in before and after sequences\n\tbefores = create_mismatches_regex([before], mismatches)\n\tafters = create_mismatches_regex([after], mismatches)\n\t\n\t# combine each before and after regex with (.+) in the middle\n\tregexes = []\n\tfor b in befores.split(\"|\"):\n\t\tfor a in afters.split(\"|\"):\n\t\t\tregexes.append(f\"{b}(.*){a}\")\n\treturn \"|\".join(regexes)", "def clean_password1(self):\n if 'password1' in self.cleaned_data and self.cleaned_data['password1']:\n PWD_MIN_CHAR = 8\n PWD_MAX_CHAR = 45\n\n pattern = \"(?=^.{%i,%i}$)((?=.*\\\\d)(?=.*[A-Z])(?=.*[a-z])(?=.*[^A-Za-z0-9]))^.*\" % (PWD_MIN_CHAR, PWD_MAX_CHAR)\n\n if re.match(pattern, self.cleaned_data['password1']) is None:\n raise forms.ValidationError('Valid password should contain at least %i alphanumeric characters. Contain both upper and lower case letters. Contain at least one number (for example, 0-9). 
Contain at least one special character (for example,!@#$%%^&*()+=-[]\\\\\\';,./{}|\\\":?~_<>)' % PWD_MIN_CHAR)\n\n return self.cleaned_data['password1']", "def passwordValidate(form, field):\n\n pwd_regexp = compile(r'^.*(?=.{8,})(?=.*[a-zA-Z])(?=.*?[A-Z])(?=.*\\d)[a-zA-Z0-9!@£$%^&*()_+={}?:~\\[\\]]+$')\n\n if not fullmatch(pwd_regexp, field.data):\n raise ValidationError(message='Password must match the specific pattern')", "def validated_input(input_msg: str, error_msg: str, validator, screenshot:str =None):\n while(True):\n reset_screen()\n\n if screenshot is not None:\n print(screenshot)\n\n data = input(input_msg)\n\n try:\n return validator(data)\n except:\n reset_screen()\n popup(error_msg.format(data), screenshot)\n input(\"\")", "def validate(self, document) -> None:\n if not self._re.match(document.text):\n raise ValidationError(\n message=self._message, cursor_position=document.cursor_position\n )", "def testBoardRegularExpression(self):\n self.assertEqual(self.boards.SelectBoards(['T.*r&^Po']),\n {'T.*r&^Po': 2, 'all': 2})", "def word_and_pattern (word,pattern):\r\n for i in range(len(pattern)):\r\n if pattern[i]!= '_' and pattern.count(pattern[i]) != word.count(pattern[i]):\r\n return False\r\n return True", "def get_regex_format(self, case_sensitive=True):\n\n if case_sensitive is True:\n c = self.cursor()\n c.execute('PRAGMA case_sensitive_like=true')\n elif case_sensitive is False:\n c = self.cursor()\n c.execute('PRAGMA case_sensitive_like=false')\n elif case_sensitive is None:\n pass\n else:\n raise errors.UnknownCaseSensitiveError(value=case_sensitive)\n\n return \"{target:s} REGEXP {pattern:s}\"", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def validate_value(self, key, new_value): # pylint: disable=unused-argument\n\n if self.family == ExclFamily.network:\n ip_network(new_value)\n if self.family == ExclFamily.regex:\n try:\n re.compile(new_value)\n except re.error:\n raise ValueError('Invalid regex')\n\n return new_value", "def build_regex(self) -> typing.Pattern:\n self._regex = re.compile(\"|\".join(sorted(self._includes)))\n return self._regex", "def validate(self, request):\n values = {\n 'robot_match_comments':request.POST['robot_match_comments'],\n 'did_foul':'did_foul' in request.POST,\n 'did_technical_foul':'did_technical_foul' in request.POST,\n 'foul_description':request.POST['foul_description'],\n 'did_shoot':'did_shoot' in request.POST,\n 'auto_1':request.POST['auto_1'],\n 'auto_2':request.POST['auto_2'],\n 'auto_3':request.POST['auto_3'],\n 'auto_miss':request.POST['auto_miss'],\n 'teleop_1':request.POST['teleop_1'],\n 'teleop_2':request.POST['teleop_2'],\n 'teleop_3':request.POST['teleop_3'],\n 'teleop_5':request.POST['teleop_5'],\n 'teleop_miss':request.POST['teleop_miss'],\n 'shooting_description':request.POST['shooting_description'],\n 'did_climb':'did_climb' in request.POST,\n 'climb_start':request.POST['climb_start'],\n 'climb_finish':request.POST['climb_finish'],\n 'level_reached':request.POST.get('level_reached'),\n 'frisbees_dumped':request.POST['frisbees_dumped'],\n 'climbing_description':request.POST['climbing_description'],\n 'did_human_load':'did_human_load' in request.POST,\n 'did_ground_load':'did_ground_load' in request.POST,\n 
'auto_frisbees_ground_loaded':\\\n request.POST['auto_frisbees_ground_loaded'],\n 'loading_description':request.POST['loading_description'],\n }\n if ((values['did_foul'] or values['did_technical_foul']) and\n not values['foul_description']):\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'Please enter a description of the foul(s) the robot committed',\n new_values\n )\n if values['did_shoot']:\n try:\n values['auto_1'] = int(values['auto_1'])\n values['auto_2'] = int(values['auto_2'])\n values['auto_3'] = int(values['auto_3'])\n values['auto_miss'] = int(values['auto_miss'])\n values['teleop_1'] = int(values['teleop_1'])\n values['teleop_2'] = int(values['teleop_2'])\n values['teleop_3'] = int(values['teleop_3'])\n values['teleop_5'] = int(values['teleop_5'])\n values['teleop_miss'] = int(values['teleop_miss'])\n except ValueError:\n raise ValidationError(\n 'You must enter a number for all of the shooting numbers',\n self.__dict__.copy().update(values)\n )\n if values['did_climb']:\n try:\n values['climb_start'] = int(values['climb_start'])\n values['climb_finish'] = int(values['climb_finish'])\n try:\n values['level_reached'] = int(values['level_reached'])\n except TypeError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'You must select a level the robot climbed too',\n new_values\n )\n values['frisbees_dumped'] = int(values['frisbees_dumped'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All climbing related numbers must be numbers',\n new_values\n )\n if values['did_ground_load']:\n try:\n values['auto_frisbees_ground_loaded'] = int(\n values['auto_frisbees_ground_loaded'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All numbers of frisbees ground loaded must be numbers',\n new_values\n )\n return values", "def test_pattern(pattern, fields):\n if not pattern: # \"empty\" pattern\n return True\n\n def eval_exp(text):\n m = re.match(r'^(\\$(\\d+))?(!)?/([^/]*)/$', text)\n try:\n if m: # regular expression\n _, num, neg, pat = m.groups()\n num = int(num) if num else 0 # if no `$i` specified, default to `$0`\n m = re.search(pat, fields[num])\n logging.info(u\"regex: '%s' %s~ /%s/\" % (fields[num], neg or u'', pat))\n return bool(m) != bool(neg)\n else: # expression\n exp = translate_fields(text, fields, u'_') # replace non-exist `$i` with u'_'\n logging.info(u'exp: %s' % exp)\n return bool(exp and eval(exp))\n except Exception, e:\n logging.debug(unicode(e))\n return False\n\n if u',' not in pattern: # \"regular expression\" or \"expression\" pattern\n return eval_exp(pattern)\n else: # \"begpat, endpat\" pattern\n global SWITCH_ON\n\n value = False\n\n begpat, endpat = [s.strip() for s in pattern.split(u',')]\n if eval_exp(begpat):\n SWITCH_ON = True\n if SWITCH_ON:\n value = True\n if eval_exp(endpat):\n SWITCH_ON = False\n\n return value", "def match_rule_patterns(fixed_text, cur=0):\n pattern = exact_find_in_pattern(fixed_text, cur, RULE_PATTERNS)\n # if len(pattern) == 1:\n if len(pattern) > 0:\n return {\"matched\": True, \"found\": pattern[0]['find'],\n \"replaced\": pattern[0]['replace'], \"rules\": pattern[0]['rules']}\n else:\n return {\"matched\": False, \"found\": None,\n \"replaced\": fixed_text[cur], \"rules\": None}", "def validatePhoneNumberType(self):\n ## Declaring a Flag to control a while loop\n phone_number_type_ok = False\n ## While loop to 
have user retry their input if they enter incorrectly\n while not phone_number_type_ok:\n if self.phone_number_type.lower() in self.valid_phone_number_types:\n phone_number_type_ok = True\n return True\n\n else:\n return False", "def validate_urlpattern_with_options(cls):\n p = re.compile('\\{\\w+\\}')\n for urlpattern in cls.urlpatterns:\n matches = p.findall(urlpattern)\n not_found = copy.copy(matches)\n for match in matches:\n if not cls.urlpattern_options.get(match.strip('{}')):\n raise ImproperlyConfigured('Keyword not found for placeholder in urlpattern. Got \\'{}\\'.'.format(match.strip('{}')))\n not_found.remove(match)\n if not_found:\n raise ImproperlyConfigured('Unexpected placeholder(s) in urlpattern. No matching key in urlpattern_options. Got placeholders {}.'.format(not_found))\n return True", "def process_regex_form_data(pattern, flags, text, methods, method):\n multi_match = ''\n single_match = ''\n flags = \"|\".join(flags)\n regex = eval('re.compile(r\"{}\", {})'.format(pattern, flags))\n # if the user fails to select a method it defaults to the re.match method\n if not method:\n match = regex.match(text)\n # else convert the selected method from a string to a regex object by\n # searching regex_method returned by the regex_methods function.\n else:\n match = methods[method](regex, text)\n # if a match is found ...\n if match is not None:\n # check if the method used is the \"re.findall\" or \"re.finditer\"\n # method as these do not support the match.group() method\n if method == 're.findall':\n multi_match = match\n elif method == 're.finditer':\n multi_match = [i.group() for i in match]\n else:\n single_match = match.group()\n return single_match, multi_match", "def test_regex_matches_multiple_valid(self):\n token_1 = \"NDY3MjIzMjMwNjUwNzc3NjQx.XsyWGg.uFNEQPCc4ePwGh7egG8UicQssz8\"\n token_2 = \"NDcyMjY1OTQzMDYyNDEzMzMy.XsyWMw.l8XPnDqb0lp-EiQ2g_0xVFT1pyc\"\n message = f\"garbage {token_1} hello {token_2} world\"\n\n results = token_remover.TOKEN_RE.finditer(message)\n results = [match[0] for match in results]\n self.assertCountEqual((token_1, token_2), results)", "def compile_patterns(patterns: List[str], anchor: Optional[str]):\n start = ending = ''\n if anchor == 'start':\n patterns = [pattern[1:] for pattern in patterns]\n start = '^'\n elif anchor == 'end':\n patterns = [pattern[:-1] for pattern in patterns]\n ending = '$'\n\n if patterns:\n core = '|'.join(patterns)\n else:\n core = CompanyCleaner.MATCH_NOTHING # If iter is empty, return regex that can match nothing.\n\n return re.compile(start + '(?:' + core + ')+' + ending)", "def validate_abstract_pattern(self):\n valid = True\n if not self._validate_pattern_fields():\n valid = False\n if not self._validate_entities():\n valid = False\n return valid", "def search(self, pattern):\n result = set()\n\n if re.search(pattern, 'any'):\n result |= set([rule for rule in self.object.get_rules() if\n not rule.ip_source or not rule.ip_dest or not rule.port_source or not rule.port_dest])\n if re.search(pattern, 'ip'):\n result |= set([rule for rule in self.object.get_rules() if not rule.protocol])\n result |= set([rule for rule in self.object.get_rules() if rule.search(pattern)])\n self.model.clear()\n self.add_rules(list(result))", "def gen_matches(self, subseq, startpos):\n \n raise TypeError, \"PatternBase is an abstract base class\"", "def load_event_patterns():\n # Initialize an empty pattern database\n patterns = collections.defaultdict(list)\n # Current trigger is used during pattern reading\n current_trigger = None\n 
\n # Pack up the filenames with the type patterns stored in there\n types_and_files = ((\"Increase\", os.path.join(pattern_folder, increase_pattern_file)),\n (\"Decrease\", os.path.join(pattern_folder, decrease_pattern_file)),\n (\"Change\", os.path.join(pattern_folder, change_pattern_file)),\n (\"NegChange\", os.path.join(pattern_folder, neg_change_pattern_file)))\n \n # Read in the patterns\n for change_type, filename in types_and_files:\n with open(filename, 'r') as filee:\n for line in filee:\n split = line.strip().split()\n # Skip comments and empty lines\n if not len(split): continue\n if split[0][0] == '#': continue\n \n # If the current line is a TRIGGER line, update which trigger we are working with\n if split[0].upper() == 'TRIGGER':\n # Some minor syntax checking of trigger script\n assert len(split) == 2, \"TRIGGER must consist of the 'TRIGGER' keyword and the trigger, and nothing else\"\n current_trigger = split[1]\n # If the current line is something else, it is a pattern for the given trigger\n else:\n # Do some minor correctness checking of the trigger script\n assert current_trigger, \"A trigger must be specified before you can start writing patterns!\"\n assert split[0].upper() == 'VAR' or split[0].upper() == \"THN\", \"Keyword \" + split[0] + \" not recognized!\"\n \n # Build new pattern based on information given in script\n new_pattern_is_thing = (split[0].upper() == \"THN\")\n if change_type == \"NegChange\":\n new_pattern_change_type = \"Change\"\n is_negative = True\n else:\n new_pattern_change_type = change_type\n is_negative = False\n \n new_pattern = Pattern(new_pattern_change_type, new_pattern_is_thing)\n new_pattern.is_negative = is_negative\n \n # Extract the subpatterns by splitting on semicolon\n subpatterns_domain = split[1:]\n subpatterns = []\n while ';' in subpatterns_domain:\n first_subpattern = subpatterns_domain[:subpatterns_domain.index(';')]\n subpatterns.append(first_subpattern)\n subpatterns_domain = subpatterns_domain[subpatterns_domain.index(';')+1:]\n # Then add the final subpattern that is not split by any semicolons\n subpatterns.append(subpatterns_domain)\n \n # Do some syntax checking of subpatterns;\n # Check that there is only one T per subpattern, and only one N or S.\n # Check that each element is either a string (\"\"), a dependency, or X,T,N or S\n for subpattern in subpatterns:\n assert subpattern.count('T') <= 1, \"Error in line \" + line + \": There can only be one Trigger (T) per subpattern!\"\n assert subpattern.count('N')+subpattern.count('S') <= 1, \"Error in line \" + line + \": There can only be one N or S target per subpattern!\"\n for element in subpattern:\n accept = False\n # Element is string\n if element[0] == '\"' and element[-1] == '\"':\n accept = True\n # Element is accepted variable\n elif element in ['T', 'N', 'X', 'Y', 'S']:\n accept = True\n # Element is a dependency\n elif element in ['prep', 'pobj', 'amod', 'nsubj', 'nsubjpass', \n 'dobj', 'nn', 'vmod', 'iobj', 'advmod', 'dep',\n 'xcomp', 'aux', 'ccomp', 'rcmod', 'pcomp',\n 'appos', 'advcl', 'mark', 'csubj']:\n accept = True\n # Element is a negation\n elif element == \"!\":\n accept = True\n assert accept, \"Element '\" + element + \"' is not an accepted element type of a pattern!\"\n # Do some correctness checking of entire pattern\n # Needs at least one T, and at least one N or S\n t_count = 0; n_s_count = 0\n for subpattern in subpatterns:\n for element in subpattern:\n if element == 'T':\n t_count += 1\n if element in ['N', 'S']:\n n_s_count += 1\n assert 
t_count > 0, \"There needs to be at least one occurence of the trigger word in a pattern!\"\n assert n_s_count > 0, \"There needs to be at least one occurence of N or S in a pattern!\"\n \n # Assuming that the entire pattern is accepted, add it to the pattern base\n new_pattern.subpatterns = subpatterns\n patterns[current_trigger].append(new_pattern)\n collections.defaultdict(list) \n return patterns", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n if self._color == 'red':\n # if destination within the palace:\n if (to_col in range(3,6) and to_row in range(3) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col-from_col) == 1 and to_row-from_row == 0) or\n (to_col-from_col == 0 and abs(to_row-from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4,1] and to_coordinates in [[3,0],[3,2],[5,0],[5,2]]) or\n (from_coordinates in [[3,0],[3,2],[5,0],[5,2]] and to_coordinates == [4,1]))\n )\n ):\n return True\n else:\n return False\n\n if self._color == 'blue':\n # if destination within the palace:\n if (to_col in range(3,6) and to_row in range(7,10) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col-from_col) == 1 and to_row-from_row == 0) or\n (to_col-from_col == 0 and abs(to_row-from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4,8] and to_coordinates in [[3,7],[3,9],[5,7],[5,9]]) or\n (from_coordinates in [[3,7],[3,9],[5,7],[5,9]] and to_coordinates == [4,8]))\n )\n ):\n return True\n else:\n return False", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n if self._color == 'red':\n # if destination within the palace:\n if (to_col in range(3, 6) and to_row in range(3) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col - from_col) == 1 and to_row - from_row == 0) or\n (to_col - from_col == 0 and abs(to_row - from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4, 1] and to_coordinates in [[3, 0], [3, 2], [5, 0], [5, 2]]) or\n (from_coordinates in [[3, 0], [3, 2], [5, 0], [5, 2]] and to_coordinates == [4, 1]))\n )\n ):\n return True\n else:\n return False\n\n if self._color == 'blue':\n # if destination within the palace:\n if (to_col in range(3, 6) and to_row in range(7, 10) and\n # and the move is 1 horizontal or 1 vertical:\n (((abs(to_col - from_col) == 1 and to_row - from_row == 0) or\n (to_col - from_col == 0 and abs(to_row - from_row) == 1)) or\n # or the move is one diagonal:\n ((from_coordinates == [4, 8] and to_coordinates in [[3, 7], [3, 9], [5, 7], [5, 9]]) or\n (from_coordinates in [[3, 7], [3, 9], [5, 7], [5, 9]] and to_coordinates == [4, 8]))\n )\n ):\n return True\n else:\n return False", "def format_tregex(results, whole=False):\n import re\n\n if countmode:\n return results\n\n if not results:\n return\n\n done = []\n if whole:\n fnames, snames, results = zip(*results)\n\n if 'l' in show or 'x' in show:\n lemmata = lemmatiser(results, gettag(search.get('t'), lemmatag))\n else:\n lemmata = [None for i in results]\n for word, lemma in zip(results, lemmata):\n 
bits = []\n if exclude and exclude.get('w'):\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('w'), word):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('l'), lemma):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('p'), word):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('x'), lemma):\n continue\n if exclude and excludemode == 'all':\n num_to_cause_exclude = len(list(exclude.keys()))\n current_num = 0\n if exclude.get('w'):\n if re.search(exclude.get('w'), word):\n current_num += 1\n if exclude.get('l'):\n if re.search(exclude.get('l'), lemma):\n current_num += 1\n if exclude.get('p'):\n if re.search(exclude.get('p'), word):\n current_num += 1\n if exclude.get('x'):\n if re.search(exclude.get('x'), lemma):\n current_num += 1 \n if current_num == num_to_cause_exclude:\n continue \n\n for i in show:\n if i == 't':\n bits.append(word)\n if i == 'l':\n bits.append(lemma)\n elif i == 'w':\n bits.append(word)\n elif i == 'p':\n bits.append(word)\n elif i == 'x':\n bits.append(lemma)\n joined = '/'.join(bits)\n done.append(joined)\n if whole:\n done = zip(fnames, snames, done)\n return done" ]
[ "0.6140925", "0.6094179", "0.60423654", "0.56661874", "0.5438786", "0.53020585", "0.5268809", "0.51951706", "0.5172561", "0.50293213", "0.501354", "0.49975908", "0.49453467", "0.49100548", "0.49090192", "0.48892468", "0.48374686", "0.48184943", "0.47469115", "0.46745083", "0.46548316", "0.46476665", "0.46304142", "0.46202356", "0.4609982", "0.45809138", "0.45758003", "0.4560009", "0.45496055", "0.45484376", "0.45470876", "0.4527851", "0.4498438", "0.44914716", "0.44876218", "0.44813633", "0.4481064", "0.44766232", "0.44687748", "0.44656947", "0.44631705", "0.44541675", "0.4450291", "0.44401273", "0.44378433", "0.44291854", "0.442211", "0.44164637", "0.44078144", "0.43986633", "0.4392483", "0.4390163", "0.4371439", "0.4366746", "0.43658563", "0.4356039", "0.43525046", "0.43474522", "0.43361023", "0.43308637", "0.4329497", "0.43276912", "0.43266064", "0.43250325", "0.4322246", "0.43186596", "0.43162432", "0.43157533", "0.43075615", "0.43039554", "0.43037182", "0.43031678", "0.42928773", "0.42912647", "0.42895573", "0.42857233", "0.42820168", "0.4274324", "0.42703438", "0.42698705", "0.42616248", "0.42582464", "0.42570093", "0.42561707", "0.42504588", "0.42418113", "0.4236668", "0.42351264", "0.42344573", "0.4230051", "0.42238018", "0.42197314", "0.42187074", "0.42179435", "0.42134762", "0.42101613", "0.4206438", "0.41992262", "0.41967085", "0.41946885" ]
0.7344064
0
Refresh our list of what's on disk.
def updateDiskFileList(self):
    if self.m_curPath:
        # Get me just the files please.
        for _, _, files in os.walk(self.m_curPath):
            break
    else:
        files = []
    files.sort()
    if files != self.m_diskNames:
        self.m_diskNames[:] = files
        self.m_newNames[:] = []
        self.populateFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def refresh(self, list_of_tables):\n self.dismod_file.refresh(list_of_tables)", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self):\n self.__refresh()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def reload(self):", "def reload(self):", "def refresh_all(self) -> None:\n self._update_thread.force_refresh_folder(self.feed_cache)", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def refresh(self):\n f = open(self._filepath, 'r')\n self._raw_sysfs_data = f.read()\n f.close()\n self._process_raw_data()", "def reload(self):\n\n pass", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def Refresh(self):\n pass", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def reload(self):\n self.known_stations = {}\n self.read_noaa_stations()\n self.read_table_stations()\n self.last_reload_check_time = datetime.datetime.utcnow()\n LOGGER.info('Have %s known stations', len(self.known_stations.keys()))", "def refresh_source(self):\n pass", "def refresh(self):\n self.Refresh()", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refreshMTimes(self):\n del self.mtimesReset[:]\n for fileName, fileInfo in self.data.items():\n oldMTime = self.mtimes.get(fileName,fileInfo.mtime)\n self.mtimes[fileName] = oldMTime\n #--Reset mtime?\n if fileInfo.mtime != oldMTime and oldMTime != -1:\n fileInfo.setMTime(oldMTime)\n self.mtimesReset.append(fileName)", "def refresh(self):\n self.dir = dirs['app']\n ssBase = GPath(mwIniFile.getSetting('General','Screen Shot Base Name','ScreenShot'))\n if ssBase.head:\n self.dir = self.dir.join(ssBase.head)\n newData = {}\n reImageExt = re.compile(r'\\.(bmp|jpg)$',re.I)\n #--Loop over files in directory\n for fileName in self.dir.list():\n filePath = self.dir.join(fileName)\n maImageExt = reImageExt.search(fileName.s)\n if maImageExt and filePath.isfile(): \n newData[fileName] = (maImageExt.group(1).lower(),filePath.mtime)\n changed = (self.data != newData)\n self.data = newData\n return changed", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def invalidate_for_files(self):\r\n return []", "def reload(self):\n if os.path.exists(FileStorage.__file_path):\n with open(FileStorage.__file_path, \"r\", encoding=\"utf-8\") as f:\n loaded = json.load(f)\n for _id, v in loaded.items():\n cls = loaded[_id].pop(\"__class__\", None)\n try:\n loaded[_id][\"created_at\"] = datetime.strptime(\n loaded[_id][\"created_at\"], dt_format)\n loaded[_id][\"updated_at\"] = datetime.strptime(\n loaded[_id][\"updated_at\"], dt_format)\n except:\n pass\n FileStorage.__objects[_id] = FileStorage.class_models[cls](**v)", "def flush(self):\n for f in self.files:\n f.flush()", "def refresh(self):\n hasChanged = self.hasChanged()\n if 
hasChanged: self.loadIni()\n if len(self.loadFiles) > 255:\n del self.loadFiles[255:]\n self.safeSave()\n return hasChanged", "def update_hdu_list(self, rebuild=True):\n if not hasattr(self, 'hdu_list'):\n rebuild = True\n\n if rebuild:\n self.hdu_list = []\n self.hdu_map = {}\n\n # we don't know how many hdus there are, so iterate\n # until we can't open any more\n ext_start = 0\n else:\n # start from last\n ext_start = len(self)\n\n ext = ext_start\n while True:\n try:\n self._append_hdu_info(ext)\n except IOError:\n break\n except RuntimeError:\n break\n\n ext = ext + 1", "def updateList(self):\n self._recreateJobs()", "def refresh(self):\n ida_strlist.build_strlist()\n self.size = ida_strlist.get_strlist_qty()", "def refresh(self):\n self.fetch(False)", "def reload(self):\n puts('Reloading application...')\n local('touch ../reload.txt')", "def invalidate_for_files(self):\n return []", "def refresh(self) -> None:\n self._itempage.get()", "def update_list(self):\n Asset.update_list(self, uri_keys=('ems_sys', 'list'))", "def reloadfile(self, ):\n self.loadfile()", "def __loadListOfDocuments(self):\n\t\tfor value in default_paths():\n\t\t\titem = addNewListItemCalled([os.path.normpath(value)], self.ui.listWidget)\n\t\t\titem.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)\n\t\t\tt = item.font()\n\t\t\tt.setItalic(True)\n\t\t\titem.setFont(t)\n\n\t\t# load up state from storage...\n\t\tpaths = QSettings().value(\"paths\").toList()\n\t\tfor value in paths:\n\t\t\tstr = os.path.normpath(value.toString())\n\t\t\tif str not in default_paths():\n\t\t\t\taddNewListItemCalled([str], self.ui.listWidget, mutable=True)", "def loadFileList(self):\r\n try:\r\n data = open(self.filelist_file, 'rb')\r\n except IOError:\r\n '''print \"No SRTM cached file list. Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()\r\n return\r\n try:\r\n self.filelist = pickle.load(data)\r\n except:\r\n '''print \"Unknown error loading cached SRTM file list. 
Creating new one!\"'''\r\n if self.offline == 0:\r\n self.createFileList()", "def reset(self):\n self.fscore_history = []", "def refresh(self, filename, template_dir, cache_dir):\n\t\tself.update(filename, template_dir, cache_dir)\n\t\tself.tab_dep=1\n\t\tself.page=[]", "def _update_items(self):\n\n self._item_list = []\n for f in os.listdir(self._folder):\n # Skip text files\n # -> It is important that we don't delete the list file if the user puts it here!\n ext = os.path.splitext(f)[1]\n if ext not in ['.csv', 'txt']:\n self._item_list.append(f)", "def refresh(self):\n self._refresh_method()", "def reload(self):\n if file_exist(self.__file_path):\n with open(self.__file_path, \"r\", encoding=\"UTF-8\") as file:\n data = read_data(file)\n for key, value in data.items():\n instance = BaseModel(**value)\n FileStorage.__objects[key] = instance", "def _Refresh(self):\n raise NotImplementedError", "def refresh(self):\n self._list_of_points = []\n self._add_points()", "def loadRecentFiles(self):\n self.recentFiles.clear()\n for n in range(RECENTFILEMAX):\n rf = self.getSection(CFG_RECENT, str(n))\n if rf:\n self.recentFiles.append(rf)\n else:\n break", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh(self):\n if self.is_server_process and self.cache_manager.is_refreshing():\n raise RefreshInProgressError()\n catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()\n for catalog in catalogs:\n self._insert_request(self.refresh_queue, catalog, \"modify\")", "def reload(self):\n\n fn = self[\"~filename\"]\n saved = {}\n for key in self:\n if key in MIGRATE: saved[key] = self[key]\n self.clear()\n self[\"~filename\"] = fn\n self.__init__(fn)\n self.update(saved)", "def readdata(self, filepaths):\n pass", "def flush(self):\n\n # save ddocs\n all_ddocs = self.all_docs(startkey=u\"_design\", endkey=u\"_design/\\u9999\", include_docs=True)\n ddocs = []\n for ddoc in all_ddocs:\n doc = ddoc['doc']\n old_atts = doc.get('_attachments', {})\n atts = {}\n for name, info in old_atts.items():\n att = {}\n att['content_type'] = info['content_type']\n att['data'] = self.fetch_attachment(ddoc['doc'], name)\n atts[name] = att\n\n # create a fresh doc\n doc.pop('_rev')\n doc['_attachments'] = resource.encode_attachments(atts)\n\n ddocs.append(doc)\n\n # delete db\n self.server.delete_db(self.dbname)\n\n # we let a chance to the system to sync\n times = 0\n while times < 10:\n if self.dbname in self.server:\n break\n time.sleep(0.2)\n times += 1\n\n # recreate db + ddocs\n self.server.create_db(self.dbname)\n self.bulk_save(ddocs)", "def refresh_all(self):\n\t\t\n\t\tself.symbolsList.set_datasource(self.source)\n\t\tself.symbolsList.refresh()\n\t\t\n\t\tself.plotFrame.set_datasource(self.source)\n\t\tself.plotFrame.refresh()", "def refresh_list(self):\n if self._dominfo_lock.acquire(False):\n try:\n return self._refresh_list()\n finally:\n self._dominfo_lock.release()\n else:\n # wait until the refresh done by the other party is complete\n with self._dominfo_lock:\n pass", "def 
file_update(self, data):\n result = self.search(data)\n\n if result == True:\n index = self.hash_function(data)\n self.objects_list[index].remove(data)\n self.display_content_hashtable()\n\n if result == False:\n index = self.hash_function(data)\n self.objects_list[index].append(data)\n self.display_content_hashtable()", "def reload_cache(self):\n self.data = self.read_data_cache()", "def refresh_configuration(self):\n pass", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def updateFileData(self):\n with open(pagePath(self.pageName)) as f:\n self.fileData = f.read()\n self.lastUpdated = time.time()", "def refresh(self):\n\t\tself.driver.refresh()", "def flush(self):\n self.genomes = []", "def refresh(self, new_content):\n pass", "def reload(self):\n try:\n with open(FileStorage.__file_path) as json_file:\n dict_from_json = json.load(json_file)\n for key, value in dict_from_json.items():\n tmp = eval(value['__class__'])(**value)\n FileStorage.__objects[key] = tmp\n except(FileNotFoundError):\n pass", "def __loadRecent(self):\n self.recent = []\n Preferences.Prefs.rsettings.sync()\n rs = Preferences.Prefs.rsettings.value(recentNameFiles)\n if rs is not None:\n for f in Preferences.toList(rs):\n if QFileInfo(f).exists():\n self.recent.append(f)", "def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []", "def sync_all_lists(self):\r\n print(\"Started syncing influencer master lists with DB\")\r\n screen_names_on_lists = []\r\n self._add_or_update(screen_names_on_lists)\r\n print(\"Removing entries which are no longer on any list\")\r\n self._delete_entries_not_in_list(screen_names_on_lists) # remove entries from DB if they are on no list\r\n print(\"Sync complete\")", "def clearRecentFiles(self):\n self.recentFiles.clear()\n for n in range(RECENTFILEMAX):\n self.setSection(CFG_RECENT, str(n), None)", "def reindex(self):\n self.index.drop_db()\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n self.index.update_from_metadata(self.load_blob_metadata(blob_uuid))", "def flush(self):\n super().flush()\n self.dists = {}", "def syncfolder():", "def refresh(self):\n\n assets_model = self.data[\"model\"][\"assets\"]\n assets_model.clear()\n\n has = {\"children\": False}\n\n project = io.ObjectId(os.environ[\"MINDBENDER__PROJECT\"])\n assets = io.find({\"type\": \"asset\", \"parent\": project})\n for asset in sorted(assets, key=lambda i: i[\"name\"]):\n item = QtWidgets.QListWidgetItem(asset[\"name\"])\n item.setData(QtCore.Qt.ItemIsEnabled, True)\n item.setData(DocumentRole, asset)\n assets_model.addItem(item)\n has[\"children\"] = True\n\n if not has[\"children\"]:\n item = QtWidgets.QListWidgetItem(\"No assets found\")\n item.setData(QtCore.Qt.ItemIsEnabled, False)\n assets_model.addItem(item)\n\n assets_model.setFocus()\n assets_model.setCurrentRow(0)\n self.data[\"button\"][\"load\"].hide()\n self.data[\"button\"][\"stop\"].hide()", "def refresh(self):\n if self.isDamaged:\n raise StateError(self.fileInfo.name+_(': Attempted to access damaged file.'))\n if not self.isLoaded:\n try:\n self.load()\n self.isLoaded = True\n except Tes3ReadError, error:\n self.isDamaged = True\n if not error.inName:\n error.inName = self.fileInfo.name\n raise", "def _update_ondisk(self):\n with 
open(self.orig_path, \"w\") as f:\n f.write(self.content)", "def refreshRenamed(self):\n changed = False\n pRenamed = dirs['mods'].join('Mash','Official_Local.csv')\n if not pRenamed.exists():\n changed = bool(Installer.off_local)\n self.renamedSizeDate = (0,0)\n Installer.off_local.clear()\n elif self.renamedSizeDate != (pRenamed.size,pRenamed.mtime):\n self.renamedSizeDate = (pRenamed.size,pRenamed.mtime)\n off_local = {}\n reader = bolt.CsvReader(pRenamed)\n for fields in reader:\n if len(fields) < 2 or not fields[0] or not fields[1]: continue\n off,local = map(string.strip,fields[:2])\n if not reModExt.search(off) or not reModExt.search(local): continue\n off,local = map(GPath,(off,local))\n if off != local: off_local[off] = local\n reader.close()\n changed = (off_local != Installer.off_local)\n Installer.off_local = off_local\n #--Refresh Installer mappings\n if changed:\n for installer in self.data.itervalues():\n installer.refreshDataSizeCrc()\n #--Done\n return changed", "def do_maint (self):\n self.log.info (\"cleanup\")\n current = glob.glob (os.path.join (self.infodir, self.infomask))\n removed = set(self.infomap) - set(current)\n for fn in removed:\n self.log.debug (\"forgetting file %s\", fn)\n del self.infomap[fn]\n self.log.info (\"current: %i, removed: %i\", len(current), len(removed))\n self.maint_timer = threading.Timer (self.maint_period, self.do_maint)\n self.maint_timer.start()", "def refresh(self):\n for budget in self.budgets:\n budget.refresh()\n self._budgets = None", "def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.add(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n orderedlist_content = []\n orderedlist_content = self.display_content()\n\n for i in orderedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()", "def update_reports():\n return os.listdir('./reports')", "def _save_sync_list(self):\n\t\tfp = open(self.sync_file, 'w')\n\t\tself.sync_list.write(fp)\n\t\tfp.close()", "def file_update(self, data):\n file = open(\"../util/LinkedList_File\", \"r+\")\n file.truncate(0)\n file.close()\n if self.search_item(data) == True:\n self.remove(data)\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n linkedlist_content = []\n linkedlist_content = self.display_content()\n\n for i in linkedlist_content:\n file.write(i + \" \", )\n file.close()\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()\n else:\n self.append(data)\n\n file = open(\"../util/LinkedList_File\", \"a+\")\n\n linkedlist_content = []\n linkedlist_content = self.display_content()\n\n for i in linkedlist_content:\n file.write(i + \" \")\n file.close()\n\n file = open(\"../util/LinkedList_File\", \"r\")\n for i in file:\n print(i)\n file.close()", "def compute_update_data_disks(self):\n self.update_data_disks_ = []\n compute_config = self.config_['compute']\n\n # get update data disk names\n for zone, disk in zip(compute_config['zones'], compute_config['data_disks']):\n # create update disk names\n update_disk_name = '%s-update' 
%(disk)\n self.update_data_disks_.append(update_disk_name)", "def refresh(self):\n self.logging.refresh()", "def refreshSizeCrcDate(apRoot,old_sizeCrcDate,progress=None,removeEmpties=False,fullRefresh=False):\n rootIsMods = (apRoot == dirs['mods']) #--Filtered scanning for mods directory.\n norm_ghost = (rootIsMods and Installer.getGhosted()) or {}\n ghost_norm = dict((y,x) for x,y in norm_ghost.iteritems())\n rootName = apRoot.stail\n progress = progress or bolt.Progress()\n new_sizeCrcDate = {}\n bethFiles = bush.bethDataFiles\n skipExts = Installer.skipExts\n asRoot = apRoot.s\n relPos = len(apRoot.s)+1\n pending = set()\n #--Scan for changed files\n progress(0,_(\"%s: Pre-Scanning...\") % rootName)\n progress.setFull(1)\n dirDirsFiles = []\n emptyDirs = set()\n for asDir,sDirs,sFiles in os.walk(asRoot):\n progress(0.05,_(\"%s: Pre-Scanning...\\n%s\") % (rootName,asDir[relPos:]))\n if rootIsMods and asDir == asRoot:\n sDirs[:] = [x for x in sDirs if x.lower() not in Installer.dataDirsMinus]\n dirDirsFiles.append((asDir,sDirs,sFiles))\n if not (sDirs or sFiles): emptyDirs.add(GPath(asDir))\n progress(0,_(\"%s: Scanning...\") % rootName)\n progress.setFull(1+len(dirDirsFiles))\n for index,(asDir,sDirs,sFiles) in enumerate(dirDirsFiles):\n progress(index)\n rsDir = asDir[relPos:]\n inModsRoot = rootIsMods and not rsDir\n apDir = GPath(asDir)\n rpDir = GPath(rsDir)\n for sFile in sFiles:\n #print '...',sFile\n ext = sFile[sFile.rfind('.'):].lower()\n rpFile = rpDir.join(sFile)\n if inModsRoot:\n if ext in skipExts: continue\n if not rsDir and sFile.lower() in bethFiles: continue\n rpFile = ghost_norm.get(rpFile,rpFile)\n isEspm = not rsDir and (ext == '.esp' or ext == '.esm')\n apFile = apDir.join(sFile)\n size = apFile.size\n date = apFile.mtime\n oSize,oCrc,oDate = old_sizeCrcDate.get(rpFile,(0,0,0))\n if size == oSize and (date == oDate or isEspm):\n new_sizeCrcDate[rpFile] = (oSize,oCrc,oDate)\n else:\n pending.add(rpFile)\n #--Remove empty dirs?\n if settings['bash.installers.removeEmptyDirs']:\n for dir in emptyDirs: \n try: dir.removedirs()\n except OSError: pass\n #--Force update?\n if fullRefresh: pending |= set(new_sizeCrcDate)\n changed = bool(pending) or (len(new_sizeCrcDate) != len(old_sizeCrcDate))\n #--Update crcs?\n if pending:\n progress(0,_(\"%s: Calculating CRCs...\\n\") % rootName)\n progress.setFull(1+len(pending))\n try:\n us = unicode(rpFile.s, sys.getfilesystemencoding())\n except TypeError:\n us = rpFile.s\n for index,rpFile in enumerate(sorted(pending)):\n string = (_(\"%s: Calculating CRCs...\\n%s\") % \n (rootName, us)\n )\n progress(index,string)\n apFile = apRoot.join(norm_ghost.get(rpFile,rpFile))\n crc = apFile.crc\n size = apFile.size\n date = apFile.mtime\n new_sizeCrcDate[rpFile] = (size,crc,date)\n old_sizeCrcDate.clear()\n old_sizeCrcDate.update(new_sizeCrcDate)\n #--Done\n return changed", "def reload(self):\n\t\toldlayers = self.layers\n\t\tself.layers = []\n\t\tfor cp, filename, fp in oldlayers:\n\t\t\tcp = cp # pylint\n\t\t\tif fp is None:\n\t\t\t\tself.read(filename)\n\t\t\telse:\n\t\t\t\tself.readfp(fp, filename)", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def _load_disk(self):", "def _load_disk(self):", "def refresh(self): \n return self._config.refreshObj(self)", "def syncrepl_refreshdone(self):\n pass", "def 
load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def refresh(self):\n self.dir = dirs['app']\n #-# Since there is only one utils file, its name is hardcoded.\n utilsFile = \"utils.dcg\"\n newData = {}\n if os.path.isfile(utilsFile) and os.access(utilsFile, os.R_OK):\n f = open(utilsFile, \"r\")\n lines = f.readlines()\n f.close()\n for line in lines:\n line = line.strip()\n if line.startswith(\";\") == False and line != \"\":\n name, commandLine, arguments, description = line.split(\";\")\n newData[name] = (commandLine.strip(), arguments, description.strip())\n changed = (self.data != newData)\n self.data = newData\n return changed", "def refresh_cache_file(form, model, is_created):\n common.save_serialized_file()\n app.global_content = common.load_cached()", "def _flush(self):\n self._d = {}", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()" ]
[ "0.71091926", "0.7044655", "0.700236", "0.68334323", "0.68334323", "0.6765857", "0.66914636", "0.66914636", "0.66914636", "0.65789855", "0.65704066", "0.65704066", "0.6506549", "0.6506549", "0.6505286", "0.6480908", "0.6439831", "0.6422957", "0.64214545", "0.64141774", "0.6324581", "0.62873477", "0.6237474", "0.6180018", "0.6178493", "0.6177689", "0.6163327", "0.6157545", "0.61551976", "0.6139173", "0.6128255", "0.6097631", "0.6079805", "0.6047056", "0.60237855", "0.60046977", "0.6002922", "0.59972835", "0.5992805", "0.5989833", "0.59848255", "0.59839094", "0.5977311", "0.5971743", "0.5935455", "0.5920936", "0.59201795", "0.59189695", "0.59020704", "0.5890203", "0.5883345", "0.58682114", "0.58682114", "0.58682114", "0.58626264", "0.58581704", "0.58530134", "0.58485174", "0.5839339", "0.5837458", "0.58341706", "0.58258075", "0.5815651", "0.5812487", "0.58063394", "0.58046496", "0.57926595", "0.5785846", "0.57799226", "0.5770362", "0.57594717", "0.5749568", "0.57442206", "0.573234", "0.57065654", "0.57002234", "0.5694912", "0.5688139", "0.5680484", "0.56729233", "0.5659575", "0.56559074", "0.56463253", "0.56458765", "0.5644589", "0.56409734", "0.5626544", "0.5623763", "0.56191075", "0.561429", "0.5611265", "0.5610817", "0.5610817", "0.56102335", "0.559455", "0.5591945", "0.558161", "0.55807525", "0.5576563", "0.5569023" ]
0.70745224
1
Uses the list of files on disk and the regex patterns to build a list of what the directory will look like if we renamed the files. Because we're just using a simple text list, we use symbols to show the user which filenames would change and whether they would produce any duplicates, substituting "." with "\1.txt".
def populateFileList(self):
    self.m_fileList.SetForegroundColour(wx.NullColour)

    # We'll need to track which file names are modified and which
    # file names duped.
    applicable, dupes = set(), set()

    if not self.m_validPatterns:
        # Regex's don't compile yet, just use the raw filename list.
        newNames = self.m_diskNames
    else:
        # Apply the substitution to the filename list to produce a
        # destination-name list, and identify whether the patterns
        # actually affect anything.
        #
        newNames, modifiedIndexes = [], []
        matcher = re.compile(self.m_reFromCtl.Value).subn
        subs = self.m_reToCtl.Value
        for filename in self.m_diskNames:
            # Perform the sub
            (filename, numChanges) = matcher(subs, filename)
            # Was there a modification?
            if numChanges:
                # Record the affected name.
                applicable.add(filename)
                if filename in newNames:
                    dupes.add(filename)
            # Add to the primary list
            newNames.append(filename)

    # Does this produce a different list than we already had? If so,
    # clear the file list and replace it with the new one.
    #
    if newNames != self.m_newNames:
        self.m_fileList.Clear()

        # Figure out the longest name so we can create a cleanly-formatted
        # set of prefix/suffix characters for the modified/duped annotation.
        #
        maxLen = max(map(len, newNames))
        decorate = '{m} {fn:<{ml}} {m}'.format

        # Now build a list of display elements.
        for filename in newNames:
            mark = ' ' if filename not in applicable else '|'
            if filename in dupes:
                mark = '*'
            self.m_fileList.Append(decorate(m=mark, fn=filename, ml=maxLen))

        # Keep the list.
        self.m_newNames[:] = newNames

    # Update the apply button, we only want it enabled when the user
    # has a valid set of patterns that affect any files and have no
    # dupes produced as a result.
    #
    self.m_applyBtn.Enabled = bool(applicable) and not dupes

    if dupes:
        # Emphasize the presence of dupes.
        self.m_fileList.SetForegroundColour(wx.RED)

    # Draw the list.
    self.m_fileList.Refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rename_files(file_list, src_dir, pattern, rename=False):\n i = 0\n renamed = regex_group_split(file_list, pattern, False)\n renamed_w_path = [src_dir + fn for fn in renamed]\n orig_fp_list = orig_filepath_list(file_list, src_dir)\n\n for filename in file_list:\n if not (orig_fp_list[i] == renamed_w_path[i]):\n print (colors.BLUE + \"_ORIGINAL_: \" + orig_fp_list[i].replace(src_dir, \"\") + colors.ENDC)\n print (colors.RED + \"__UPDATE__: \" + renamed_w_path[i].replace(src_dir, \"\") + colors.ENDC)\n\n if rename:\n os.rename(orig_fp_list[i], renamed_w_path[i])\n i += 1", "def tidyFileNames(folderToCheck):\n\n filters = list(map(lambda x: \"*.\" + x, expectedExts))\n\n for filter in filters:\n\n for f in getFiles(folderToCheck,filter):\n\n clean = f\n for search in searches:\n clean = replace(clean,search)\n\n if renameFile(f,clean):\n results = list(map(os.path.basename,[f,clean]))\n if results[0] != results[1]:\n print(f\"Renamed: {results[0]} -> {results[1]}\")", "def findDuplicateReleaseFiles(self, initialList, workingTowerName, newInfix):\n Release_Tower_Name = self.getReleaseVersion(workingTowerName, newInfix)\n Duplicate_List = []\n for fname in initialList:\n prefixStream, postfixStream = string.split(fname, workingTowerName)\n A_File_Name = prefixStream + Release_Tower_Name + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def main(root, filelist):\n #print \"got %s: %s\" % (root, filelist)\n rename(root, filelist)", "def rename(root, filelist):\n if not filelist:\n return\n def apply_rules(filename):\n rulez = [('_+' , ' '), # One or more underscores to spaces\n ('-{2,}' , '-'), # Two or more hyphens to single hyphen\n ('&' , 'And'), # An ampersand to 'And'\n ('(-)(\\w*)' ,r' \\1 \\2')]# Spaces around hyphen seperated words\n \n for look_for, replacement in rulez:\n filename = re.sub(look_for, replacement, filename)\n # Capitalize first letter of every word\n filename = \" \".join([ word.capitalize() for word in filename.split() ])\n return filename\n \n names = []\n for filename in filelist:\n basename = os.path.basename(filename)\n names.append(os.path.join(root, apply_rules(filename)))\n try:\n dest = os.tmpnam()\n fl = open(dest, 'w')\n fl.write(\"\\n\".join(names))\n fl.close()\n os.system(\"%s %s\" % (EDITOR, dest))\n ans = 'no'\n for oldname, newname in zip(filelist, open(dest).readlines()):\n oldname = os.path.join(root, oldname)\n newname = newname.strip()\n if oldname == newname:\n print \"No change from %s to %s ...skipping\" % (oldname, newname)\n else:\n print \"Changing %s to %s\" % (oldname, newname)\n if not ans[0].lower == 'a':\n ans = raw_input(\"Contine (Yes/No/All) ? 
[N] \") or 'no'\n if ans[0].lower() in ('a', 'y'):\n os.rename(oldname, newname)\n else:\n os.rename(oldname, newname)\n finally:\n os.remove(dest)", "def handleFileNames(self):\n \n # expand the wild cards - but do not create the full directory path\n # as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)", "def file_names(acqfolder):\n log.info('anonymizer.py file_names {0}'.format(acqfolder))\n\n subj_path = path(acqfolder)\n\n done = -1\n for ext in dicom_file_extensions:\n file_lst = subj_path.glob('*' + ext)\n if file_lst:\n rename_file_group_to_serial_nums(file_lst)\n done = 0\n\n return done", "def pele_folders(input_, file_list, dir_=None):\r\n os.chdir(\"../\")\r\n if not dir_:\r\n base = basename(input_)\r\n base = base.replace(\".pdb\", \"\")\r\n else:\r\n base = dir_\r\n count = 0\r\n folder = []\r\n for files in file_list:\r\n name = basename(files)\r\n name = name.replace(\".pdb\", \"\")\r\n if not count:\r\n hold = \"bla\"\r\n count += 1\r\n if name != \"original\" and hold != name[:-1]:\r\n hold = name[:-1]\r\n folder.append(\"mutations_{}/{}\\n\".format(base, hold))\r\n with open(\"dirnames_{}.txt\".format(base), \"w\") as txt:\r\n txt.writelines(folder)", "def getFileNames():\n input_path = \"/Users/tim/OneDrive/Master/Text_Mining/project/texts/glenarvon_html/\"\n temp_list = os.listdir(input_path)\n name_list = [i for i in temp_list if i[-4:] == \"html\"]\n name_list.sort(key=natural_keys) # see http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n return name_list, input_path", "def rename_date_formats(files_list):\n\n count_renamed = 0\n count_skipped = 0\n\n for file in files_list:\n\n # finding DD-DD-DDDD matches\n if date_regex.search(file):\n date_format = date_regex.search(file).group()\n date_split = date_format.split(\"-\")\n\n # detecting MM-DD-YYYY format and renaming to DD-MM-YYYY format\n if 1 <= int(date_split[0]) <= 12 and 1 <= int(date_split[1]) <= 31:\n european_format_date = \"-\".join([date_split[1], date_split[0], date_split[2]])\n new_file_name = file.replace(date_format, european_format_date)\n\n # checking that newly renamed file won't be a duplicate\n if new_file_name not in files_list:\n shutil.move(file, new_file_name)\n print(f\"<{file}> renamed to <{new_file_name}>\")\n count_renamed += 1\n else:\n print(f\"Cannot rename <{file}> because file <{new_file_name}> already exists\")\n count_skipped += 1\n\n # for files with DD-DD-DDDD format, but not MM-DD-YYYY like 89-77-3445\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n # for files with no MM-DD-YYYY format like 12-1221.txt or text.pdf\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n print(f\"\\nSUMMARY:\\nRenamed files count - {count_renamed}, not affected files count - {count_skipped}.\")", "def main():\n print(\"Current directory is\", os.getcwd())\n os.chdir('Lyrics/Lyrics')\n\n for dir_name, dir_list, file_list in 
os.walk(\".\"):\n for filename in file_list:\n file_path = dir_name + \"\\\\\" + filename\n new_name = get_fixed_filename(file_path)\n os.rename(file_path, new_name)", "def findDuplicateWorkingFiles(self, initialList, curInfix, newInfix):\n Duplicate_List = []\n for fname in initialList:\n infixStream = iccs_apex.whatInfixIsStream(fname)\n if (infixStream == curInfix):\n prefixStream, postfixStream = string.split(fname, infixStream)\n A_File_Name = prefixStream + newInfix + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def newDuplicateFiles(self, duplicateList, curInfixStream, newInfixStream):\n Return_List = []\n for fname in duplicateList:\n File_Name_Part = os.path.basename(fname)\n Directory_Name_Part = os.path.dirname(fname)\n Parent_Directory_Name = os.path.dirname(Directory_Name_Part)\n File_Name = os.path.join(Parent_Directory_Name, File_Name_Part)\n prefixStream, postfixStream = string.split(File_Name, curInfixStream)\n New_File_Name = prefixStream + newInfixStream + postfixStream\n Return_List = Return_List + [New_File_Name]\n return Return_List", "def test_paths_to_plates():\n output = filelister_yoko.paths_to_plates(TEST_PATH_YOKO)\n prefix = os.path.abspath(TEST_PATH_YOKO)\n plate_names = [\"screen-name-batch1_20190213_095340/A000002-PC\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]", "def file_matches(self, text):\n \n #print 'Completer->file_matches: <%s>' % text # dbg\n\n # chars that require escaping with backslash - i.e. 
chars\n # that readline treats incorrectly as delimiters, but we\n # don't want to treat as delimiters in filename matching\n # when escaped with backslash\n \n protectables = ' ()[]{}'\n\n def protect_filename(s):\n return \"\".join([(ch in protectables and '\\\\' + ch or ch)\n for ch in s])\n\n lbuf = self.get_line_buffer()[:self.readline.get_endidx()]\n open_quotes = 0 # track strings with open quotes\n try:\n lsplit = shlex_split(lbuf)[-1]\n except ValueError:\n # typically an unmatched \", or backslash without escaped char.\n if lbuf.count('\"')==1:\n open_quotes = 1\n lsplit = lbuf.split('\"')[-1]\n elif lbuf.count(\"'\")==1:\n open_quotes = 1\n lsplit = lbuf.split(\"'\")[-1]\n else:\n return None\n except IndexError:\n # tab pressed on empty line\n lsplit = \"\"\n\n if lsplit != protect_filename(lsplit):\n # if protectables are found, do matching on the whole escaped\n # name\n has_protectables = 1\n text0,text = text,lsplit\n else:\n has_protectables = 0\n text = os.path.expanduser(text)\n \n if text == \"\":\n return [protect_filename(f) for f in self.glob(\"*\")]\n\n m0 = self.clean_glob(text.replace('\\\\',''))\n if has_protectables:\n # If we had protectables, we need to revert our changes to the\n # beginning of filename so that we don't double-write the part\n # of the filename we have so far\n len_lsplit = len(lsplit)\n matches = [text0 + protect_filename(f[len_lsplit:]) for f in m0]\n else:\n if open_quotes:\n # if we have a string with an open quote, we don't need to\n # protect the names at all (and we _shouldn't_, as it\n # would cause bugs when the filesystem call is made).\n matches = m0\n else:\n matches = [protect_filename(f) for f in m0]\n if len(matches) == 1 and os.path.isdir(matches[0]):\n # Takes care of links to directories also. 
Use '/'\n # explicitly, even under Windows, so that name completions\n # don't end up escaped.\n matches[0] += '/'\n return matches", "def eachfilename(dir2list, printfname=0):\n if printfname: print('eachfilename is matching for \\n' + dir2list);\n if isinstance(dir2list,str):\n if not os.path.exists(dir2list): # if not a valid (single) filename\n dir2list=[dir2list] # try it as a list\n if isinstance(dir2list,list) or isinstance(dir2list,tuple):\n for line in dir2list:\n for fname in glob.iglob(line):\n fname = fname.replace('\\\\','/')\n if printfname: print(fname)\n yield fname\n elif isinstance(dir2list,str):\n pp, ff = os.path.split(dir2list); pp+='/';\n for line in open(dir2list):\n line = line.strip()\n if line.startswith('##') : continue ## skip those lines\n for fname in glob.iglob( pp + line ):\n fname=fname.replace('\\\\','/')\n if printfname: print(fname)\n yield fname", "def replsuffix(files, suffix):\n\toutfiles = []\n\tif suffix is None: return\n\tif type(files) is type(\"\"):\n\t\tfiles = [files]\n\tfor f in files:\n\t\tfname, ext = os.path.splitext(f)\n\t\tnewfname = fname + suffix\n\t\toutfiles.append(newfname)\n\treturn outfiles", "def _get_rename_command(self,\r\n out_filenames,\r\n tmp_output_dir,\r\n output_dir):\r\n result = ''\r\n result_filepaths = []\r\n for fn in out_filenames:\r\n tmp_result_filepath = '%s/%s' % (tmp_output_dir, fn)\r\n result_filepath = '%s/%s' % (output_dir, fn)\r\n result += \\\r\n '; mv %s %s' % (tmp_result_filepath, result_filepath)\r\n result_filepaths.append(result_filepath)\r\n return result, result_filepaths", "def createFileNames(nFileNames, seqPrefix):\n nameList = []\n nameList = [seqPrefix+str(i)+\".txt\" for i in range(0, nFileNames)]\n return nameList", "def compile_filename_patterns(pattern_list):\n\n pats=list(pattern_list)\n for i in range(len(pats)):\n if isinstance(pats[i],str):\n if pats[i].startswith('re:'):\n pats[i]=pats[i][3:]\n else:\n pats[i]=fnmatch.translate(pats[i])\n pats[i]=re.compile(pats[i])\n return pats", "def processed_file_names(self):\n if self.force_reprocess == True:\n self.force_reprocess = False\n return 'reprocess.pt'\n \n ''' HR 01/06/22 Workaround to avoid FileNotFoundError '''\n print('self.processed_dir:', self.processed_dir)\n # folder,file = os.path.split(self.processed_dir)\n folder = self.processed_dir\n if not os.path.isdir(folder):\n print(' Making folder', folder)\n os.makedirs(folder)\n \n processedfiles = [f for f in os.listdir(self.processed_dir) if os.path.isfile(\n os.path.join(self.processed_dir, f))]\n if 'pre_filter.pt' in processedfiles:\n processedfiles.remove('pre_filter.pt')\n if 'pre_transform.pt' in processedfiles:\n processedfiles.remove('pre_transform.pt')\n # 'not_implimented.pt' #[f'data_{i}.pt' for i in list(self.data.index)]\n return processedfiles", "def rename_file_group_to_serial_nums(file_lst):\n file_lst.sort()\n c = 1\n for f in file_lst:\n dirname = path.abspath(f.dirname())\n fdest = f.joinpath(dirname, \"{0:04d}\".format(c) + output_dicom_extension)\n log.info('Renaming {0} to {1}'.format(f, fdest))\n f.rename(fdest)\n c += 1", "def handle_filenames(filenames):\n suffixes = [\".mod\", \".dat\", \".run\"]\n if len(filenames) == 1:\n return (filenames[0].with_suffix(suffix) for suffix in suffixes)\n else:\n try:\n return sorted(filenames, key=lambda x: suffixes.index(x.suffix))\n except ValueError:\n click.echo(click.style(f\"Invalid filename.\", fg=\"red\", bold=True))", "def filenamePatterns(self):\n return ['*.'+e for e in self.filenameExtensions]", "def 
getNames(self):\r\n ListFiles = os.listdir(\"Save\")\r\n centering = \" \"\r\n stringFiles = centering + \"List of {} files in your Save folder : \\n \\n\".format(\r\n \"PVP\" if self.PVP else \"AI\"\r\n )\r\n if self.PVP:\r\n for k in ListFiles:\r\n if self.PVP and \"PVP_mode\" == k[:8]:\r\n realName = k[9:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n else:\r\n stringFiles += \" Files where AI is playing white : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"B\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n stringFiles += \"\\n Files where AI is playing black : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"W\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n self.existingFiles.setText(stringFiles)", "def test_paths_to_plates():\n output = filelister_ix.paths_to_plates(TEST_PATH_IX)\n prefix = os.path.abspath(TEST_PATH_IX)\n plate_names = [\"test-plate-1\", \"test-plate-2\",\n \"test-plate-3\", \"test-plate-4\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def transform_suffix(filenames, suffix_old, suffix_new):\n\n new_filenames = set([])\n len_suffix_old = len(suffix_old) + 1 # add one for the \".\"\n # loop over the list of files and remove the suffix\n for name in filenames:\n name = name[:-len_suffix_old]\n new_filenames.add(name + \".\" + suffix_new)\n \n return new_filenames", "def file_name_search():\n directory = \"/Users/andrewpowers/Documents/server/fastq_pass\"\n\n for file in os.listdir(directory):\n output_file = re.sub('fastq', 'fasta', file)\n os.system(bash_command.format(directory+\"/\"+file, output_file))\n print('File {} converted to fasta.'.format(file))\n print('Conversion Done.')", "def namedir(l, override=False):\n\tprint(\"naming dir\")\n\n\tglobal torrenteps\n\ttorrenteps = []\n\n\tfiles = l\n\n\tprint(\"###\")\n\n\t# filenums: {episode: [numpos(number, index)]}\n\tfilenums = {}\n\n\tfor f in files: # {episode: [numpos,]}\n\t\tfilenums[f] = getnumbers(stripname(f.getpathname(), False))\n\t\tprint(f, filenums[f])\n\n\tallfilenums = [fnum for f in files for fnum in filenums[f]] # list of all numpos\n\tprint(allfilenums)\n\tfilenumcounter={}\n\tfor fnum in allfilenums:\n\t\tif fnum in filenumcounter:\n\t\t\tfilenumcounter[fnum] += 1\n\t\telse:\n\t\t\tfilenumcounter[fnum] = 1\n\n\tprint(filenumcounter)\n\n\n\ttoremove = []\n\n\tindexes = [fnum.strindex for f in files for fnum in filenums[f]] # get all indexes\n\tremoveindexes = set(indexes)\n\tindexnums = {}\n\tfor f in files: # remove bad indexes\n\t\tfor fnum in filenums[f]:\n\t\t\tif fnum.strindex in indexnums:\n\t\t\t\tif indexnums[fnum.strindex] != fnum.num:\n\t\t\t\t\tif fnum.strindex in removeindexes:\n\t\t\t\t\t\tremoveindexes.remove(fnum.strindex)\n\t\t\telse:\n\t\t\t\tindexnums[fnum.strindex] = fnum.num\n\n\tindextonumbers = {}\n\tfor index in set(indexes):\n\t\tnumbers = []\n\t\tfor f in files:\n\t\t\tfor fnum in filenums[f]:\n\t\t\t\tif fnum.strindex == index:\n\t\t\t\t\tnumbers.append(fnum.num)\n\t\tindextonumbers[index] = numbers\n\tprint(\"indextonumbers\", indextonumbers)\n\n\ttoremove += removeindexes\n\n\tfor fnum in filenumcounter:\n\t\ttimes = filenumcounter[fnum]\n\t\tif times >= len(files)-1:\n\t\t\tprint(\"removing index\", str(fnum.strindex), \"because it's all files\")\n\t\t\ttoremove.append(fnum.strindex)\n#\t\telif float(fnum.num) > 200:\n#\t\t\tprint \"removing index\", 
str(fnum.strindex), \"because it's over 200\"\n#\t\t\ttoremove.append(fnum.strindex)\n\n\tprint(\"toremove\", toremove)\n\tfor f in files:\n\t\tfilenums[f] = [fnum for fnum in filenums[f] if not fnum.strindex in toremove and not \"NCOP\" in f.getpathname() and not \"NCED\" in f.getpathname()]\n\tprint(\"filenums\", filenums)\n\n\tfilenumsstrindex = [fnum.strindex for f in files for fnum in filenums[f]] # get most common index\n\tprint(\"strindexes\", filenumsstrindex)\n\tepnumpos = None\n\tif len(filenumsstrindex) != 0:\n\t\tfilenumsstrindex = Counter(filenumsstrindex)\n\t\tcommonlist = [index for index, amount in filenumsstrindex.most_common()]\n\t\tprint(\"commonlist\", commonlist)\n\t\tamtuniquenumbers = {index: len(set(indextonumbers[index])) for index in commonlist }\n\t\tprint(\"amtuniquenumbers\", amtuniquenumbers)\n\t\tmostuniquenumbers = max(amtuniquenumbers.values())\n\t\tif mostuniquenumbers < 3.0/4.0 * filenumsstrindex.most_common()[0][1]:\n\t\t\t# just one number isn't good enough - probably contains both season and episode\n\t\t\tmostcommon = sorted(commonlist, key = lambda index: amtuniquenumbers[index])\n\t\t\tepnumpos = [mostcommon[0], mostcommon[1]]\n\t\t\tprint(\"attempting to describe with 2 numbers. Indexes:\", epnumpos)\n\t\telse:\n\t\t\tmostcommonlist = [index for index, amtunique in list(amtuniquenumbers.items()) if amtunique == mostuniquenumbers]\n\t\t\tprint(\"mostcommonlist 2\", mostcommonlist)\n\t\t\tepnumpos = [mostcommonlist[0]]\n\t\t\tprint(\"epnumpos\", epnumpos, mostcommonlist)\n\n\tnames = copy.copy(l)\n\teps = [None for f in l]\n\n\tfor index, name in enumerate(names):\n\t\tpath = l[index]\n\t\tchangedname = files[index]\n\t\tnewname = path.getpathname()\n\t\tif epnumpos != None:\n\t\t\tif len(epnumpos) == 1:\n\t\t\t\tnumpos = epnumpos[0]\n\t\t\t\tnumbers = filenums[changedname]\n\t\t\t\tnumber = [num for num in numbers if num.strindex == numpos]\n\t\t\t\tif number != []:\n\t\t\t\t\tnumber = number[0].num\n\t\t\t\t\tif number.endswith(\".\"):\n\t\t\t\t\t\tnumber = number[:-1]\n\t\t\t\t\tif \".\" in number:\n\t\t\t\t\t\tnumber = float(number)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnumber = int(number)\n\t\t\t\t\teps[index] = number\n\t\t\telif len(epnumpos) == 2:\n\t\t\t\tnumbers = filenums[changedname]\n\t\t\t\tfirstnumber = [num for num in numbers if num.strindex == epnumpos[0]]\n\t\t\t\tsecondnumber = [num for num in numbers if num.strindex == epnumpos[1]]\n\t\t\t\tfirstnumber = int(firstnumber[0].num)\n\t\t\t\tsecondnumber = int(secondnumber[0].num)\n\t\t\t\tnumber = firstnumber + float(secondnumber) / 100\n\t\t\t\teps[index] = number\n\t\tnames[index] = newname\n\n\tnumbereps = sum([ep != None for ep in eps])\n\tif numbereps <= 1:\n\t\teps = [None for ep in eps]\n\n\tfor index, path in enumerate(l):\n\t\tif not path.getkey() in save.names or override:\n\t\t\tif isinstance(path, episode):\n\t\t\t\tname = names[index]\n\t\t\t\tepnumber = eps[index]\n\t\t\t\tpath.setname([name, epnumber])", "def test_replacements_applied_before_force_name():\n\n conf = r\"\"\"\n {\"always_rename\": true,\n \"select_first\": true,\n\n \"force_name\": \"Scrubs\",\n\n \"input_filename_replacements\": [\n {\"is_regex\": true,\n \"match\": \"S01E02 - \",\n \"replacement\": \"\"}\n ]\n }\n \"\"\"\n\n out_data = run_tvnamer(\n with_files = ['S01E02 - Some File.avi'],\n with_config = conf)\n\n expected_files = ['S01E02 - Some File.avi']\n\n verify_out_data(out_data, expected_files, expected_returncode = 2)", "def standardize_file_names(source):\n # Convert File Names into a List\n 
video_list = os.listdir(source)\n print(video_list)\n \n # Switch into the Directory and rename all files\n os.chdir(source)\n for i in range(len(video_list)):\n os.rename(video_list[i], 'video'+ str(i) + '.MOV')", "def regex_group_split(file_list, pattern, output=True):\n split_list = list([]) # tuple probz\n\n dicdic ={ \"Jan\":\"01\",\"Feb\":\"02\",\"Mar\":\"03\",\n \"Apr\":\"04\",\"May\":\"05\",\"June\":\"06\",\"Jun\":\"06\",\n \"July\":\"07\",\"Jul\":\"07\",\"Aug\":\"08\",\"Sep\":\"09\",\n \"Oct\":\"10\",\"Nov\":\"11\",\"Dec\":\"12\",\n \"JAN\":\"01\",\"FEB\":\"02\",\"MAR\":\"03\",\n \"APR\":\"04\",\"MAY\":\"05\",\"JUN\":\"06\",\n \"JUL\":\"07\",\"AUG\":\"08\",\"SEP\":\"09\",\n \"OCT\":\"10\",\"NOV\":\"11\",\"DEC\":\"12\"}\n\n for file in file_list:\n split_file = list(re.match(pattern, file).groups())\n #split_list.append(file.replace(\" \", \"\"))\n split_file[0], split_file[1], split_file[2], split_file[3], split_file[4], split_file[5] = \\\n split_file[0] + \" \", split_file[1], split_file[2] + \"-\", split_file[3]+ \"-\", split_file[4], split_file[5]\n split_list.append(\"\".join(split_file))\n \n if (len(split_list) > 0 and output):\n #print colors.RED + '\\033[1m' + \"renames:\" + '\\033[0m'\n for split in split_list:\n print colors.RED + split + colors.ENDC\n\n return split_list", "def file_list_str(self):\n curr_file_index = self.app.current_file_index()\n files = self.app.get_files();\n file_list = files[curr_file_index:] + files[:curr_file_index]\n str_list = []\n for f in file_list:\n fname = f.name + ([\"\", \"*\"][f.is_changed()])\n if not str_list:\n str_list.append(\"[\" + fname + \"]\")\n else:\n str_list.append(fname)\n return \" \".join(str_list)", "def demo_walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n\n # Loop through each file in the (current) directory\n for filename in filenames:\n new_name = get_fixed_filename(filename)\n source = os.path.join(directory_name, filename)\n destination = os.path.join(directory_name, new_name)\n print(\"Renaming {} to {}\".format(source, destination))\n os.rename(source, destination)", "def batch_rename(work_dir, old_ext, new_ext):\n\n for filename in os.listdir(work_dir):\n\n split_file = os.path.splitext(filename)\n file_ext = split_file[1]\n\n if old_ext == file_ext:\n newfile = split_file[0] + new_ext\n os.rename(\n os.path.join(work_dir, filename),\n os.path.join(work_dir, newfile)\n )\n\n print(\"Rename is over\")\n print(os.listdir(work_dir))", "def reorderFilenames(files, order):\n new = []\n for i in order:\n found=False\n for f in files:\n if i in f:\n new.append(f)\n found=True\n if found==False:\n new.append('')\n return new", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n 
shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def copy2fs_names(self):\n ext_names = []\n for ext in self.extensions.values():\n if not ext.copy2fs:\n continue\n ext_names.append(ext.name)\n return ', '.join(sorted(ext_names))", "def add_suffix(filenames, suffix):\n\n new_filenames = set([])\n # loop over the list of files and add the suffix\n for name in filenames:\n new_filenames.add(name + \".\" + suffix)\n #print \"filenames = \"\n #print name + \".\" + suffix\n \n return new_filenames", "def get_filenames_strains(self, file_path_template_newick_tree):\n\t\tassert self.validate_file(file_path_template_newick_tree)\n\t\tlist_of_filenames_strains = []\n\t\ttree = Phylo.read(file_path_template_newick_tree, 'newick')\n\t\tfor leaf in tree.get_terminals():\n\t\t\tprefix = leaf.name\n\t\t\tif prefix.lower() == \"ancestor\":\n\t\t\t\tcontinue\n\t\t\tlist_of_filenames_strains.append(\"{prefix}.fasta\".format(prefix=prefix))\n\t\treturn list_of_filenames_strains", "def rename(dir, patterns, titlePattern, count=0):\n\tcount_i = count\n\tfor pattern in patterns:\n\t\tfor pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n\t\t\ttitle, ext = os.path.splitext(os.path.basename(pathAndFilename))\n\n\t\t\tprint(\"Find {}\".format(title))\n\t\t\tos.rename(pathAndFilename, os.path.join(dir, titlePattern % (count_i)))\n\t\t\tcount_i += 1", "def create_short_database_names(path_list):\n no_suffixes = [Path(p).resolve().with_suffix(\"\") for p in path_list]\n # The assert statement makes sure that the while loop terminates\n assert len(set(no_suffixes)) == len(\n no_suffixes\n ), \"path_list must not contain duplicates.\"\n short_name_to_path = {}\n for path, path_with_suffix in zip(no_suffixes, path_list):\n parts = tuple(reversed(path.parts))\n needed_parts = 1\n candidate = parts[:needed_parts]\n while _causes_name_clash(candidate, no_suffixes):\n needed_parts += 1\n candidate = parts[:needed_parts]\n\n short_name = \"/\".join(reversed(candidate))\n short_name_to_path[short_name] = path_with_suffix\n return short_name_to_path", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def rename_name_gene(listOfFile, PATH_FASTA_RENAME) :\n\n\tprint \"\\n#################\"\n\tprint \"# Rename protein\"\n\tprint \"#################\\n\"\n\n\tcreate_folder(PATH_FASTA_RENAME)\n\n\tnew_listOfFile=[]\n\n\tfor my_file in listOfFile :\n\t\tif os.stat(my_file).st_size != 0 :\n\t\t\tnew_listOfFile.append(my_file)\n\n\tseq_to_rename = find_rename_fasta(new_listOfFile)\n\tdict_count = dict([(sequence[1:].rstrip(\" \"), 0) for sequence in seq_to_rename])\n\tprogression=1\n\tnumber_of_file = len(new_listOfFile)\n\n\tfor my_file in new_listOfFile :\n\n\t\tfile_name = os.path.basename(my_file)\n\n\t\tsys.stdout.write(\"{:.2f}% : {}/{} files renamed\\r\".format(progression/float(number_of_file)*100, progression,number_of_file))\n\t\tsys.stdout.flush()\n\t\tprogression += 1\n\n\t\thandle = open(os.path.join(PATH_FASTA_RENAME, file_name), 'w')\n\t\tfasta_reading = SeqIO.parse(my_file, \"fasta\")\n\n\t\tfor seq in fasta_reading :\n\t\t\tif seq.id in dict_count :\n\t\t\t\tif dict_count[seq.id] == 0 :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\telse :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\t\tif \"NC_\" in seq.id :\n\t\t\t\t\t\t# NOTE New name : NC_XXXXXX[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom 
trouvé)]_nomSysteme_D_nomProteine\n\t\t\t\t\t\tseq.id = \"_\".join(seq.id.split(\"_\")[:2])+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[2:])\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# NOTE New name : NNNN[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_V_nomProteine\n\t\t\t\t\t\tseq.id = seq.id.split(\"_\")[0]+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[1:])\n\t\t\t\t\tseq.name = seq.id\n\t\t\t\t\tseq.description = \"\"\n\n\t\t\tSeqIO.write(seq, handle, \"fasta\")\n\n\t\thandle.close()\n\n\tprint\n\tprint \"Done!\"\n\treturn", "def created_names(self, prefix):\n assert os.path.isdir(prefix)\n cwd = os.getcwd()\n os.chdir(prefix)\n names = tuple(sorted(filter(\n os.path.isdir,\n glob.glob(os.path.join(*('*' * self.depth))))))\n os.chdir(cwd)\n return names", "def main():\n extension_choices = {}\n os.chdir(\"FilesToSort.old\")\n for file_name in os.listdir('.'):\n if os.path.isdir(file_name):\n continue\n\n file_extension = file_name.split('.')[-1]\n if file_extension not in extension_choices:\n choice = input(\"What file type would you like to sort {} files into? \".format(file_extension))\n extension_choices[file_extension] = choice\n try:\n os.mkdir(choice)\n except FileExistsError:\n pass\n\n os.rename(file_name, \"{}/{}\".format(extension_choices[file_extension], file_name))", "def buildfilelist():\r\n for files in filelist:\r\n if os.path.splitext(files)[1]=='.dxf': #查找目录下的dxf文件,加入到readfilelist文件列表中 \r\n readfilelist.append(files)\r\n #feilin=file('feilin(ph).dxf','w') #新建一个文件,名字先占位用,后续改成由配置文件中读入名称。 \r", "def updateDiskFileList(self):\n\n if self.m_curPath:\n # Get me just the files please.\n for _, _, files in os.walk(self.m_curPath):\n break\n else:\n files = []\n\n files.sort()\n if files != self.m_diskNames:\n self.m_diskNames[:] = files\n self.m_newNames[:] = []\n\n self.populateFileList()", "def _path_files_format(self):\n\n correct_files = []\n\n for file in self.files:\n if not file.startswith(self.path):\n correct_files.append(os.path.join(self.path, file))\n else:\n correct_files.append(file)\n\n self.files = correct_files", "def renameFiles(folder):\n\n # Retrieve list of all text files and remove the txt files\n for filename in glob.glob(os.path.join(folder, \"*.txt\")):\n with open(filename, 'r') as file:\n metadata=file.read().replace('\\n', '')\n ident = metadata[27:31]\n order = metadata[26].upper()\n finger = metadata[32:34]\n gender = metadata[8].upper()\n fingerprintClass = metadata[16].upper()\n fp = Fingerprint(ident, order, finger, gender, fingerprintClass)\n\n # Remove the .txt file and rename the png\n os.remove(filename)\n pngName = filename.replace(\".txt\", \".png\")\n newName = fp.id + \"_\" + fp.order + \"_\" + fp.finger + \"_\" + fp.gender + \"_\" + fp.fingerprintClass + \".png\"\n newName = os.path.join(folder, newName)\n os.rename(pngName, newName)", "def MusicScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in mustypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(musicPath, filename))\r\n print color.GREEN + 'File succesfully moved!' 
+ color.ENDC\r\n print 'Finished Scanning For Music'", "def transform_prefix(filenames, prefix_old, prefix_new):\n\n new_filenames = set([])\n len_prefix_old = len(prefix_old)\n # loop over the list of files and remove the prefix\n for name in filenames:\n name = name[len_prefix_old:]\n new_filenames.add(prefix_new + name)\n\n\n return new_filenames", "def get_files_suffix_list(suffixes, flist, Lshow=False, Ldir=False):\n matched_files=[]\n dirs=[]\n files=[]\n for fname in flist:\n if os.path.isdir(fname):\n dirs.append(fname)\n else:\n files.append(fname)\n for suff in suffixes:\n for fname in files:\n #print(f\" {suff} in {fname} ?\")\n if fname.endswith(suff):\n matched_files.append(fname)\n matched_files.extend(dirs) \n return matched_files", "def main(files: List[Path]):\n show_filenames = len(files) > 1\n for file in files:\n with file.open() as f:\n for m in find_camel(f):\n print(pretty_match(m, filename=file if show_filenames else None))", "def generate_name(path_list):\n name = path_list[0]\n for item in path_list[1:]:\n name += \"[\" + item + \"]\"\n return name", "def clean_file(filesnames_list, file_type): # so now not needed.\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for line in filesnames_list:\r\n s, fileType = line.split('.') # split off file_type here\r\n print(s)\r\n files_list.append(s)\r\n ft_list.append(fileType)\r\n print(files_list)\r\n return (files_list)", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def refactor(path: str, files: List):\n skipped = []\n for filename in files:\n try:\n number = get_number_from_name(filename)\n except AttributeError:\n skipped.append(os.path.join(path, filename))\n continue\n new_number = update_number(number)\n\n file_path = os.path.join(path, filename)\n new_file_path = file_path.replace(number, new_number)\n\n with open(file_path, 'r') as file:\n data = file.read()\n data = data.replace(number, new_number)\n with open(file_path, 'w') as file:\n file.write(data)\n\n os.rename(file_path, new_file_path)\n return skipped", "def renamefile(filename):\n new_data_list = []\n with open(filename, 'r') as f:\n data_list = f.read().split('\\n')\n\n print('Generating new data list..')\n for data in tqdm(data_list):\n if len(data) == 0:\n continue\n data_info = data.split(' ')\n\n #data_info[0] = data_info[0].replace('jpg', 'png')\n #data_info[1] = data_info[1].replace('jpg', 'png')\n for it, name in enumerate(data_info):\n data_info[it] = '/'.join(name.split('/')[1:])\n if data_info[2].find('extras') == -1:\n new_data_list.append(' '.join(data_info))\n\n with open(filename, 'w') as f:\n print('writing new data names..')\n\n for it, data in tqdm(enumerate(new_data_list)):\n if len(data) == 0:\n continue\n\n if it == len(new_data_list)-1:\n f.write(data)\n else:\n f.write(data+'\\n')\n\n print('Done.')", "def rename_all(dirpath, startletter, startindex, verbose=1):\n\n if (verbose == 0):\n logging.getLogger().setLevel(logging.ERROR)\n elif (verbose == 1):\n logging.getLogger().setLevel(logging.WARNING)\n elif (verbose == 2):\n logging.getLogger().setLevel(logging.INFO)\n else:\n logging.getLogger().setLevel(logging.DEBUG)\n\n indexstr = startindex\n datetimestr_to_fullfname_dict = 
{}\n\n # iterate over all files in subdirectories from given root directory\n for rootdir, alldirs, allfiles in os.walk(dirpath):\n\n for afile in allfiles:\n\n # create the full path to the file\n fullfname = os.path.join(rootdir, afile)\n\n # check if there is a valid file\n if not (os.path.exists(fullfname) and\n os.path.isfile(fullfname)):\n logging.warning(\"Cannot access %r, skipping it\", fullfname)\n continue\n\n # First try if the file is an image file with EXIF tags\n # if so, return valid datetimestr, otherwise try date metadata\n datetimestr = extract_exif(fullfname)\n if not (datetimestr):\n datetimestr = extract_date_metadata(fullfname)\n\n # if valid datetimestr \n if (datetimestr):\n # this will handle the case when there is already the exact\n # same datetimestr in the dictionary(shouldn't happen often)\n while (datetimestr in datetimestr_to_fullfname_dict):\n datetimestr = datetimestr + '*'\n datetimestr_to_fullfname_dict[datetimestr] = fullfname\n logging.info(\n \"Entering datetimestr %r to dictionary\", datetimestr)\n else:\n logging.warning(\n \"No EXIF or date metadata found in %r, skipping it\",\n fullfname)\n\n # Go through the alphabetically (and therefore time-stamp sorted)\n # list of keys of the dictionary to do the rename\n for a_dtstr in sorted(datetimestr_to_fullfname_dict.keys()):\n\n # we discard the time portion as we don't need it for\n # the filename\n datestr = a_dtstr[:8]\n\n # the file extension from original filename\n afileext = get_fname_ext(\n datetimestr_to_fullfname_dict[a_dtstr]).upper()\n\n newfname = datestr + \"_\" + startletter + \"_\" + indexstr + afileext\n\n # create the new full filename by taking existing path of old \n # full filename and combining with new file name\n newfullfname = os.path.join(\n os.path.dirname(datetimestr_to_fullfname_dict[a_dtstr]),\n newfname)\n\n try:\n logging.info(\"Renaming %r -> %r\",\n datetimestr_to_fullfname_dict[a_dtstr],\n newfullfname)\n os.rename(datetimestr_to_fullfname_dict[a_dtstr],\n newfullfname)\n except os.error as oserr:\n logging.error(\"Can't rename file %s to %s: %s\",\n datetimestr_to_fullfname_dict[a_dtstr],\n newfullfname, oserr)\n\n\n indexstr = incr_indexstr(indexstr)", "def write_recording_names(output, inputs, recording_names):\n for in_dir in inputs:\n data_set = in_dir.split('/')[-2]\n file_name = os.path.join(output, data_set, \"recordings.list\")\n with open(file_name, 'w') as f:\n f.writelines('\\n'.join(recording_names[in_dir]))", "def renamed_files(conn, scan1, scan2):\n\n def get_singletons(conn, scanid):\n c = conn.cursor()\n c.execute('SELECT hashID, pathid, count(*) AS ct '\n 'FROM files WHERE scanid=? 
GROUP BY hashid HAVING ct=1', (scanid,))\n return c\n\n pairs_in_scan1 = set()\n path1_for_hash = {}\n for (hashID, path1, count) in get_singletons(conn, scan1):\n pairs_in_scan1.add((hashID, path1))\n path1_for_hash[hashID] = path1\n\n for (hashID, path2, count) in get_singletons(conn, scan2):\n if hashID in path1_for_hash and (hashID, path2) not in pairs_in_scan1:\n yield (DBFile({\"hashid\": hashID, \"pathid\": path1_for_hash[hashID]}),\n DBFile({\"hashid\": hashID, \"pathid\": path2}))", "def _possible_names(self, filename):\n names = [filename]\n if not self._iszip(filename):\n for zipext in _file_openers.keys():\n if zipext:\n names.append(filename+zipext)\n return names", "def _create_file_paths(folder):\n debut = \"chroma-nnls\"\n instrument = [\"piano\", \"orchestra\"]\n style = [\"baroque\", \"classical\", \"romantic\", \"modern\", \"addon\"]\n file_names = [\"_\".join([debut, i, s]) for i in instrument for s in style]\n # file_names = [\"test0\"]\n\n return [folder + fn + \".csv\" for fn in file_names]", "def separate(in_file, orig_dir, dest_dir):\n files = set()\n with open(in_file, encoding=\"utf8\") as f:\n for l in f:\n files.add(l.split()[0])\n \n dest = pathlib.Path(dest_dir)\n if not dest.exists():\n dest.mkdir()\n \n for p in pathlib.Path(orig_dir).iterdir():\n if p.stem in files:\n print(\"Moviendo\", p.name)\n p.rename(dest / p.name)", "def nestedRenameFilesAdd(dir_path, filenames, str_add, add_to_front=True):\n\tfilenames = listR.toList(filenames)\n\tfor name in filenames:\n\t\tif add_to_front == True:\n\t\t\tstr_mode = str_add+\"%s%s\"\n\t\telse:\n\t\t\tstr_mode = \"%s\" + str_add + \"%s\"\n\t\tnew_filename = str_mode % (os.path.splitext(name))\n\t\tnestedRenameFiles(dir_path, name, new_filename)", "def sortFiles(files):\n def sortKey(file):\n dirFile = file.lower().rsplit('\\\\',1)\n if len(dirFile) == 1: dirFile.insert(0,'')\n return dirFile\n sortKeys = dict((x,sortKey(x)) for x in files)\n return sorted(files,key=lambda x: sortKeys[x])", "def rename_social(file_paths):\n # Set up a counter\n file_counter = 0\n # make a list of failed files\n failed_paths = []\n # for each file path\n for file in file_paths:\n # check if the file is there\n if not os.path.isfile(file):\n failed_paths.append('_'.join(('old', file)))\n continue\n\n # Parse the old file name to check if it is a social experiment or not\n animalnameRegex = re.compile(r'[A-Z][A-Z]_\\d\\d\\d\\d\\d\\d_[a-z]')\n animals = animalnameRegex.findall(file)\n\n if len(animals) > 1:\n # This is a social prey capture test\n first_animal = animals[0]\n # Split the file\n parts = file.split(first_animal)\n # Check to see if previously modified, otherwise make the modification\n if \"social\" in file:\n continue\n else:\n mod = 'social_' + first_animal\n new_path = \"\".join([parts[0], mod, parts[-1]])\n else:\n continue\n\n # check if the new path exists\n if os.path.isfile(new_path):\n failed_paths.append('_'.join(('new', new_path)))\n continue\n # change the file_name\n os.rename(file, new_path)\n # update the counter\n file_counter += 1\n\n print(\"_\".join((\"Total original files: \", str(len(file_paths)), \"Successfully renamed files: \", str(file_counter))))\n return failed_paths", "def rewrite_MS_filename(rawfile_name):\n print(\"Rewriting mz filename...\")\n\n # rename MS filename by its m/z.\n # open file, read the 2nd row (where m/z is written) and make that into the filename\n newfname = []\n replacename = ''\n for folder in rawfile_name:\n msfiles = [os.path.basename(path) for path in 
sorted(glob.glob(opfolder+\"/files/\"+folder+\"/*-MS Chromatogram.txt\"))]\n for fname in msfiles: # MS chromatogram files のファイル名を mzに書き換える。そのファイル名リストをnewfnameに入れる\n filepath = opfolder+\"/files/{}/{}\".format(folder,fname)\n with open(filepath, 'r') as f:\n for i, line in enumerate(f):\n if i == 2: break\n if i == 1: # read 2nd row, where the m/z are written\n if isfloat(line[-8:-2].strip()):\n replacename = '{}-{}'.format(folder,line[-8:-2].strip())\n newfname.append(\"{}/{}.txt\".format(folder,replacename))\n elif re.search(r'1-1MS',line) :\n replacename = '{}-{}-1'.format(folder,line[-5:].strip())\n newfname.append(\"{}/{}.txt\".format(folder,replacename))\n elif re.search(r'1-2MS',line) :\n replacename = '{}-{}-2'.format(folder,line[-5:].strip())\n newfname.append(\"{}/{}.txt\".format(folder,replacename))\n f.close()\n os.rename(filepath,opfolder+\"/mz/{}/{}.txt\".format(folder,replacename))\n print(\"Done\")\n return newfname", "def insensitive_glob(pattern):\n def either(c):\n return '[%s%s]' % (c.lower(), c.upper()) if c.isalpha() else c\n\n file_list = sorted(glob.glob(''.join(map(either, pattern))))\n\n return file_list", "def create_fileters(*exts):\n ret = []\n for e in exts:\n ret += ['{} (*.{})'.format(*e)]\n return ret", "def test_get_only_names(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder, full_path=False, only_files=True, walk=True)\n need_result = []\n for i in range(1, 4):\n need_result.append('meme{}.jpg'.format(i))\n need_result.extend(['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n '1.txt',\n '2.txt',\n '3.txt',\n 'not_txt.not_txt',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ])\n self.assertEqual(sorted(result), sorted(need_result))", "def _list_gen(watchdog_path):\n # Remove all dotfiles and all non-file\n for watchdog in os.listdir(watchdog_path):\n if watchdog[0] == '.':\n continue\n\n filename = os.path.join(watchdog_path, watchdog)\n try:\n filestat = os.lstat(filename)\n except os.error:\n continue\n\n if not stat.S_ISREG(filestat.st_mode):\n continue\n\n yield (watchdog, filename, filestat)", "def iterate_dir(dir_path:str, files, equipt_nr):\n for ii in os.listdir(dir_path):\n if os.path.isdir(ii):\n iterate_dir(ii)\n elif re.search('[0-9]{7}', ii):\n rename_file(ii, equipt_nr)\n else:\n print('not editing : ' + ii)", "def test_extensions(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = []\n for i in range(1, 4):\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme monty python', 'meme{}.jpg'.format(i)))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme1.jpg'))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme2.png'))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme4.jpg'))\n need_result.append(os.path.join(dummy_folder, 'memes', 'meme4.png'))\n\n for i in ['antigravity.png',\n 'egg.png',\n 'holy_grenade.png',\n 'spam.jpg',\n ]:\n need_result.append(os.path.join(dummy_folder, i))\n\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=['jpg', 'png'])\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=['.jpg', '.png'])\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=['.JPG', 'png'])\n self.assertEqual(sorted(result), sorted(need_result))\n result = 
listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions=('.JPG', 'png'))\n self.assertEqual(sorted(result), sorted(need_result))\n result = listdir(dummy_folder, full_path=True, only_files=True, walk=True, extensions={'.JPG', 'png'})\n self.assertEqual(sorted(result), sorted(need_result))", "def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)", "def FileNameToFile(files):\n files = files.replace('%20%28ja%29', '.ja')\n if files in up_list:\n if files == 'UserManual':\n return \"index.html\"\n elif files == 'UserManual.ja':\n return \"index.ja.html\"\n else:\n return files.lower() + \".html\"\n else: # modules\n sol = files.replace('.py', '').replace('%2F', '_')\n return 'modules/' + sol + '.html'", "def prep_filename_masks(mask:str)->(list,list):\n mask = mask.strip()\n if '\"' in mask:\n # Temporary replace all ' ' into \"\" to '·'\n re_binqu= re.compile(r'\"([^\"]+) ([^\"]+)\"')\n while re_binqu.search(mask):\n mask= re_binqu.sub(r'\"\\1·\\2\"', mask) \n masks = mask.split(' ')\n masks = [m.strip('\"').replace('·', ' ') for m in masks if m]\n else:\n masks = mask.split(' ')\n fi_masks= [m for m in masks if m and m[0]!='/']\n fo_masks= [m[1:] for m in masks if len(m)>1 and m[0]=='/']\n return (fi_masks, fo_masks)", "def _bulkRename(self, items: List[QModelIndex]) -> None:\n items = [self._currPath.joinpath(x.siblingAtColumn(0).data())\n for i, x in enumerate(items) if i % len(self._modelHeaders) == 0]\n items = list(filter(lambda x: x.is_file(), items))\n illegalChars = {'<', '>', ':', '\"', '/', '\\\\', '|', '?', '*'}\n while True:\n response, onlySelected, extension, prefix, startNumber = self._bulkRenameDialog()\n if not response:\n break\n if not items and onlySelected:\n self._bulkRenameMsgBox('No files were selected!')\n continue\n elif any((c in illegalChars) for c in prefix):\n self._bulkRenameMsgBox('Illegal characters in prefix!')\n continue\n fileFound = False\n for path in self._currPath.glob('*'):\n try:\n if extension is not None and extension != path.suffix.lstrip('.'):\n continue\n if path.is_file() and (not onlySelected or path in items):\n path.rename(self._currPath.joinpath(f'{prefix}{str(startNumber)}{path.suffix}'))\n startNumber += 1\n fileFound = True\n except FileExistsError:\n self._bulkRenameMsgBox(f'File {path.name} already exists!')\n self._statusBar.showMessage('Operation aborted!', 3000)\n return\n finally:\n self._listDirectories()\n if not fileFound:\n self._bulkRenameMsgBox('No suitable files in given directory!')\n continue\n break", "def get_files_match(matches, dname, Lshow, Ldir=False):\n matched_files=[]\n ### two for-loop is converted\n for fname in os.listdir(dname):\n #print(f\"{fname}\")\n for match in matches:\n if re.search(match, fname):\n ### if it is dir skip\n if os.path.isdir(fname) and not Ldir:\n continue\n if Lshow:\n print(f\"detect {fname}\") # in {match} 
{matches}\")\n matched_files.append(fname)\n return matched_files", "def namelist(self):\n\n # try to create a name from the archive name\n # because a gzipped file doesn't have information about the\n # original filename\n # gzipping a file creates the archive name by appending \".gz\"\n genericfilename = self._archivename\n\n if not genericfilename:\n genericfilename = \"generic.unknown.gz\"\n\n try:\n # get list of file extensions\n fileendinglist = Archivehandle.avail_archive_extensionlist4type['gz']\n replacedict = {\"wmz\": \"wmf\",\n \"emz\": \"emf\"}\n for ending in fileendinglist:\n endingwithdot = \".\"+ending\n if genericfilename.endswith(endingwithdot):\n if ending in replacedict:\n genericfilename = genericfilename[:-len(ending)]+replacedict[ending]\n else:\n genericfilename = genericfilename[:-len(endingwithdot)]\n break\n\n except Exception as e:\n print(e)\n pass\n return [genericfilename]", "def get_file_names_from_paths(list_of_file_paths):\n if list_ops.is_list_like(list_of_file_paths):\n list_of_file_names = [os.path.basename(this_path) for this_path in list_of_file_paths]\n return list_of_file_names\n else:\n file_name = os.path.basename(list_of_file_paths)\n return file_name", "def rename_files():\n folder_dir = r\"C:\\Users\\keithmoore1.AD\\Desktop\\HAFB\\prankOrig\"\n files = os.listdir(folder_dir)\n save_path = os.getcwd() # current working directory\n for file in files:\n #remove digits from name\n new_file = file.lstrip(\"0123456789\")\n print(file, \" - \", new_file)\n # rename filename\n os.chdir(folder_dir)\n os.rename(file,new_file)\n # get back home\n os.chdir(save_path)", "def get_glob_strings(subdirglob):\n dirname = path.dirname(subdirglob)\n basename = path.basename(subdirglob)\n assert (((\"_M1_\" in subdirglob) or (\"_M2_\" in subdirglob)) or (\"_S_\" in subdirglob)), \\\n (\"_M1_ or _M2_ not in subdirglob, cant differentiate between M1 and M2, aborting.\"\n f\"glob: {subdirglob}\")\n if (\"*\" not in subdirglob) and (\"_S_\" not in basename):\n newbasename = basename.replace(\"_M2_\", \"_M1_\"), basename.replace(\"_M1_\", \"_M2_\")\n return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])\n elif (\"_M1_\" or \"_M2_\") in basename:\n newbasename = basename.replace(\"_M2_\", \"_M1_\"), basename.replace(\"_M1_\", \"_M2_\")\n return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])\n elif \"_S_\" in basename:\n return basename", "def makeLinks(files, folderName='SimFiles'):\n\n from os import symlink\n from os import chdir\n\n groups = { 1 : [\"Cos0.5\",\"Cos0.7\"],\n 2 : [\"Cos0.6\",\"Cos0.9\"],\n 3 : [\"Cos0.8\",\"Cos1.0\"]}\n\n for filename in files:\n for group,angles in groups.iteritems():\n if any(x in filename for x in angles):\n chdir(folderName + str(group))\n symlink('../'+filename, filename)\n chdir('../')", "def dismemberFilename(myname, mode):\n if mode == 'learn':\n return [pathLeaf(myname).replace('__', '/'), -1]\n elif mode == 'old':\n filename_parts = myname.split('__') # ['s1', 's2', 's3', 'SHA.c']\n SHA_and_extension = filename_parts[-1].split('.') # ['SHA', 'c']\n return ['/'.join(filename_parts[:-1]) + '.' 
+ SHA_and_extension[1], SHA_and_extension[0]]", "def registerWildKeepFiles(self):\n\n for card in self.keepFilePatterns:\n\n keepString = card['keepFileString'] # may contain subdirs\n infoString = card['infoString']\n displayColor = card['displayColor']\n\n keepStringTokens = keepString.strip().split('/')\n\n if len(keepStringTokens) == 1: # current dir keepfile pattern\n wildFiles = fnmatch.filter(os.listdir(self.runPath), keepString)\n [self.makeKeepFileEntry(file, infoString, displayColor) for file in wildFiles]\n\n elif len(keepStringTokens) > 1: # subdirs\n matches = ['']\n for pathPart in keepStringTokens:\n newMatches = []\n for match in matches:\n conts = os.listdir(os.path.join(self.runPath, match))\n newMatches.extend([os.path.join(match, f) for f in fnmatch.filter(conts, pathPart)])\n\n matches = newMatches\n [self.makeKeepFileEntry(m, infoString, displayColor) for m in matches]\n\n # now manually add the package configuration file to keep files\n self.makeKeepFileEntry(os.path.basename(self.confFile),\n \"Package XML test configuration file\",\n \"#cc3333\")", "def __str__(self):\n return ''.join(['(', ', '.join(self.filenamePatterns()), ')'])", "def change_file_name():\n path = \"/etc/atuned/webserver/\"\n file_list = os.listdir(path)\n file_list.sort(key=lambda fn: os.path.getmtime(path + fn))\n if len(file_list) > 0 and re.match(r'\\S*-\\d{17}\\S*', file_list[-1]) is None:\n old_name = file_list[-1].split(\".\")[0]\n curr_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n new_name = old_name + \"-\" + str(curr_time) + str(random.randint(100, 999))\n os.rename(path + old_name + \".txt\", path + new_name + \".txt\")", "def get_filtered_file_names_list(_file_names_list, _gender=None, _noise=None, _scale=None):\n _file_names_split_list = [re.split('[/_]+', fname) for fname in _file_names_list]\n\n if _gender:\n if type(_gender) == str:\n _gender = [_gender]\n _file_names_split_list = [f_name for f_name in _file_names_split_list if f_name[-3] in _gender]\n\n if _noise:\n if type(_noise) == str:\n _noise = [_noise]\n _file_names_split_list = [f_name for f_name in _file_names_split_list if f_name[-2] in _noise]\n\n if _scale:\n if type(_scale) == str:\n _scale = [_scale]\n _file_names_split_list = [f_name for f_name in _file_names_split_list if f_name[-1] in _scale]\n\n _file_names_list = ['_'.join(['/'.join(fname_split[:3]), fname_split[-2], fname_split[-1]])\n for fname_split in _file_names_split_list]\n\n return _file_names_list", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)", "def control_fastq_filename(demux_folder):\n pattern=re.compile(\"^(P[0-9]+)-([0-9]{3,4}).+fastq.*$\")\n for root, dirs, files in os.walk(demux_folder):\n for f in files:\n matches=pattern.search(f)\n if matches:\n new_name=f.replace(\"{}-{}\".format(matches.group(1), matches.group(2)), \"{}_{}\".format(matches.group(1), matches.group(2)))\n os.rename(os.path.join(root, f), os.path.join(root, new_name))", "def getlistofpossibletitles(fileitem,shows):\n title = []\n title.append(fileitem)\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n return title", "def sglob(files_pattern):\n return sorted(glob.glob(files_pattern))", "def expand(self, path_list):\n path_list2 = []\n for path in path_list:\n if glob.has_magic(path):\n iterator = glob.iglob(path)\n path_list2.extend(iterator)\n 
else:\n path_list2.append(path)\n return path_list2", "def get_filenames():\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames", "def get_files_exclude(matches, dname):\n all_files=os.listdir(dname)\n save_list=[]\n imatch=0\n for match in matches:\n for fname in all_files:\n ### exclude dir once\n if imatch==0:\n if os.path.isdir(fname):\n save_list.append(fname)\n #print file\n continue\n if re.search(match, fname):\n save_list.append(fname)\n #print file\n imatch+=1 \n for fname in save_list:\n #print file\n all_files.remove(fname)\n return all_files", "def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)", "def onApply(self, event):\n\n # Rename all of the files based on the substitution.\n for (old, new) in zip(self.m_diskNames, self.m_newNames):\n if old != new:\n old = os.path.join(self.m_curPath, old)\n new = os.path.join(self.m_curPath, new)\n try:\n os.rename(old, new)\n except OSError:\n pass\n\n # Now we out the lists so that what the user sees after this\n # reflects what's on disk.\n self.m_diskNames[:] = []\n self.m_newNames[:] = []\n\n # Update.\n self.updateDiskFileList()" ]
[ "0.62912023", "0.6283787", "0.61243826", "0.61238414", "0.6057", "0.6056427", "0.60261464", "0.6002019", "0.58697075", "0.58516043", "0.58399916", "0.5831643", "0.5815127", "0.5750396", "0.57445604", "0.57320064", "0.5677666", "0.56666535", "0.56408477", "0.5636326", "0.56356955", "0.5615281", "0.5612887", "0.56112707", "0.560815", "0.55919605", "0.5589841", "0.5588777", "0.5588556", "0.5584646", "0.5583549", "0.55819064", "0.556327", "0.55525696", "0.5544699", "0.55342376", "0.55209213", "0.54956305", "0.5460772", "0.5460412", "0.54362833", "0.5432537", "0.5432177", "0.5423222", "0.5421891", "0.54217064", "0.54210365", "0.54194397", "0.5417544", "0.541371", "0.54113317", "0.5409103", "0.5408975", "0.5407119", "0.5405549", "0.53986406", "0.5395834", "0.53880125", "0.538118", "0.5375873", "0.53736323", "0.536698", "0.53667206", "0.5365109", "0.5360787", "0.53597295", "0.53568214", "0.5353899", "0.5345918", "0.5342845", "0.5342384", "0.533519", "0.5330016", "0.53245026", "0.532254", "0.5316061", "0.5311979", "0.53036207", "0.5290314", "0.5287208", "0.5280796", "0.527897", "0.5271169", "0.52682465", "0.52653116", "0.5260359", "0.5225602", "0.52252245", "0.5224651", "0.5222648", "0.5221818", "0.5221472", "0.52193475", "0.5213235", "0.52089393", "0.5191563", "0.5184446", "0.51815593", "0.5179191", "0.5178183" ]
0.6573242
0
Handle the user changing directory.
def onDirectorySelectionChanged(self, event):
    newPath = self.m_directoryCtl.Path
    if self.m_curPath == newPath:
        return

    self.m_applyBtn.Disable()
    self.m_directoryCtl.ExpandPath(newPath)

    # Clear the directory list.
    self.m_fileList.Clear()

    self.m_curPath = newPath
    self.updateDiskFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ChangeDir(self, path: str) -> None:\n ...", "def change_base_dir():\n global base_dir\n while True:\n new_base = raw_input(\"New user directory? \")\n new_base = os.path.abspath(new_base)\n if os.path.exists(new_base):\n if os.path.isfile(new_base):\n print(\"ERROR: there is an existing file with that name.\")\n continue\n # make sure user can read and write this directory\n if not os.access(new_base, os.R_OK | os.W_OK):\n print(\"ERROR: directory access restricted\")\n continue\n print(\"OK: using existing directory\")\n break\n else:\n try:\n os.mkdir(new_base, 0700)\n except Exception:\n print(\"ERROR: directory creation failed\")\n continue\n print(\"OK: Created new directory.\")\n break\n\n from textwrap import wrap\n msg = wrap(\"\"\"WARNING: Your user files will be created in the directory\n'%(new_base)s' you have chosen. To access these files, you will either have\nto use the argument \"-dir:%(new_base)s\" every time you run the bot, or set\nthe environment variable \"PYWIKIBOT2_DIR\" equal to this directory name in\nyour operating system. See your operating system documentation for how to\nset environment variables.\"\"\" % locals(), width=76)\n for line in msg:\n print line\n ok = raw_input(\"Is this OK? ([yes], [N]o) \")\n if ok in [\"Y\", \"y\"]:\n base_dir = new_base\n return True\n print \"Aborting changes.\"\n return False", "def change_dir(filename):", "def on_dir_change(self, event):\r\n\r\n if self.dir_change_callback is not None:\r\n self.dir_change_callback(event)\r\n event.Skip()", "def chdir(self):\r\n self.directory=tkf.askdirectory()", "def change_dir(path): \r\n os.chdir(path)", "def changeDirectory( self, directory ):\n if directory[0] == '/':\n directory = directory.lstrip( '/' )\n self.cwd = '%s/%s' % ( self.cwd, directory )", "def change_directory(path):\n os.chdir(path)", "def process_IN_ISDIR(self, event):", "def editdirectory(self):\n\n ## Have user select existing directory\n new_directory = str(QtGui.QFileDialog.getExistingDirectory(self, \"Select Directory\",\n '/home/lsst/Data/'))\n\n ## If return is not NULL, set the DATA_DIRECTORY and update filename\n if new_directory:\n\n try:\n os.makedirs(new_directory)\n except OSError:\n if not os.path.isdir(new_directory):\n self.logger.exception(\"An error occurred while creating a new directory.\")\n\n global DATA_DIRECTORY\n DATA_DIRECTORY = new_directory\n self.displaydirectory()\n self.logger.info(\"Data directory changed to {0}.\".format(new_directory))", "def on_dir_pick(self, event):\r\n\r\n directory = self.GetPath()\r\n if directory is None or not exists(directory) or not isdir(directory):\r\n directory = expanduser(\"~\")\r\n directory = dirpickermsg(_(\"Select directory to rummage\"), directory)\r\n if directory is None or directory == \"\":\r\n directory = None\r\n self.SetPath(directory)\r\n evt = DirChangeEvent(directory=directory)\r\n wx.PostEvent(self, evt)\r\n event.Skip()", "def change_dir(f_path):\n if os.path.isdir(f_path):\n os.chdir(f_path)\n logger.debug(\"filepath is a directory\")\n logger.debug(\"Changed directory to: %s\", f_path)\n else:\n os.chdir(os.path.dirname(f_path))\n logger.debug(\"filepath is a file\")\n logger.debug(\"Changed directory to: %s\", os.path.dirname(f_path))", "def to_dir_changed(self):\n text = self.to_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set destination_path = (?) 
where id is 1', text)", "def changeDirectory(self, directory):\n self.pushMode(CLI_MODES.shell)\n output = self.sendCmd(\"cd %s\" % directory)\n self.popMode()\n if \"No such file or directory\" in output:\n logger.error (\"No such file or directory exist : %s\" %directory)\n return output", "def changeScanDirectoryCB(self, *args):\n trace.into('ProctorGUI', 'changeScanDirectoryCB', args=args)\n if self.state == 'idle':\n file_name = tkFileDialog.askopenfilename(\n title='Select a file in the directory to scan...')\n trace.writeVar(file_name=file_name)\n directory_name = os.path.dirname(file_name)\n trace.writeVar(directory_name=directory_name)\n if directory_name:\n prefix = os.path.commonprefix((self.scan_directory_parent, directory_name))\n remainder = directory_name[len(prefix)+1:]\n self.scan_directory.set(remainder)\n else:\n self.showError('Cannot rescan during a test run. Reset first.')\n trace.outof()\n return", "def change_dir(self):\n self.working_dir = self.state_frame[0]\n self.state = STATE_READ_LINE", "def changeCWD(self):\n new_dir = QtWidgets.QFileDialog.getExistingDirectory(self, \"New working directory\", os.getcwd())\n\n logging.debug(\"Changing directory: '%s'\", new_dir)\n\n if new_dir and os.path.isdir(new_dir):\n os.chdir(new_dir)\n self.updateCWD()", "def _onFolder(self, event):\n eventId = event.GetId()\n if eventId == self.btnexpt.GetId():\n defaultDirectory = self.textexpt.GetValue()\n else:\n defaultDirectory = self.textfold.GetValue()\n dialog = wx.DirDialog(self, 'Choose a directory', defaultDirectory)\n if dialog.ShowModal() == wx.ID_OK:\n if eventId == self.btnexpt.GetId():\n self.textexpt.SetValue(dialog.GetPath())\n else:\n self.textfold.SetValue(dialog.GetPath())", "def changeDirectory(self, directory):\n self._cwd = directory", "def chdir(self, directory):\n self.eval(\"cd('{0}')\".format(directory))", "def change_directory(self):\r\n dialog = QtWidgets.QFileDialog(self)\r\n dialog.setFileMode(QtWidgets.QFileDialog.Directory)\r\n if dialog.exec_():\r\n folder = dialog.selectedFiles()[0]\r\n self.tree.setRootIndex(self.model.index(folder))\r\n self.path_viewer.setText(folder)", "def magic_cd(self, parameter_s=''):\n \n ps = parameter_s.strip()\n if ps == '-':\n try:\n ps = self.user_ns['_dh'][-2]\n except:\n print 'No previous directory to change to.'\n return\n elif ps.startswith('-'):\n try:\n ps = self.user_ns['_dh'][\n int(ps.replace('-','').strip())]\n except:\n print 'Requested directory doesn not exist in history.'\n return\n if ps:\n try:\n os.chdir(os.path.expanduser(ps))\n except OSError:\n print sys.exc_info()[1]\n else:\n self.user_ns['_dh'].append(os.getcwd())\n \n else:\n os.chdir(self.home_dir)\n self.user_ns['_dh'].append(os.getcwd())\n print self.user_ns['_dh'][-1]", "def withDirectoryChange(path, allow_none=False):\n\n # spellchecker: ignore chdir\n\n if path is not None or not allow_none:\n old_cwd = os.getcwd()\n os.chdir(path)\n\n yield\n\n if path is not None or not allow_none:\n os.chdir(old_cwd)", "def _notebook_dir_changed(self, name, old, new):\n\t\tself.notebook_dir = new", "def on_modified(self, event):\n super(myEventHandler,self).on_modified(event)\n if event.is_directory:\n try:\n source = event.src_path\n dest = event.src_dest\n pathtoonedir = self.onedir.getonedirrectory()\n source = source.replace(pathtoonedir ,\"\")\n dest = dest.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(source, dest)\n except Exception as e:\n print e\n exit(1)\n else:\n source = event.src_path\n try:\n #use os.path.split to get file 
name and path\n splitpath = split(source)\n file = splitpath[1]\n if file.startswith('.'):\n return\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def on_dir_changed(self, event):\r\n\r\n if not self.searchin_update:\r\n pth = event.directory\r\n if pth is not None and exists(pth):\r\n self.searchin_update = True\r\n self.m_searchin_text.safe_set_value(pth)\r\n self.searchin_update = False\r\n event.Skip()", "def chown_dir ( self, fspath ):\n return", "def chdir(self, path):\n # temporarily join the specified directory to see if we have\n # permissions to do so\n basedir = os.getcwd()\n try:\n os.chdir(path)\n except os.error:\n raise\n else:\n os.chdir(basedir)\n self.cwd = self.fs2ftp(path)", "def _processNewDirectory(self, dirpath):\n self._parent.processDirectory(dirpath)", "def update_dir(self, new_dir):\n self.save_loc.setText(new_dir)", "def set_dir(text_field, pref_name, start_dir_callback, update_pref_callback, *args):\n start_dir = start_dir_callback(pref_name)\n\n # Prompt user with file dialog box.\n # If they don't provide any input, exit the function.\n directory = pm.fileDialog2(fileMode=2, dialogStyle=2,\n startingDirectory=start_dir)\n if not directory:\n return\n\n # Assign user input to the Program Directory Text field in the Mimic UI.\n pm.textField(text_field, edit=True, text=directory[0])\n if update_pref_callback:\n update_pref_callback(pref_name, directory[0])", "def setUnimacroUserDirectory(self, v):\n key = 'UnimacroUserDirectory'\n\n oldDir = self.getUnimacroUserDirectory()\n # v = os.path.normpath(os.path.expanduser(v))\n uuDir = self.isValidPath(v, wantDirectory=1)\n if uuDir:\n oldDir = self.isValidPath(oldDir, wantDirectory=1)\n if oldDir == uuDir:\n print(f'The UnimacroUserDirectory was already set to \"{uuDir}\", and Unimacro is enabled')\n return\n if oldDir:\n print(f'\\n-----------\\nChanging your UnimacroUserDirectory\\nConsider copying inifile subdirectories (enx_inifiles or nld_inifiles)\\n' \\\n 'from old: \"{oldDir}\" to the\\n' \\\n 'new UnimacroUserDirectory \"{uuDir}\"\\n--------\\n')\n self.userregnl.set(key, v)\n \n self.UnimacroUserDirectory = uuDir\n \n # clear this one, in order to refresh next time it is called:\n self.UnimacroGrammarsDirectory = None\n \n self.userregnl.delete('Old'+key)\n print(f'Enable Unimacro, and set UnimacroUserDirectory to {uuDir}')\n return\n mess = f'natlinkconfigfunctions, could not Enable Unimacro, and set the UnimacroUserDirectory to \"{v}\"'\n return mess", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get pathname\n pathname = tk.filedialog.askdirectory(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def set_directory(self, directory):\n\t\tself.edit.set_text(directory)", "def _changeScanDirectoryVariableCB(self, *args):\n trace.into('ProctorGUI', '_changeScanDirectoryCB', args=args)\n self._buildScanner()\n trace.outof()\n return", "def chdir(self, path):\n if not path:\n path = \"/\"\n elif not path.endswith(\"/\"):\n path = \"{}/\".format(path)\n res = self.get_cdmi(path)\n if res.ok():\n cdmi_info = res.json()\n # Check that object is a container\n if not cdmi_info[\"objectType\"] == CDMI_CONTAINER:\n return 
Response(406, u\"{0} isn't a container\".format(path))\n if cdmi_info[\"parentURI\"] == \"/\" and cdmi_info[\"objectName\"] == \"Home\":\n # root\n self._pwd = \"/\"\n else:\n self._pwd = \"{}{}\".format(\n cdmi_info[\"parentURI\"], cdmi_info[\"objectName\"]\n )\n return Response(0, \"ok\")\n else:\n return res", "def __post_init__(self) -> None:\n if self.is_directory and not self.path.endswith('/'):\n self.path += '/'", "def choose_directory():\n \n directory = askdirectory()\n chdir(directory)", "def select_dir(self):\n prev_val = self.var_path.get()\n if self.conf_dir == \"dir_app\" or self.conf_dir == \"urls\":\n dir_ = fd.askopenfilename(parent=self.parentframe,\n initialdir=Path.home()) or prev_val\n else:\n dir_ = fd.askdirectory(parent=self.parentframe,\n initialdir=Path.home()) or prev_val\n\n self.var_path.set(value=dir_)\n if dir_ != prev_val:\n conf[self.conf_dir] = dir_\n self.handle_modified()", "def _editHandler(self) -> None:\n if self._editItemType:\n self._createDir()\n else:\n self._renameDir()", "def output_dir_change_action(self):\n fileDialog = QFileDialog()\n directory = fileDialog.getExistingDirectory()\n self.dynamic.output_directory.setText(directory)", "def browse_directories(self,event):\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE|wx.DD_NEW_DIR_BUTTON)\n if dlg.ShowModal() == wx.ID_OK:\n self.path_txtBox.SetValue(str(dlg.GetPath()))\n dlg.Destroy()", "def chdir(where):\n from twill import commands\n \n cwd = os.getcwd()\n _dirstack.append(cwd)\n print(cwd)\n\n os.chdir(where)\n print('changed directory to \"%s\"' % (where,), file=commands.OUT)\n\n commands.setglobal('__dir__', where)", "def update_path():\n #TODO update path information\n pass", "def chdir_in_and_out(request, path):\n oldWorkDirStr = str(local.cwd)\n workDir = local.cwd\n workDir.chdir(path)\n request.addfinalizer(lambda: workDir.chdir(oldWorkDirStr))\n return type(\"\", (), {\"oldWorkDirStr\": oldWorkDirStr})", "def change_dir_without_context_manager(filename1, filename2):", "def go_to(dir):\n work = \"/home/prm/Desktop/optical/optical/CAHA/cig96_jun16/\" + dir\n os.chdir(work)\n #print \"Work/save directory:\", work", "def change_dir(destination):\n cwd = os.getcwd()\n try:\n os.chdir(destination)\n yield\n finally:\n os.chdir(cwd)", "def open_folder(self, event):\n if self.advancedMenu:\n self.advancedMenu.Show(False)\n home = os.path.expanduser('~')\n c = config.Config()\n panda = None\n if c.username:\n # try for full path if there is a username\n panda = os.path.join(home, 'Digital Panda', c.username)\n if not os.path.exists(panda):\n # if the path doesn't exist - reset\n panda = None\n if not panda:\n # get base folder (without acccount)\n panda = os.path.join(home, 'Digital Panda')\n if not os.path.exists(panda):\n try:\n os.makedirs(panda)\n except:\n print \"TODO: need to handle folder creation failure!\"\n open_folder(panda)", "def do_cwd(ftp):\n new_dir = raw_input(\"What directory do you want to change to?\\n> \")\n try:\n output = ftp.cwd_cmd(new_dir)\n if get_ftp_server_code(output) == FTP_STATUS_CODES[\"SUCCESSFUL_CWD\"]:\n print(\"Successfully changed directory\\n\")\n else:\n print(\"Invalid directory or insufficient permissions.\\n\")\n except Exception as e:\n print(\"An error has occurred: \" + str(e) + \"\\nPlease try again.\")\n return main_menu(ftp)\n main_menu(ftp)", "def test_changedir(self):\n self.dot_case.chdir('.')\n self.assertEqual(self.dot_case.__repr__(), '/a/b/c/./../../g/.')\n self.dot_case.chdir('..')\n 
self.assertEqual(self.dot_case.__repr__(), '/a/b/c/./../../g/./..')\n \n self.dot_case = uri.URI(path='/a/b/c/./../../g')\n self.dot_case.chdir('./')\n self.assertEqual(self.dot_case.__repr__(), '/a/b/c/./../../g/./')\n self.dot_case.chdir('/../')\n self.assertEqual(self.dot_case.__repr__(), '/a/b/c/./../../g/./../')", "def update_dir(self, key):\r\n\r\n \r\n if key == K_UP:\r\n self.last_dir = 'u' \r\n elif key == K_DOWN:\r\n self.last_dir = 'd'\r\n elif key == K_RIGHT:\r\n self.last_dir = 'r' \r\n elif key == K_LEFT:\r\n self.last_dir = 'l' \r\n pass", "def f_chdir(my_dir):\n if not os.path.isdir(my_dir):\n os.mkdir(my_dir)\n os.chdir(my_dir)", "def change_dir(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)", "def change_dir(dir, logger, throw_exception=True):\n\n logger.debug('ChgDir: '+dir+' (from '+str(os.getcwd())+')')\n\n status = os.chdir(dir)\n\n if status:\n if throw_exception:\n raise StopError('Problem changing to directory '+dir)\n else:\n logger.error('Problem changing to directory '+dir)", "def chdir(self):\n try:\n old = self.__class__()\n except error.ENOENT:\n old = None\n error.checked_call(os.chdir, self.strpath)\n return old", "def set_working_folder():\n username = getpass.getuser()\n osType = sys.platform\n if username.lower() == 'youval':\n if osType.startswith('win'):\n dr = r'C:\\Phenix\\Dev\\Work\\work\\Clashes\\wtest'\n else:\n dr = '/net/cci/youval/work/work/Clashes/wtest'\n os.chdir(dr)", "def change_dir(self, src: str = None, dest: str = None):\n\n if not is_empty(src):\n self._srcDir = src\n\n if not is_empty(dest):\n self._destDir = dest", "def user_directories():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('User Directories', level=1)\r\n userdirectories = get_qlik_sense.get_userdirectory()\r\n num_of_udc = len(userdirectories)\r\n table = document.add_table(rows=num_of_udc+1, cols=6)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'userDirectoryName'\r\n row.cells[2].text = 'configured'\r\n row.cells[3].text = 'operational'\r\n row.cells[4].text = 'type'\r\n row.cells[5].text = 'syncOnlyLoggedInUsers'\r\n for directory in range(num_of_udc):\r\n row = table.rows[directory+1]\r\n row.cells[0].text = str(userdirectories[directory][0])\r\n row.cells[1].text = str(userdirectories[directory][1])\r\n row.cells[2].text = str(userdirectories[directory][2])\r\n row.cells[3].text = str(userdirectories[directory][3])\r\n row.cells[4].text = str(userdirectories[directory][4])\r\n row.cells[5].text = str(userdirectories[directory][5])\r\n\r\n # document.add_page_break()\r", "def restrict_dir(self, dir):\n if dir != self.dir:\n self.fileDialog.setDirectory(self.dir)", "def chdir(new_dir):\n cur_dir = os.getcwd()\n # This is weird behavior. 
I'm removing and and we'll see if anything breaks.\n #safe_makedir(new_dir)\n os.chdir(new_dir)\n try:\n yield\n finally:\n os.chdir(cur_dir)", "def cd_manager(self, new_wd):\n old_wd = self.cwd\n self.cd(new_wd)\n yield self.cwd\n self.cd(old_wd)", "def onBrowse_owp(self, event):\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n )\n if self.m_weight_path == \"\":\n default_path = settings.LAMBDA_DATA_FOLDER\n else:\n default_path = self.m_weight_path\n\n dlg.SetPath(default_path)\n\n if dlg.ShowModal() == wx.ID_OK:\n self.m_weight_path = dlg.GetPath()\n self.m_textCtrl_output_weight_path.write(self.m_weight_path)\n dlg.Destroy()\n\n self.onModelWeightNameChange()", "def use_dir(new_dir):\n owd = os.getcwd()\n os.chdir(new_dir)\n\n try:\n yield\n finally:\n os.chdir(owd)", "def updateCWD(self):\n dirname = os.getcwd()\n\n self.currentDirectoryLabel.setText(\"CWD: '%s'\" % dirname)\n self.imageViewer.changeDir(dirname)", "def cd(self, directory):\n return ChangeDir(directory)", "def _checkpoint_dir_changed(self, name, old, new):\n\t\tif not new.endswith('/'):\n\t\t\tnew += '/'\n\n\t\tif not key_exists(self.bucket, new):\n\t\t\tself.log.debug(\"Creating checkpoint dir %s\", new)\n\t\t\ttry:\n\t\t\t\t# TODO: Create empty key instead of empty file(?)\n\t\t\t\tnew_key_from_string(self.bucket, new, '')\n\t\t\texcept:\n\t\t\t\traise TraitError(\"Couldn't create checkpoint dir %r\" % new)\n\n\t\tself.checkpoint_dir = new", "def homeDirectory(self, ignored_value):\n\t\tself.__homeDirectory = self._resolve_home_directory()", "def browse_folder(self, subdir=\".\"):\n if self.show_save_action:\n self.ui_Action.setEnabled(True)\n if self.show_dirs_only:\n self.ui_Action.setEnabled(True)\n self.ui_DirList.clear()\n if subdir == \".\":\n _sub_dir = self.active_url\n else:\n _sub_dir = subdir\n if len(self.directory_history) == 0:\n self.directory_history.append(_sub_dir)\n for item in reversed(self.directory_history):\n self.ui_DirList.addItem(item)\n self.ui_DirList.setCurrentIndex(self.last_dir_index)", "def set_path(self, directory):\n self.directory = directory", "def cd(newdir):\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)", "def _enter_folder(self, event):\n del event\n if Path(self.txt_path.value).is_dir():\n self._open_folder(tgt_folder=self.txt_path.value)", "def chdir(self):\n if not self.mru_exp_data:\n logging.error(\"data directory not set for prototxt db\")\n else:\n with suppress_errors():\n # Prototxt may depend on data path\n data_dir = op.join(self.mru_exp_data, \"data\")\n assert op.isdir(data_dir), \"No 'data' directory found in {}\".format(self.mru_exp_data)\n os.chdir(self.mru_exp_data)", "def dir_user(assignment, user):\n return os.path.join(repository, assignment, user)", "def chdir(new_dir):\n cur_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield\n finally:\n os.chdir(cur_dir)", "def chdir(new_dir):\n cur_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield\n finally:\n os.chdir(cur_dir)", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get path name\n pathname = tk.filedialog.asksaveasfilename(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get path name\n pathname = 
tk.filedialog.asksaveasfilename(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def directoryModifiedHandler(ob, event):\n query = dict(object_provides=IEntry.__identifier__)\n for l in ob.restrictedTraverse('@@folderListing')(**query):\n l.getObject().reindexObject(idxs=[\"pdir_keywords\"])", "def set_dir(dir_type, path):\n\n\t\tif dir_type != 'source_dir' and dir_type != 'lyrics_dir':\n\t\t\tprint('Invalid \"dir_type\". Only \"source_dir\" or \"lyrics_dir\" are valid types.')\n\t\t\tprint('You gave \"dir_type\":', dir_type)\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\t# If user is setting \"source_dir\", return if the path provided does not exist.\n\t\t# This improves the usage - lyrico <source_dir>\n\t\tif dir_type == 'source_dir':\n\t\t\tif not os.path.isdir(path):\n\t\t\t\tprint('\"source_dir\" does not exist. ', end=\"\")\n\t\t\t\tprint('You gave \"source_dir\":', path)\n\t\t\t\tprint('Please enter path to an existing folder.')\n\t\t\t\treturn False\n\t\t\tConfig.source_dir = path\n\t\t# make directory if user is setting \"lyrics_dir\" and it does not exists.\n\t\t# Refer http://stackoverflow.com/a/14364249/2426469\n\t\t# elif dir_type == 'lyrics_dir':\n\t\t# \ttry:\n\t\t# \t\tos.makedirs(path)\n\t\t# \t\tprint('Directory does not exist. Creating new one.')\n\t\t# \texcept OSError:\n\t\t# \t\tif not os.path.isdir(path):\n\t\t# \t\t\t# this exception is handled by function calling set_dir\n\t\t# \t\t\traise\n\t\t# \tConfig.lyrics_dir = path\n\n\t\t# print(dir_type, 'updated.')\n\t\tif dir_type == 'source_dir':\n\t\t\tprint('Lyric Grabber will scan the following folder for audio files:')\n\t\t# else:\n\t\t# \tprint('lyrico will save lyrics files in the following folder:')\n\t\tprint(' ', path)\n\t\treturn True", "def syncfolder():", "def set_lang_path(self, new_lang_path):\n\n sql = \"UPDATE Users SET current_lang_path = ? WHERE username = ?\"\n self.conn.execute(sql, (new_lang_path, self.username))\n self.conn.commit()", "def chdir(path):\n\tos.chdir(path)\n\tsyntax = \"cd '%s'.\" % path\n\tif __debug__:\n\t\tprint syntax\n\tspss.Submit(syntax)", "def chdir(new_dir):\n cur_dir = os.getcwd()\n # FIXME: currently assuming directory exists\n safe_makedir(new_dir)\n os.chdir(new_dir)\n try:\n yield\n finally:\n os.chdir(cur_dir)", "def on_created(self, event):\n super(myEventHandler,self).on_created(event)\n #not syncing empty directories serverside atm\n if self.onedir.cookies is None or not self.onedir.autosyncstatus():\n return\n source = event.src_path\n if event.is_directory:\n try:\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = source.replace(pathtoonedir ,\"\")\n self.onedir.senddirectory(relpath)\n except Exception as e:\n print \"Error syncing directory\" + e\n exit(1)\n else:\n source = event.src_path\n try:\n #use os.path.split to get file name and path\n splitpath = split(source)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! 
\" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def ch_dirDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n dir_path = QFileDialog.getExistingDirectory(self, \"Выбор папки\", os.path.expanduser(\"~\"))\n if dir_path:\n self.full_ed_lines[2].setText(dir_path + '/')\n self.change_data()\n print(dir_path)", "def change_to_current_path(to_change_path):\n os.chdir(to_change_path)", "def cd_up(self):\n parts = self.cwd.split(\"\\\\\")\n self.cwd = \"\"\n for i in parts[:-1]:\n self.cwd += i + \"\\\\\"\n self.cwd = self.cwd[:-1]", "def onBrowse(self, event):\n data_folder = \"\"\n\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n )\n default_path = self.m_textCtrl_searchfolder.GetValue()\n if default_path != '':\n default_path = os.path.dirname(default_path)\n dlg.SetPath(default_path)\n\n if dlg.ShowModal() == wx.ID_OK:\n self.m_textCtrl_searchfolder.Clear()\n\n self.m_textCtrl_searchfolder.write(dlg.GetPath())\n self.m_search_folder = dlg.GetPath()\n dlg.Destroy()", "def setDNSIniDir(self, new_dir):\n key = 'DNSIniDir'\n if os.path.isdir(new_dir):\n # check INI files:\n nssystem = os.path.join(new_dir, self.NSSystemIni)\n nsapps = os.path.join(new_dir, self.NSAppsIni)\n if not os.path.isfile(nssystem):\n mess = 'folder %s does not have the INI file %s'% (new_dir, self.NSSystemIni)\n print(mess)\n return mess\n if not os.path.isfile(nsapps):\n mess = 'folder %s does not have the INI file %s'% (new_dir, self.NSAppsIni)\n print(mess)\n return mess\n self.userregnl.set(key, new_dir)\n self.userregnl.delete(\"Old\"+key)\n self.getDNSIniDir(force=1)\n return # OK\n else:\n mess = \"setDNSIniDir, not a valid directory: %s\"% new_dir\n print(mess)\n return mess # signify an error...", "def open_file_directory(self):\r\n index = self.tree.currentIndex()\r\n file_path = self.model.filePath(index)\r\n if Path(file_path).is_dir():\r\n self.set_new_path(file_path)\r\n else:\r\n try:\r\n os.startfile(file_path)\r\n except Exception as e:\r\n QtWidgets.QMessageBox.critical(self, \"File Error\", \r\n \"The system cannot open this file:\\n\\n{}\".format(repr(e)))", "def inside_dir(dirpath):\n old_path = os.getcwd()\n try:\n os.chdir(dirpath)\n yield\n finally:\n os.chdir(old_path)", "def chdir(self) -> None:\n os.chdir(self.analysis.get_bam_dir())", "def select_dir(self):\n dir_loc = QtGui.QFileDialog.getExistingDirectory()\n self.update_dir(dir_loc)", "def _renameDir(self) -> None:\n try:\n path = self._currPath.joinpath(self._editItemNameBefore)\n nameAfter = self._editItem.text()\n pathTo = self._currPath.joinpath(nameAfter)\n path.rename(pathTo)\n self._listDirectories()\n renamedItem = self._model.findItems(nameAfter)\n index = self._model.indexFromItem(renamedItem[0])\n self._mainFileView.scrollTo(index)\n self._mainFileView.setCurrentIndex(index)\n except FileExistsError:\n self._statusBar.showMessage('File/folder with that name already exists!', 3000)\n self._listDirectories()", "def proj_set_directory(self, isTyped, widgetIndex):\r\n if isTyped == True:\r\n newPath = self.widgetList[widgetIndex].get()\r\n else:\r\n newPath = tkFileDialog.askdirectory(**self.dir_opt)\r\n kT.debug_log('New path: ' + newPath)\r\n if newPath != '':\r\n self.widgetList[widgetIndex].delete(0, END)\r\n self.widgetList[widgetIndex].insert(0, newPath)\r\n return", "def _dir(self):\n logger.debug(\"Popping Dir browser\")\n return filedialog.askdirectory(**self._kwargs)", "def SetPath(self, 
directory):\r\n\r\n if directory is not None and exists(directory) and isdir(directory):\r\n self.directory = directory", "def changeSavedFolder(self):\n sfl = QFileDialog()\n sfl.setFileMode(QFileDialog.Directory)\n foldName = sfl.getExistingDirectory()\n fixfoldName = foldName + \"/\"\n if fixfoldName:\n self.newFolderPath.setText(fixfoldName)", "def from_dir_changed(self):\n text = self.from_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set source_path = (?) where id is 1', text)\n all_files = self.get_all_files_from_path(text, extension='PDF')\n self.pdf_files = self.make_all_files_dictionary(all_files)\n\n if not self.pdf_files:\n return\n\n self.reset_widgets(all=True)\n self.draw_pdf_files()" ]
[ "0.71233064", "0.688778", "0.67881495", "0.6747246", "0.6743474", "0.6619424", "0.6606413", "0.6544958", "0.6513183", "0.6458206", "0.64284754", "0.6231321", "0.62166995", "0.6192592", "0.6178349", "0.6172817", "0.6166601", "0.6157614", "0.6141804", "0.6134251", "0.6129112", "0.6114343", "0.60664964", "0.5998323", "0.5990556", "0.598914", "0.59788096", "0.5970929", "0.5946915", "0.59372175", "0.5931576", "0.5926886", "0.59215677", "0.5885916", "0.58618426", "0.5857172", "0.58527356", "0.58423334", "0.5822792", "0.5807392", "0.5791943", "0.5730282", "0.57175344", "0.5715611", "0.5715036", "0.56938756", "0.5670874", "0.5624546", "0.56201166", "0.56170946", "0.56146985", "0.5604345", "0.55955684", "0.5587127", "0.5583423", "0.5576807", "0.5575129", "0.55741817", "0.55706674", "0.55654025", "0.5560615", "0.55488855", "0.55458623", "0.5542071", "0.5541619", "0.5538707", "0.5537274", "0.55332226", "0.5531545", "0.5524997", "0.55207396", "0.55057365", "0.5494485", "0.54704875", "0.5466597", "0.5466597", "0.5456537", "0.5456537", "0.545238", "0.54502726", "0.54463375", "0.544467", "0.5443036", "0.5441511", "0.5429554", "0.5427264", "0.5425572", "0.5408881", "0.5401955", "0.54016936", "0.5396956", "0.53871286", "0.5376917", "0.536897", "0.53675705", "0.5364895", "0.53572404", "0.5350384", "0.5345378", "0.5342139" ]
0.57040215
45
User has clicked the Apply button.
def onApply(self, event):
    # Rename all of the files based on the substitution.
    for (old, new) in zip(self.m_diskNames, self.m_newNames):
        if old != new:
            old = os.path.join(self.m_curPath, old)
            new = os.path.join(self.m_curPath, new)
            try:
                os.rename(old, new)
            except OSError:
                pass

    # Now we clear out the lists so that what the user sees after this
    # reflects what's on disk.
    self.m_diskNames[:] = []
    self.m_newNames[:] = []

    # Update.
    self.updateDiskFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dlg_apply(self):\n pass # override", "def on_okButton_clicked(self):\n self.accept=True", "def apply(event: EventType, widget: WidgetType) -> bool:\n return event.key == KEY_APPLY", "def on_apply_clicked(self,button):\n\t\tdialog = ConfirmPerformActions()\n\t\t\n\t\tresponse = dialog.run()\n\n\t\tif response == Gtk.ResponseType.OK:\n \n\t\t\tdialog.destroy()\n\t\t\tself.list_partitions.perform_actions()\n\t\t\t\n\t\telif response == Gtk.ResponseType.CANCEL:\n\t\t\tdialog.destroy()", "def ok(self, event=None):\n if not self.validate():\n self.initial_focus.focus_set()\n return\n\n self.withdraw()\n self.update_idletasks()\n self.apply()\n self.cancel()", "def on_ok(self, event=None):\r\n self.on_save()\r\n # if not self.validate():\r\n # self.initial_focus.focus_set() # put focus back\r\n # return\r\n # self.withdraw()\r\n # self.update_idletasks()\r\n self.on_cancel()", "def doHandleApply(self, action):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return False\n changes = self.applyChanges(data)\n if changes:\n self.status = self.successMessage\n else:\n self.status = self.noChangesMessage\n return True", "def okay_button2(self, i):\n if i.text() == \"OK\":\n print(\"Entry overwritten\")", "def okay_button1(self, i):\n if i.text() == \"OK\":\n print(\"Entry added\")", "def push_button_ok_clicked(self) -> None:\n if self.save():\n self.close()", "def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state", "def askAssay(self):\n self.btnAdd.setEnabled(False)\n self.wantAssay.emit()", "def on_ok_click(self):\r\n\t\t# check once more that the paths in the line edit are valid\r\n\t\tvalid_paths = self.check_paths()\r\n\t\tif valid_paths:\r\n\t\t\tif self.existing_case:\r\n\t\t\t\tif self.filestate.get_source_db_filename() != self.filestate.get_sink_db_filename():\r\n\t\t\t\t\tvalid_paths = display_yes_no_message(self,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"Sink database is different from source database. Any new \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"annotations you add will be added to the sink database \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"only. Proceed?\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tvalid_paths = display_yes_no_message(self,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"Sink database is the same as source database. Source \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"database will be modified. Proceed?\")\r\n\t\t\telif self.filestate.sink_db_file_preexists:\r\n\t\t\t\tvalid_paths = display_yes_no_message(self,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"Sink database already exists and will be cleared of any table \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"named \\'annotations\\' before being used. Proceed?\")\r\n\r\n\t\tif valid_paths: # still\r\n\t\t\tself.hide()\r\n\t\t\tself.app = Application(filestate=self.filestate, parent=self, existing_case=self.existing_case)\r\n\t\t\tself.app.showMaximized()", "def apply_all(self):\n\n print(\"Are you sure? 
Enter 'y' if so\")\n\n if input() == 'y':\n\n for job in self.old_jobs:\n if job.is_relevant:\n job.reject('a') # 0 for apply\n self.jobs_save(self.old_jobs, 'overwrite')\n print('All relevant jobs have been marked as applied')\n\n else:\n print('returning to main menu')", "def ok(self, _=None):\r\n\r\n if not self.validate():\r\n self.initial_focus.focus_set() # put focus back\r\n return\r\n self.withdraw()\r\n self.update_idletasks()\r\n self.temp.set(True) # set boolean variable temp equal to True\r\n self.apply()\r\n self.parent.focus_set()\r\n self.destroy()", "def ok(self, event=None):\n\t\tself.withdraw()\n\t\tself.update_idletasks()\n\t\tself.result = self.provider.apply()\n\t\tself.parent.focus_set()\n\t\tself.destroy()", "def on_update_after_submit(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()", "def __updateOK(self):\n enabled = True\n if self.idButton.isChecked():\n enabled = self.idEdit.text() != \"\"\n elif self.tagButton.isChecked():\n enabled = self.tagCombo.currentText() != \"\"\n elif self.branchButton.isChecked():\n enabled = self.branchCombo.currentText() != \"\"\n elif self.remoteBranchButton.isChecked():\n enabled = self.remoteBranchCombo.currentText() != \"\"\n \n enabled &= (self.commitGroupBox.isChecked() and\n self.commitMessageEdit.toPlainText() != \"\")\n \n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)", "def click_apply_keywords_button(self):\n self._basket.click_apply_keywords_button()", "def after_successful_edit(self):\n pass", "def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state", "def _clicked_yes_button(self):\n self.yes = True", "def OnButtonAboutOKButton(self, event):\r\n\t\tself.OnButtonOKButton()", "def buttonOK_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_OK)", "def dr_approve(self):\n print \"DR approved this form. Current state:\", self.state", "def apply_to_job(self, job_listing):\r\n\r\n print('\\n')\r\n self.job_listing = job_listing\r\n print('You are applying to: ', self.job_listing.text)\r\n #apply_or_discard = input('Do you want to apply for this job? Please enter Yes or No: ')\r\n\r\n #if 'yes' in apply_or_discard.lower():\r\n try:\r\n self.driver.implicitly_wait(3)\r\n apply_button = self.driver.find_element_by_xpath(\"//div[@class='jobs-apply-button--top-card']\")\r\n apply_button.click()\r\n except NoSuchElementException:\r\n print('You have already applied to this position.')\r\n pass\r\n time.sleep(2)\r\n\r\n try:\r\n self.driver.implicitly_wait(3)\r\n submit_application = self.driver.find_element_by_xpath('//button[@aria-label=\"Submit application\"]')\r\n submit_application.click()\r\n except NoSuchElementException:\r\n print('This is not an Easy Apply position. 
Moving on to next role...')\r\n discard_application = self.driver.find_element_by_xpath('//button[@aria-label=\"Dismiss\"]')\r\n discard_application.click()\r\n self.driver.implicitly_wait(2)\r\n confirm_discard = self.driver.find_element_by_xpath('//button[@data-test-dialog-primary-btn]')\r\n confirm_discard.click()\r\n pass\r\n #else:\r\n #pass\r", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def on_ConfirmWalletOP_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def OnButtonOptionsOKButton(self, event):\r\n\t\tself.OnButtonOKButton()", "def checkChanged(self):\n if len(self.undoStack):\n self.msg = messagebox.askyesnocancel('Save Data?',\n 'Game is not saved. Save it?')\n if self.msg == None:\n return 'cancel'\n elif self.msg == 'yes':\n self.save()\n return 'yes'\n else:\n return 'no'", "def confirm_apply(request, pk):\n prop = get_object_or_404(Project, pk=pk)\n if Application.objects.filter(Q(Project=prop) & Q(Student=request.user)).exists():\n return render(request, \"base.html\", context={\n \"Message\": \"You already applied to this project.\",\n \"return\": 'students:list_applications',\n })\n return render(request, \"students/apply.html\", context={\n \"project\": get_object_or_404(Project, pk=pk),\n })", "def ask_dirty(self):\n if not self.unsaved: return True\n if QtWidgets.QMessageBox.question(self, \"Are you sure\", \"There are unsaved changes in the file you sure\",\n\t\t\tQtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) != QtWidgets.QMessageBox.Yes:\n return False\n return True", "def popup(self):\r\n return self.exec_() == QDialog.Accepted", "def on_buttonBox_accepted(self):\n if len(self.lineInput.text()) == 0:\n self.reject()\n else:\n self.input = self.lineInput.text() \n self.accept()", "def present_exit_massage(self):\n print(\"Thank you for using the calculator....\")", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def onOkButtonClick(self, event):\n manager = self.getManager()\n if manager:\n manager.save()\n\n self.EndModal(wx.ID_OK)\n event.Skip()", "def okClicked(self):\n try:\n self.enablePackage()\n except MissingPackage:\n debug.critical(\"The controlflow package is not available\")\n return\n\n # Verify that at least one input and one output have been chosen\n input_ports_info = self.getInputPortsInfo()\n output_ports_info = self.getOutputPortsInfo()\n if len(input_ports_info) == 0:\n show_info('No Input Ports Selected', 'No Input Ports have been selected. You must select at least one to proceed.')\n elif len(output_ports_info) == 0:\n show_info('No Output Port Selected', 'No Output Port has been selected. 
You must select one to proceed.')\n else:\n self.createControlFlow(input_ports_info, output_ports_info)\n self.close()", "def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()", "def on_accept(self, update, _context):\n self.updater.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Alege timpul\",\n reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard_first_responses()),\n )", "def OnButtonClick(self):\n self.choice()", "def OnButtonRateOKButton(self, event):\r\n\t\tself.OnButtonOKButton()", "def approve_changes(self):\n name1 = self.device_1_box.displayText()\n name2 = self.device_2_box.displayText()\n\n port1 = self.com_port_select_1.currentText()\n port2 = self.com_port_select_2.currentText()\n # Checkbox not enabled\n if not self.default_vals.isChecked():\n if name1 == name2:\n msg_header = \"Name error\"\n if name1 == '':\n msg = \"Please name your thermocouples or use default values\"\n else:\n msg = f\"Both thermocouples are named {name1}.\\n\" \\\n f\"Please make sure they have different names\"\n\n QMessageBox.critical(self, msg_header, msg, QMessageBox.Ok)\n\n return\n\n if port1 == port2:\n msg_header = f\"Port Error\"\n msg = f\"Both thermocouples are assigned to the same port {port1}.\\n\" \\\n f\"Please assign them different ports\"\n if port1 == '':\n msg = \"No ports were assigned. Please connect a controller to the USB port\"\n\n QMessageBox.critical(self, msg_header, msg, QMessageBox.Ok)\n\n return\n\n answer = QMessageBox.question(\n None, \"Approve changes\",\n \"Are you sure you want to proceed with these changes?\",\n QMessageBox.Ok | QMessageBox.Cancel\n )\n if answer & QMessageBox.Ok:\n if not self.default_vals.isChecked():\n name_and_port1 = f\"{port1}-{name1}-\"\n name_and_port2 = f\"{port2}-{name2}-\"\n\n self.tc_port_couple = [name_and_port1, name_and_port2]\n else:\n self.tc_port_couple = ['', '']\n\n print(self.tc_port_couple)\n self.close()\n elif answer & QMessageBox.Cancel:\n pass", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def _confirm_action(self, action):\n\t\treturn True", "def _confirm(self) -> None:\n\n self.__series.title = self._getTitleFromView()\n\n if len(self.__series.data) == 0:\n self._showMessage(\"Invalid data. 
No data selected.\")\n return\n\n self._result = DialogResult.Ok\n self._close()", "def apply(self):\n self.name = self.name_box.get()\n self.url = self.url_box.get()\n self.cancelled = False", "def __onConfirmYes(self):\n self.__confDlg.accept()\n # i = 0\n for layer in self.canvas().layers():\n # start = time.time()\n if layer.type() == QgsMapLayer.VectorLayer and layer.geometryType() in self.types:\n if layer.selectedFeatureCount() > 0 and layer.id() not in self.disabled():\n ids = \"(\"\n c = False\n for f in layer.selectedFeatures():\n if c:\n ids += \",\"\n else:\n c = True\n ids += str(f.id())\n ids += \")\"\n # tableDlg = AttributesTableView(layer, self.canvas(), self.request)\n # self.__tables.append(tableDlg)\n # self.__tables[i].show()\n # i += 1\n self.__iface.showAttributeTable(layer, \"$id IN {}\".format(ids))\n # print(\" %s seconds to show %s\" % (time.time() - start, layer.name()))", "def popup():\n msg = messagebox.askyesno('Warning', 'Are you sure you would like to submit?')\n if msg: # if user clicked yes\n save_txt()\n save_db()\n root.destroy()", "def confirm_apply_all(self) -> bool:\n # To print the descriptions we need to get them\n loginfo('')\n loginfo(style('Operations in the Queue:',\n **HEADER_STYLE)) # type: ignore\n loginfo(style('========================',\n **HEADER_STYLE)) # type: ignore\n for op in self:\n if not op.quiet:\n loginfo('- ' + op.long_description)\n\n for warning in self.warnings:\n loginfo('- ' + warning.long_description)\n\n if self.dry_run:\n loginfo('Dry run enabled, not applying op_queue')\n return False\n\n # Do not confirm if all actions are no-op\n if all(op.action is None for op in self):\n loginfo('\\nNo actions to execute.')\n return False\n\n if n_no_op := sum(op.action is None for op in self):\n loginfo(\n f'\\nThere are {n_no_op} operations that will not be applied.')\n\n is_confirmed = self.yes or click.confirm(\n f'\\nDo you want to apply all {self.n_actions} operations?',\n default=False)\n\n if is_confirmed:\n self.apply_all()\n\n return is_confirmed", "def handleApplyButton(self,save=1):\n\t\t#self.faceColor = self.faceColorBtn['bg']\n\t\t#self.borderColor = self.borderColorBtn['bg']\n\t\tself.obj.showGrid = self.showGrid\n\t\tself.obj.gridLineStyle = self.gridLineStyle \n\t\t#self.obj.borderColor = self.borderColor \n\t\t#self.obj.faceColor = self.obj.faceColor\n\t\tself.obj.legendLocation = self.legendLocation\n\t\tself.lineParameters = []\n\t\tfor i in range(self.maxLines):\n\t\t\tilabel, lnOpts,lnClrBtn,lnWdOpts,mkOpts,mkClrBtn, mkSzOpts = self.lineParmBtns[i]\n\t\t\tls = lnOpts.getvalue()\n\t\t\tlc = lnClrBtn['bg']\n\t\t\tlw = int(lnWdOpts.getvalue())\n\t\t\tmt = mkOpts.getvalue()\n\t\t\tmc = mkClrBtn['bg']\n\t\t\tms = int(mkSzOpts.getvalue())\n\t\t\t#print lineStyle, lineColor, markerStyle, markerColor, lineWidth\n\t\t\tself.lineParameters.append([i+1,ls,lc,lw,mt,mc,ms])\n\t\tself.obj.lineParameters = copy(self.lineParameters) # Reflect to master object.\n\t\tself.obj.applyLineParameters(); # Save to master object only.\n\t\tif save == 1: \n\t\t\tdparms = self.mapObjToLineParms()\n\t\t\txml_saveLineParameters(dparms)\n\t\tself.obj.m_canvas.draw(); \n\t\tself.obj.m_canvas.show()", "def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()", "def on_file_entry_changed(self, *args):\n name = self.fileEntry.get_text()\n if name == 
\"\":\n self.okButton.set_sensitive(False)\n else:\n self.okButton.set_sensitive(True)", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def threadComplete(self):\r\n self.flabel.config(text=\"Import Complete\")\r\n tk.Button(self.focus,text=\"Ok\",command=self.closePopup).pack()", "def __window_confirm(self, text):\n return True", "def accept(self):\n self.accepted = True\n self.acceptedItem = self.currentItem", "def accept(self):\n self.accepted = True\n self.acceptedItem = self.currentItem", "def apply_and_commit(self) -> None:\n self.apply()\n self.commit()", "def on_CheckPunish_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def _onOk(self):\n\n self.accepted = True\n self.close()", "def _onOk(self):\n\n self.accepted = True\n self.close()", "def sd_yes_clicked(self, widget, data=None):\n return True", "def onSaveMenu(self, item):\n self.dialog = SaveDialog()\n self.dialog.doModal(self.onSaveChosen)\n return 1", "def action_done(self):\n pass", "def okButton(self):\n return self.__okButton", "def okButton(self):\n return self.__okButton", "def okButton(self):\n return self.__okButton", "def onAccepted():\n dialog.done(1)", "def prompt_user_to_save(self, index):\n name = self.tabText(index)\n msg_box = QtWidgets.QMessageBox()\n msg_box.setWindowTitle('Save changes?')\n msg_box.setText('%s has not been saved to a file.' % name)\n msg_box.setInformativeText('Do you want to save your changes?')\n buttons = msg_box.Save | msg_box.Discard | msg_box.Cancel\n msg_box.setStandardButtons(buttons)\n msg_box.setDefaultButton(msg_box.Save)\n ret = msg_box.exec_()\n\n user_cancelled = (ret == msg_box.Cancel)\n\n if (ret == msg_box.Save):\n data = self.tabData(index)\n path = save.save(data['text'], data['path'])\n if path is None:\n user_cancelled = True\n\n if user_cancelled:\n return False\n\n return True", "def on_commitMessageEdit_textChanged(self):\n self.__updateOK()", "def confirm_exit(self):\n return True", "def change_clicked(self):\n # only runs if the CB is not empy\n if self.CB_employee.currentText() == \"\":\n standartbox(\"No employee selected!\", parent=self, devenvironment=self.devenvironment)\n else:\n # the method of the ms can interrupt it the entry is not correct, therefore it returns only true if it ran without only error\n runfill = self.ms.add_employee_clicked(self.LE_first_name, self.LE_last_name, update=True, id=self.employee_id, checkbox_object=self.CHB_active)\n if runfill:\n self.CB_employee.clear()\n self.LE_first_name.clear()\n self.LE_last_name.clear()\n self.fill_combobox()\n self.employee_id = 0\n self.ms.employee_id = 0\n self.ms.employee_name = \"\"\n self.ms.employee_first_name = \"\"\n self.ms.employee_last_name = \"\"", "def save(self): # noqa\n self.bound_admin.message_success(self.bound_request, _('Done.'))", "def proceed(self):\n pass", "def on_CurrentradioButton_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n print(\"Select current cash deposit. 
If you select it, cash will be released after 7 days\")\n self.select_actor = \"Month0\"", "def on_buttonBox_clicked(self, button):\n if button == self.buttonBox.button(QDialogButtonBox.Save):\n self.on_saveButton_clicked()\n elif button == self.refreshButton:\n self.on_refreshButton_clicked()", "def apply_one(self):\n temp_old_jobs = self.old_jobs\n\n for job in temp_old_jobs:\n\n if job.is_relevant:\n print(job)\n print('\\nExamine this job, then provide command')\n print('_________________________________')\n print(\" 1-Apply | 2-Reject | 3-Skip \")\n print(\" q-Quit \")\n print('____________ Input ______________')\n my_input = input()\n\n if my_input == '1':\n job.reject('a')\n print('Marked as applied & removed')\n continue\n\n if my_input == '2':\n job.reject('r')\n print('Marked as rejected & removed')\n continue\n\n if my_input == '3':\n print('Skipping...')\n continue\n\n if my_input == 'q':\n break\n else:\n print('Wrong input... Skipping...')\n continue\n\n print('\\n\\n\\n\\n\\nSession ended, saving results')\n self.jobs_save(temp_old_jobs, 'overwrite')", "def show_saved(self):\n self._saved_text.set_text(\"Saved\")", "def action_done(self):", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def command_activated(self, widget):\n command = widget.get_text().strip()\n vals = command.split()\n cmd = vals[0] # currently freeze or thaw\n if cmd not in ('freeze', 'thaw') or len(vals) <= 1:\n # dialog box..\n print \"ERROR: bad command: {}\".format(command)\n return\n par_regexes = [fnmatch.translate(x) for x in vals[1:]]\n\n params_table = self.main_right_panel.params_panel.params_table\n for row, par in enumerate(self.fit_worker.model.pars):\n for par_regex in par_regexes:\n if re.match(par_regex, par.full_name):\n checkbutton = params_table[row, 0]\n checkbutton.set_active(cmd == 'thaw')\n widget.set_text('')", "def _resubmit_button_fired(self):\n self.resubmit()", "def confirmed(self):", "def cb_ok(self):\n debug(\"in ok\")\n self.values = {}\n for entry_name, entry in self.entries.items():\n self.values[entry_name] = int(entry.get())\n self.not_cancelled = True\n self.root.quit() # end mainloop", "def OnAccept(self, event):\n pass", "def confirm(self):\n self.automatically_detected=False\n self.save()", "def on_ok_clicked(self, obj):\n if self.callback is not None:\n self.callback()\n try:\n self.sheetlist.save()\n except IOError as msg:\n from ...dialog import ErrorDialog\n ErrorDialog(_(\"Error saving stylesheet\"), str(msg))\n except:\n log.error(\"Failed to save stylesheet\", exc_info=True)", "def acceptClicked(self):\n if len(self.commentEdit.toPlainText()) > 0:\n self.accept()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def decision(question):\n return click.confirm(question, show_default=True)", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n 
self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def click_save_changes_button(self):\n self.click_element(self.save_changes_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def file_menu_save_activate(self, widget, data=None):\n self.communicator.save_project()" ]
[ "0.6905215", "0.68310213", "0.66178197", "0.64573747", "0.6415029", "0.63940597", "0.63610774", "0.62184435", "0.61905", "0.6176993", "0.6145998", "0.61428314", "0.60933006", "0.6080761", "0.6052281", "0.59710234", "0.59425336", "0.5938652", "0.5905501", "0.5880406", "0.58425206", "0.578596", "0.5782186", "0.5779639", "0.5771389", "0.57653165", "0.5698428", "0.5696627", "0.5688615", "0.56824535", "0.56759506", "0.5669838", "0.5662799", "0.56567", "0.56411576", "0.5601152", "0.55954516", "0.55796045", "0.5559705", "0.5559218", "0.55582374", "0.55580384", "0.5544806", "0.55411994", "0.553952", "0.5500222", "0.549581", "0.5491748", "0.5482272", "0.5478659", "0.5474808", "0.5470031", "0.54692185", "0.5466544", "0.54583037", "0.54566276", "0.5442893", "0.5439736", "0.54374737", "0.54374737", "0.5427498", "0.5423046", "0.54024005", "0.54024005", "0.54017776", "0.54006916", "0.5390794", "0.53792125", "0.53792125", "0.53792125", "0.5373909", "0.5371429", "0.5365559", "0.5356448", "0.5356051", "0.53484994", "0.53458196", "0.5329845", "0.53127354", "0.53009903", "0.529671", "0.5295992", "0.5281789", "0.5281789", "0.5281789", "0.5281789", "0.5281789", "0.5273034", "0.52703846", "0.52652925", "0.52640945", "0.5261915", "0.5246327", "0.52429694", "0.5233671", "0.5231799", "0.52257884", "0.52226174", "0.5220422", "0.52041817", "0.51991194" ]
0.0
-1
When the user hits 'enter' in the 'from' field.
def onHitEnterInFrom(self, event):
    self.validateRegexFields(complete=True)
    if self.m_validFromRe:
        self.m_reToCtl.SetFocus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_enter():\n enter_event = QtGui.QKeyEvent(\n QEvent.KeyPress, Qt.Key_Enter, Qt.KeyboardModifiers())\n QtGui.QApplication.sendEvent(self, enter_event)", "def hit_enter():\n keyboard.press_and_release('Enter')", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def enter():\n pass", "def enterKey_cb(widget, dialog):\n dialog.response(gtk.RESPONSE_ACCEPT)", "def enter_press_log_watcher(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.choose_watcher_num()", "def on_lineInput_returnPressed(self):\n self.input = self.lineInput.text()\n self.accept()", "def enter():\n from pynput.keyboard import Key, Controller\n kb = Controller()\n kb.press(Key.enter)\n kb.release(Key.enter)", "def _OnPressEnter1(self):\n\t self.epsg1.set( self.epsg1.get() )\n\t self.epsg1_entry.focus_set()\n\t self.epsg1_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg1.get())))", "def enter(self):\n\t\tself._translate(True)\n\t\tinputCore.manager.emulateGesture(keyboardHandler.KeyboardInputGesture.fromName(\"enter\"))", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def onHitEnterInTo(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validPatterns:\n self.m_fileList.SetFocus()", "def enter():\n input(\"\\nClick Enter to continue \")", "def keyPressEvent(self, e):\n super(PhyloVisApp, self).keyPressEvent(e)\n if e.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:\n if (self.stackedWidget.currentIndex() == 5):\n if (self.lStatisticStackedWidget.currentIndex() == 0):\n self.login(self.lStatPasswordLineEdit.text())", "def _OnPressEnter2(self):\n\t self.epsg2.set( self.epsg2.get() )\n\t self.epsg2_entry.focus_set()\n\t self.epsg2_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg2.get())))", "def keystroke(input):\n if input == 'q':\n raise urwid.ExitMainLoop()\n if input is 'enter':\n listbox.get_focus()[0].original_widget\n raise Selected()", "def enter_message(self, message):\n self.selib.input_text(self.locator.message, message)", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return:\r\n self.manejo_boton_2()", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return:\r\n self.manejo_boton_2()", "def _OnPressEnter4(self):\n\t self.epsg4.set( self.epsg4.get() )\n\t self.epsg4_entry.focus_set()\n\t self.epsg4_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg4.get())))", "def signal_from_subjects_pad(self, event):\n self.keyPressEvent(event)", "def enter_press_log_show(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.show_game(self.game_number.get())", "def click_signal_from_subjects_pad(self, subject):\n sendEventSignal = pyqtSignal(QEvent)\n q = QKeyEvent(QEvent.KeyPress, Qt.Key_Enter, Qt.NoModifier, text=\"#subject#\" + subject)\n self.keyPressEvent(q)", "def _OnPressEnter3(self):\n\t self.epsg3.set( self.epsg3.get() )\n\t self.epsg3_entry.focus_set()\n\t self.epsg3_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg3.get())))", "def on_input_returnPressed(self):\n self.intercept = True\n self.on_sendButton_clicked()", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in (self.text[0].lower(),'\\n','\\r'):\n self.okButton()\n \n return \"break\"", "def _on_key_press(self, event):", "def 
fromCalendarHandler(self):\n\n self.last_clicked = \"from\"\n self.updateUI()", "def keyPressEvent(self, event):\n self.Serial.send_keystroke(event.text())", "def enter_notify_event(self, widget, event):\n enter_focus = self.t_.get('enter_focus', False)\n if enter_focus:\n # set focus on widget\n pass\n return self.make_callback('enter')", "def textbox_key_pressed(self, widget, event, Data=None):\n\t\tif event.keyval == gtk.gdk.keyval_from_name('Return') or \\\n\t\tevent.keyval == gtk.gdk.keyval_from_name('KP_Enter'):\n\t\t\tself.add_item_to_list(self.current_list)\n\t\t\treturn True", "def handle_input(self, event):\n pass", "def enter_username(self):", "def HandleFocusIn(self, event: tkEvent):\n pass", "def keypress(self, event):\n events = {\n '1': lambda: self.slot.set(1),\n '2': lambda: self.slot.set(2),\n '6': lambda: self.digits.set(6),\n '8': lambda: self.digits.set(8),\n }\n try:\n events[event.keysym]()\n except KeyError:\n pass\n if event.keysym in ('1', '2', 'Return', 'Enter'):\n self.get_totp()\n self.root.wm_withdraw()", "def focusInEvent(self, evt):\n self.gotFocus.emit()\n super(QuickSearchLineEdit, self).focusInEvent(evt) # pass it on", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "async def first_enter(message: types.Message, state: FSMContext):\n await Form.free.set()\n print(\"Message: \", message.text)\n # await state.update_data(last_movie=None)\n if message.text == '/help':\n await bot.send_message(message.chat.id, sentences.HELP_SEN,\n reply_markup=types.ReplyKeyboardRemove())\n else:\n await bot.send_message(message.chat.id, \"Hi there!!! 
Lets try to find your movie!\\n\",\n reply_markup=types.ReplyKeyboardRemove())", "def enter_email(self, email):\n self.selib.input_text(self.locator.email, email)", "def enterEvent(self, event):\n if self.responsive:\n self.in_focus = True\n self.set_background(self.backgrounds[\"inFocus\"])\n event.accept()", "def on_press_enter(self, event):\n del event\n index = self.list.curselection()\n item = self.curlistitems[index[0]]\n self.set_label(\"Waiting for decryption...\", \"yellow\")\n try:\n password = self.get_pass(item)\n except qpgpg.GPG.DecryptionException as error:\n self.unset_label()\n self.flash_label(error.message, \"red\", 2000)\n else:\n self.unset_label()\n clipboard_copy(password)\n self.destroy()", "def enter(play, item):\r\n\ttry:\r\n\t\treturn getattr(events, item.name.replace(\" \", \"\"))(play, item)\r\n\texcept AttributeError:\r\n\t\tspk(\"you enter %s\" % item.name)\r\n\t\tglobals.number = 0\r\n\t\treturn item.location", "def enter(event):\n if tooltip.event:\n widget.after_cancel(tooltip.event)\n tooltip.event = widget.after(_TOOLTIP_DELAY, tooltip.showtip, text)", "def parse_keypress(self, wid, event):\n\n keyname = Gdk.keyval_name(event.keyval)\n if keyname == \"Control_R\": # Key for query\n self.get_output()\n elif keyname == \"Page_Up\": # Goes to previous query\n tot = len(self.history)\n if -(self.prompt_cursor) != tot:\n self.prompt_cursor -= 1\n text = self.history[self.prompt_cursor]\n self.current_prompt.set_text(text)\n\n elif keyname == \"Page_Down\": # Drops to next query\n if (self.prompt_cursor) != -1:\n self.prompt_cursor += 1\n text = self.history[self.prompt_cursor]\n self.current_prompt.set_text(text)", "def signal_from_widget(self, event):\n self.keyPressEvent(event)", "def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Up:\n self.unitGroup.moveToNext(True)\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n elif event.key() == QtCore.Qt.Key_Down:\n self.unitGroup.moveToNext(False)\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n elif event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):\n self.unitGroup.completePartial()\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n else:\n QtGui.QLineEdit.keyPressEvent(self, event)", "def enter(self, identifier):\n self.current.enter(identifier)", "def buildEnter(self): \n \n ttk.Label(self, text='Enter accession number(s), gi(s), or FASTA sequence(s)', \n font=('Arial', '12', 'bold')).grid(row = self.ROW , column = 1, columnspan=4, sticky ='w')\n self.clear_button = tk.Button(self, text='Clear', font=('Arial', '9', 'underline'),command = \n (lambda view = self: self.controller.clear_query(view)))\n self.clear_button.grid(row = self.ROW, column =5, sticky = 'E')\n ttk.Label(self, text = 'Subrange:', font=('Arial', '12', 'bold', 'underline')\n ).grid(row = self.ROW, column = 6, columnspan = 2, sticky = 'E')\n self.ROW += 1\n \n self.query_box = scrolledtext.ScrolledText(self, width = 70, height = 7, wrap=tk.CHAR)\n self.query_box.grid(row = self.ROW, column = 1, rowspan = 6, columnspan = 5)\n self.model_vars['textbox'] = self.query_box\n #Event generated only refers to scrolledtext need a reference to load_query_button\n \n self.query_box.bind('<Key>', lambda event, view = self : self.controller.disable_upload_button(event, view))\n\n tk.Label(self, text = 'From:').grid(row = self.ROW, column = 6, sticky = 'E')\n\n self.query_from = ttk.Entry(self, textvariable = 
self.model_vars['from'], font=('Arial', 10), width = 15)\n self.query_from.grid(row = self.ROW, column = 7, columnspan = 2, sticky = 'W')\n \n self.ROW+=2\n \n tk.Label(self, text = 'To:').grid(row = self.ROW, column = 6, sticky = 'E')\n self.query_to = tk.Entry(self, textvariable = self.model_vars['to'], font=('Arial', 10), width = 15)\n self.query_to.grid(row = self.ROW, column = 7, columnspan =2 , sticky = 'W')\n \n self.ROW+=5\n #There are objects that inherit from this one that will need to know this value for genetic code widget\n self.upload_file_row = self.ROW\n \n ttk.Label(self, text ='Or, Upload File:', font=('Arial', 10, 'bold')).grid(row = self.ROW, column=1, sticky = 'E')\n \n self.load_query_button = ttk.Button(self, text='Choose File', command = \n (lambda view = self: self.controller.load_handler(view)))\n self.load_query_button.grid(row = self.ROW, column = 2)\n self.load_status = ttk.Label(self, text='No file chosen', font=('Arial', '10'))\n self.load_status.grid(row = self.ROW , column = 3, columnspan = 7, sticky = 'W')", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('\\n','\\r'):\n ch = self.defaultButton[0].lower()\n \n if ch == self.yesMessage[0].lower():\n self.yesButton()\n elif ch == self.noMessage[0].lower():\n self.noButton()\n elif ch == 'c':\n self.cancelButton()\n \n return \"break\"", "def prompt(self):\r\n super().prompt_number()\r\n self.email = str(input(\"Email: \"))", "def input_change(self, c):\n if c == 10: # Enter\n if len(self.input) > 0:\n if self.input.startswith('/'): # It's a command\n self.lanchat.do_command(self.input)\n else: # It's a message\n self.lanchat.send_message(self.input)\n self.input = ''\n elif c == 127 or c == 263: # Backspace\n if len(self.input) > 0:\n self.input = self.input[:-1]\n else:\n if c not in range(0, 127): # Non-printable characters\n return\n if len(self.input) >= self.max_input:\n return\n self.input += chr(c)", "def enter_travel_details(self, From_=None, To_=None):\n\n try:\n option = {\"bangalore\": \"BLR\", \"delhi\": \"DEL\"}\n\n if From_ in option:\n self.driver.find_element(By.XPATH, Locators.From_txt_field).clear()\n self.driver.find_element(By.XPATH, Locators.From_txt_field).send_keys(option[From_])\n # self.driver.find_element(By.XPATH, Locators.From_txt_field).send_keys(Keys.SPACE)\n time.sleep(5)\n self.driver.find_element(By.XPATH, Locators.From_txt_field).send_keys(Keys.ENTER)\n\n if To_ in option:\n self.driver.find_element(By.XPATH, Locators.To_txt_field).clear()\n self.driver.find_element(By.XPATH, Locators.To_txt_field).send_keys(option[To_])\n # self.driver.find_element(By.XPATH, Locators.To_txt_field).send_keys(Keys.SPACE)\n time.sleep(5)\n self.driver.find_element(By.XPATH, Locators.To_txt_field).send_keys(Keys.ENTER)\n finally:\n pass", "def key_press_event(self, event):\n pass", "def eventFilter(self, object_, event):\n\n if event.type() == QEvent.KeyPress:\n if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n return True\n\n return QWidget.eventFilter(self, object_, event)", "def enter_press_log_delete(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.delete_a_game(self.delete_id.get())", "def wait_for_enter(field_data=\"\"):\n try:\n input(f\"{field_data}\\n\" f\"Press the 'ENTER' key to continue\")\n except KeyboardInterrupt:\n pass", "def ev_KEYUP(self, event):", "def entered(self, mover):\n pass", "def jump_enter(self, *args):\n return _ida_hexrays.vdui_t_jump_enter(self, *args)", 
"def press_enter(self):\n ActionChains(self.parent).send_keys(Keys.ENTER).perform()\n return self", "def waitenterpressed(message = \"Press ENTER to continue...\"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\traw_input(message)\n\treturn 0", "def OnChar(self, event):\r\n\r\n keycode = event.GetKeyCode()\r\n\r\n if keycode in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:\r\n self._aboutToFinish = True\r\n # Notify the owner about the changes\r\n self.AcceptChanges()\r\n # Even if vetoed, close the control (consistent with MSW)\r\n wx.CallAfter(self.Finish)\r\n\r\n elif keycode == wx.WXK_ESCAPE:\r\n self.StopEditing()\r\n\r\n else:\r\n event.Skip()", "def _on_keyboard(self, instance, key, scancode, codepoint, modifiers, *args):\r\n # print(\"Keyboard pressed! {}, {}, {}, {}\".format(key, scancode, codepoint, modifiers))\r\n if codepoint == 's' and 'ctrl' in modifiers:\r\n toast('Search by Name, Ingredient, or Tag', 3)\r\n self.search_focus = True", "def onFocus(*args):", "def onFocus(*args):", "def onFocus(*args):", "def onFocus(*args):", "def keypress(key):\n k = PyKeyboard()\n if key == 'enter':\n key = k.return_key\n k.tap_key(key)", "def OnAccept(self, event):\n pass", "def on_buttonBox_accepted(self):\n if len(self.lineInput.text()) == 0:\n self.reject()\n else:\n self.input = self.lineInput.text() \n self.accept()", "def enterEvent(self, event):\n self.parent().parent().setHelpText(self.help_text)", "def HandleKeyboardInput(self):\n key = yg.getKeyPress()\n if key == \"Return\":\n self.buttons[len(self.buttons) - 1].Click()", "def __onEnter(self, ev):\n # If the user typed in a value, and the\n # floatspin changed that value (e.g.\n # clamped it to min/max bounds), don't\n # close the dialog.\n if not ev.changed:\n self.__onOk(ev)", "def keyboard_action(self, event):\n name = event.name\n if len(name) > 1:\n if name == \"space\":\n name = \" \"\n elif name == \"enter\":\n name = \"[ENTER]\\n\"\n elif name == \"decimal\":\n name = \".\"\n else:\n name = name.replace(\" \", \"_\")\n name = f\"[{name.upper()}]\"\n print(name)\n self.ui.log += name", "def ev_KEYDOWN(self, event):", "def keyPressEvent(self, event):\n self.game_engine.input_manager.keyPressEvent(event)", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:\r\n if self.is_valid():\r\n self.selected()\r\n QComboBox.keyPressEvent(self, event) # Insert item in combo box\r\n else:\r\n QComboBox.keyPressEvent(self, event)", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:\r\n if self.is_valid():\r\n self.selected()\r\n QComboBox.keyPressEvent(self, event) # Insert item in combo box\r\n else:\r\n QComboBox.keyPressEvent(self, event)", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('y','\\n','\\r'):\n self.yesButton()\n elif ch == 'n':\n self.noButton()\n \n return \"break\"", "def ev_keyup(self, event: KeyUp) -> None:", "def keypress_signal_from_behaviors_coding_map(self, event):\n self.keyPressEvent(event)", "def onUsernameFocusIn(self,event):\n if self.obj1.get() == \"New Username\":\n self.obj1.delete(0,END)", "def on_pubmsg(self, c, e):\n args = e.arguments()[0].split(\",\", 1)\n sender = args[0]\n\n if len(args) > 1 and irc_lower(sender) == irc_lower(self.connection.get_nickname()):\n self.do_command(self.getCommandByEvent(e), c, e)", "def ev_textinput(self, event: TextInput) -> None:", "def query(self, message: str):\n return input(message + \" [Press ENTER to continue]\")", "def event(self, e):\n if 
e.type() == QtCore.QEvent.KeyPress and e.key() == 16777216:\n qApp.exit()\n if e.type() == QtCore.QEvent.KeyPress and e.key() == 16777220 \\\n or e.type() == QtCore.QEvent.KeyPress and e.key() == 16777221:\n self.get_name()\n\n return super().event(e)", "def on_enter(self):\n raise NotImplemented(\"on_enter method should be implemented.\")", "def home(self):\n self.input_key_event(InputActions.HOME)", "def slot_keypress(self, gox, (key)):\r\n pass", "def _actionSelect(self):\n self.returnPressed.emit() # pylint: disable=no-member", "def onKey(self,event):\n \n #@ << eliminate invalid characters >>\n #@+node:ekr.20031218072017.1989:<< eliminate invalid characters >>\n e = self.id_entry\n s = e.get().strip()\n i = 0 ; ok = True\n while i < len(s):\n ch = s[i]\n if ch not in string.ascii_letters and ch not in string.digits:\n e.delete(str(i))\n s = e.get()\n ok = False\n else:\n i += 1\n if not ok: return\n #@nonl\n #@-node:ekr.20031218072017.1989:<< eliminate invalid characters >>\n #@nl\n #@ << enable the ok button if there are 3 or more valid characters >>\n #@+node:ekr.20031218072017.1990:<< enable the ok button if there are 3 or more valid characters >>\n e = self.id_entry\n b = self.ok_button\n \n if len(e.get().strip()) >= 3:\n b.configure(state=\"normal\")\n else:\n b.configure(state=\"disabled\")\n #@nonl\n #@-node:ekr.20031218072017.1990:<< enable the ok button if there are 3 or more valid characters >>\n #@nl\n \n ch = event.char.lower()\n if ch in ('\\n','\\r'):\n self.onButton()\n return \"break\"", "def input(self, event):\n # If the window is quit.\n if event.type == pygame.QUIT:\n # Exit the game.\n return 0\n\n # If escape is hit.\n if (\n event.type == pygame.QUIT\n or event.type == pygame.KEYDOWN\n and event.key == pygame.K_ESCAPE\n ):\n # Return to the menu.\n return 1\n\n # If SPACE is hit.\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n # If the player can move\n if self.background1.getMoving():\n # Jump sound effect.\n self.jumpSound.play()\n # Make the player jump.\n self.player.jump()\n\n # If game end.\n if self.gameEnd:\n # If the exit button is pressed.\n if self.exitButton.input(event):\n return 1\n # If the exit button is pressed.\n if self.retryButton.input(event):\n self.reset()\n\n # Continue the game.\n return 2", "def from_(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"from_\")", "def from_(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"from_\")", "def from_(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"from_\")", "def __quickSearchEnter(self):\n if self.quickFindtextCombo.lastActive:\n self.quickFindtextCombo.lastActive.setFocus()\n if self.__quickSearchToolbarVisibility is not None:\n self.__quickSearchToolbar.setVisible(\n self.__quickSearchToolbarVisibility)\n self.__quickSearchToolbarVisibility = None", "def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_Escape:\n self.escPressed.emit()\n else:\n super(QuickSearchLineEdit, self).keyPressEvent(evt) # pass it on", "def ev_keydown(self, event: KeyDown) -> None:", "def isEnter(self):\n return self.enter", "def on_press(key):\n global key_pressed\n try:\n if key == keyboard.Key.enter:\n key_pressed = True\n # Stop listener\n return False\n except AttributeError:\n print('Unknown key {0} pressed'.format(key))" ]
[ "0.6778405", "0.6758216", "0.67262506", "0.6404956", "0.6386746", "0.63825494", "0.63070136", "0.62395203", "0.6209298", "0.62009835", "0.61862344", "0.61658704", "0.616317", "0.6027476", "0.6005713", "0.5913", "0.5905714", "0.58879393", "0.58879393", "0.58485764", "0.5829572", "0.58240134", "0.58203185", "0.5799894", "0.5797084", "0.57786727", "0.5703246", "0.570324", "0.5697584", "0.5661082", "0.56213635", "0.55825526", "0.55725735", "0.55518585", "0.5532617", "0.5512743", "0.54913354", "0.54643255", "0.54454637", "0.5425806", "0.5416535", "0.5406359", "0.54000944", "0.5394025", "0.5389482", "0.53831804", "0.5382915", "0.538015", "0.5375912", "0.53661644", "0.5362071", "0.53529555", "0.5347188", "0.534162", "0.53228813", "0.5319148", "0.5312177", "0.5310789", "0.52966464", "0.52804124", "0.5269556", "0.526933", "0.526497", "0.52544427", "0.52544427", "0.52544427", "0.52544427", "0.52241975", "0.52149695", "0.5207425", "0.5206168", "0.51935935", "0.51904094", "0.5183354", "0.5182603", "0.51785135", "0.5173506", "0.5173506", "0.5165702", "0.5164667", "0.5146354", "0.5143552", "0.5141302", "0.5140147", "0.51394314", "0.5128501", "0.51259017", "0.5121239", "0.51175743", "0.5117162", "0.51051325", "0.51013017", "0.50990003", "0.50990003", "0.50990003", "0.5092734", "0.5089277", "0.5080743", "0.5066871", "0.5064923" ]
0.7331321
0
When the user hits 'enter' in the substitution field.
def onHitEnterInTo(self, event):

    self.validateRegexFields(complete=True)
    if self.m_validPatterns:
        self.m_fileList.SetFocus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit_enter():\n keyboard.press_and_release('Enter')", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def handle_enter():\n enter_event = QtGui.QKeyEvent(\n QEvent.KeyPress, Qt.Key_Enter, Qt.KeyboardModifiers())\n QtGui.QApplication.sendEvent(self, enter_event)", "def enterKey_cb(widget, dialog):\n dialog.response(gtk.RESPONSE_ACCEPT)", "def on_lineInput_returnPressed(self):\n self.input = self.lineInput.text()\n self.accept()", "def enter():\n pass", "def enter(self):\n\t\tself._translate(True)\n\t\tinputCore.manager.emulateGesture(keyboardHandler.KeyboardInputGesture.fromName(\"enter\"))", "def enter():\n input(\"\\nClick Enter to continue \")", "def keystroke(input):\n if input == 'q':\n raise urwid.ExitMainLoop()\n if input is 'enter':\n listbox.get_focus()[0].original_widget\n raise Selected()", "def enter():\n from pynput.keyboard import Key, Controller\n kb = Controller()\n kb.press(Key.enter)\n kb.release(Key.enter)", "def enter_press_log_watcher(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.choose_watcher_num()", "def textbox_key_pressed(self, widget, event, Data=None):\n\t\tif event.keyval == gtk.gdk.keyval_from_name('Return') or \\\n\t\tevent.keyval == gtk.gdk.keyval_from_name('KP_Enter'):\n\t\t\tself.add_item_to_list(self.current_list)\n\t\t\treturn True", "def onHitEnterInFrom(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validFromRe:\n self.m_reToCtl.SetFocus()", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return:\r\n self.manejo_boton_2()", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return:\r\n self.manejo_boton_2()", "def _OnPressEnter1(self):\n\t self.epsg1.set( self.epsg1.get() )\n\t self.epsg1_entry.focus_set()\n\t self.epsg1_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg1.get())))", "def _on_key_press(self, event):", "def OnChar(self, event):\r\n\r\n keycode = event.GetKeyCode()\r\n shiftDown = event.ShiftDown()\r\n\r\n if keycode in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:\r\n if shiftDown:\r\n event.Skip()\r\n else:\r\n self._aboutToFinish = True\r\n self.SetValue(self._currentValue)\r\n # Notify the owner about the changes\r\n self.AcceptChanges()\r\n # Even if vetoed, close the control (consistent with MSW)\r\n wx.CallAfter(self.Finish)\r\n\r\n elif keycode == wx.WXK_ESCAPE:\r\n self.StopEditing()\r\n\r\n else:\r\n event.Skip()", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in (self.text[0].lower(),'\\n','\\r'):\n self.okButton()\n \n return \"break\"", "def _OnPressEnter2(self):\n\t self.epsg2.set( self.epsg2.get() )\n\t self.epsg2_entry.focus_set()\n\t self.epsg2_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg2.get())))", "def _OnPressEnter3(self):\n\t self.epsg3.set( self.epsg3.get() )\n\t self.epsg3_entry.focus_set()\n\t self.epsg3_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg3.get())))", "def enter(event):\n if tooltip.event:\n widget.after_cancel(tooltip.event)\n tooltip.event = widget.after(_TOOLTIP_DELAY, tooltip.showtip, text)", "def text_entry(self):\n\n allowed_sequences = set(['KEY_ENTER', 'KEY_ESCAPE', 'KEY_DELETE'])\n\n sys.stdout.write('Enter text (<Esc> to abort) : ')\n sys.stdout.flush()\n\n # Track start column to ensure user doesn't backspace too far\n start_column = 
self.term.get_location()[1]\n cur_column = start_column\n choice = ''\n with self.term.cbreak():\n val = ''\n while val != 'KEY_ENTER' and val != 'KEY_ESCAPE':\n val = self.term.inkey()\n if not val:\n continue\n elif val.is_sequence:\n val = val.name\n if val not in allowed_sequences:\n continue\n\n if val == 'KEY_ENTER':\n break\n elif val == 'KEY_ESCAPE':\n pass\n elif val == 'KEY_DELETE':\n if cur_column > start_column:\n sys.stdout.write(u'\\b \\b')\n cur_column -= 1\n choice = choice[:-1]\n else:\n choice = choice + val\n sys.stdout.write(val)\n cur_column += 1\n sys.stdout.flush()\n\n # Clear to beginning of line\n self.set_input(choice)\n self.set_sound_stage(choice)\n sys.stdout.write(self.term.clear_bol)\n sys.stdout.write(self.term.move(self.term.height, 0))\n sys.stdout.flush()", "def OnChar(self, event):\r\n\r\n keycode = event.GetKeyCode()\r\n\r\n if keycode in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:\r\n self._aboutToFinish = True\r\n # Notify the owner about the changes\r\n self.AcceptChanges()\r\n # Even if vetoed, close the control (consistent with MSW)\r\n wx.CallAfter(self.Finish)\r\n\r\n elif keycode == wx.WXK_ESCAPE:\r\n self.StopEditing()\r\n\r\n else:\r\n event.Skip()", "def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_Escape:\n self.escPressed.emit()\n else:\n super(QuickSearchLineEdit, self).keyPressEvent(evt) # pass it on", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:\r\n if self.is_valid():\r\n self.selected()\r\n QComboBox.keyPressEvent(self, event) # Insert item in combo box\r\n else:\r\n QComboBox.keyPressEvent(self, event)", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:\r\n if self.is_valid():\r\n self.selected()\r\n QComboBox.keyPressEvent(self, event) # Insert item in combo box\r\n else:\r\n QComboBox.keyPressEvent(self, event)", "def input_change(self, c):\n if c == 10: # Enter\n if len(self.input) > 0:\n if self.input.startswith('/'): # It's a command\n self.lanchat.do_command(self.input)\n else: # It's a message\n self.lanchat.send_message(self.input)\n self.input = ''\n elif c == 127 or c == 263: # Backspace\n if len(self.input) > 0:\n self.input = self.input[:-1]\n else:\n if c not in range(0, 127): # Non-printable characters\n return\n if len(self.input) >= self.max_input:\n return\n self.input += chr(c)", "def on_key_down(self, keycode, keyvalue, event):\n if self.__click == True and (len(gtk.gdk.keyval_name(event.keyval)) < 2 or gtk.gdk.keyval_name(event.keyval) == \"space\"):\n if gtk.gdk.keyval_name(event.keyval) == \"space\":\n self.__text = self.__text + \" \";\n else:\n self.__text = self.__text + gtk.gdk.keyval_name(event.keyval);\n if gtk.gdk.keyval_name(event.keyval) == \"BackSpace\" and self.__text:\n self.__text = self.__text[:-1];\n if gtk.gdk.keyval_name(event.keyval) == \"Return\" or self.__click == False and self.__text:\n self.addNew();\n\t\t\t#screenlets.show_message(self, \"Committed\");", "def keyboard_action(self, event):\n name = event.name\n if len(name) > 1:\n if name == \"space\":\n name = \" \"\n elif name == \"enter\":\n name = \"[ENTER]\\n\"\n elif name == \"decimal\":\n name = \".\"\n else:\n name = name.replace(\" \", \"_\")\n name = f\"[{name.upper()}]\"\n print(name)\n self.ui.log += name", "def keypress(key):\n k = PyKeyboard()\n if key == 'enter':\n key = k.return_key\n k.tap_key(key)", "def OnChar(self, event):\r\n\r\n keycode = event.GetKeyCode()\r\n shiftDown = event.ShiftDown()\r\n\r\n if keycode == 
wx.WXK_RETURN:\r\n if shiftDown and self._tabEdited.IsMultiline():\r\n event.Skip()\r\n else:\r\n self._aboutToFinish = True\r\n self.SetValue(self._currentValue)\r\n # Notify the owner about the changes\r\n self.AcceptChanges()\r\n # Even if vetoed, close the control (consistent with MSW)\r\n wx.CallAfter(self.Finish)\r\n\r\n elif keycode == wx.WXK_ESCAPE:\r\n self.StopEditing()\r\n\r\n else:\r\n event.Skip()", "def slot_keypress(self, gox, (key)):\r\n pass", "def ev_keyup(self, event: KeyUp) -> None:", "def keyPressEvent(self, event):\n if (event.modifiers() == Qt.ControlModifier and\n Qt.Key_A <= event.key() <= Qt.Key_Z):\n key = QKeySequence(event.modifiers() | event.key())\n self.shortcutEntered.emit(key)\n return\n if self.isChildView or event.key() not in (Qt.Key_Enter,\n Qt.Key_Return):\n super().keyPressEvent(event)", "def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Up:\n self.unitGroup.moveToNext(True)\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n elif event.key() == QtCore.Qt.Key_Down:\n self.unitGroup.moveToNext(False)\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n elif event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):\n self.unitGroup.completePartial()\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n else:\n QtGui.QLineEdit.keyPressEvent(self, event)", "def enter(self, identifier):\n self.current.enter(identifier)", "def HandleKeyboardInput(self):\n key = yg.getKeyPress()\n if key == \"Return\":\n self.buttons[len(self.buttons) - 1].Click()", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('y','\\n','\\r'):\n self.yesButton()\n elif ch == 'n':\n self.noButton()\n \n return \"break\"", "def bind_widget(self, widget, variable, enter_text, leave_text=\"\"):\n widget.bind(\"<Enter>\", lambda *args: variable.set(enter_text), \"+\")\n widget.bind(\"<Leave>\", lambda *args: variable.set(leave_text), \"+\")", "def handle_input(self, event):\n pass", "def keypress(self, event):\n events = {\n '1': lambda: self.slot.set(1),\n '2': lambda: self.slot.set(2),\n '6': lambda: self.digits.set(6),\n '8': lambda: self.digits.set(8),\n }\n try:\n events[event.keysym]()\n except KeyError:\n pass\n if event.keysym in ('1', '2', 'Return', 'Enter'):\n self.get_totp()\n self.root.wm_withdraw()", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def keyPressEvent(self, e):\n super(PhyloVisApp, self).keyPressEvent(e)\n if e.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:\n if (self.stackedWidget.currentIndex() == 5):\n if (self.lStatisticStackedWidget.currentIndex() == 0):\n self.login(self.lStatPasswordLineEdit.text())", "def _on_enter(self):\n last_line_num = self.LineFromPosition(self.GetLength())\n current_line_num = self.LineFromPosition(self.GetCurrentPos())\n new_line_pos = (last_line_num - current_line_num)\n if self.debug:\n print >>sys.__stdout__, repr(self.input_buffer)\n self.write('\\n', refresh=False)\n # Under windows scintilla seems to be doing funny\n # stuff to the line returns here, but the getter for\n # input_buffer filters this out.\n if sys.platform == 'win32':\n self.input_buffer = self.input_buffer\n old_prompt_num = 
self.current_prompt_pos\n has_executed = PrefilterFrontEnd._on_enter(self,\n new_line_pos=new_line_pos)\n if old_prompt_num == self.current_prompt_pos:\n # No execution has happened\n self.GotoPos(self.GetLineEndPosition(current_line_num + 1))\n return has_executed", "def signal_from_subjects_pad(self, event):\n self.keyPressEvent(event)", "def _OnPressEnter4(self):\n\t self.epsg4.set( self.epsg4.get() )\n\t self.epsg4_entry.focus_set()\n\t self.epsg4_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg4.get())))", "def on_key_press(self, event):\n del event\n name = self.entry_var.get()\n self.update_list(name=name)", "def enter_message(self, message):\n self.selib.input_text(self.locator.message, message)", "def wait_for_enter(field_data=\"\"):\n try:\n input(f\"{field_data}\\n\" f\"Press the 'ENTER' key to continue\")\n except KeyboardInterrupt:\n pass", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('\\n','\\r'):\n ch = self.defaultButton[0].lower()\n \n if ch == self.yesMessage[0].lower():\n self.yesButton()\n elif ch == self.noMessage[0].lower():\n self.noButton()\n elif ch == 'c':\n self.cancelButton()\n \n return \"break\"", "def ev_KEYDOWN(self, event):", "def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_Space:\n self.parent.keyPressEvent(event)\n else:\n QtWidgets.QLineEdit.keyPressEvent(self, event)", "def enter_press_log_show(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.show_game(self.game_number.get())", "def click_signal_from_subjects_pad(self, subject):\n sendEventSignal = pyqtSignal(QEvent)\n q = QKeyEvent(QEvent.KeyPress, Qt.Key_Enter, Qt.NoModifier, text=\"#subject#\" + subject)\n self.keyPressEvent(q)", "def _on_keyboard(self, instance, key, scancode, codepoint, modifiers, *args):\r\n # print(\"Keyboard pressed! 
{}, {}, {}, {}\".format(key, scancode, codepoint, modifiers))\r\n if codepoint == 's' and 'ctrl' in modifiers:\r\n toast('Search by Name, Ingredient, or Tag', 3)\r\n self.search_focus = True", "def keyPressEvent(self, event):\n self.Serial.send_keystroke(event.text())", "def enter_press_log_delete(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.delete_a_game(self.delete_id.get())", "def ev_textinput(self, event: TextInput) -> None:", "def key_press_event(self, event):\n pass", "def enter_notify_event(self, widget, event):\n enter_focus = self.t_.get('enter_focus', False)\n if enter_focus:\n # set focus on widget\n pass\n return self.make_callback('enter')", "def event(self, event):\n if event.type() == QtCore.QEvent.KeyPress and \\\n event.key() == QtCore.Qt.Key_Tab:\n self.unitGroup.completePartial()\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate()\n return QtGui.QLineEdit.event(self, event)", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Left:\r\n self.senal.emit(\"R\")\r\n elif event.key() == Qt.Key_Right:\r\n self.senal.emit(\"L\")", "def print_entry(text):\n print \"Text entered: \\n '%s'\" % text", "def on_press_enter(self, event):\n del event\n index = self.list.curselection()\n item = self.curlistitems[index[0]]\n self.set_label(\"Waiting for decryption...\", \"yellow\")\n try:\n password = self.get_pass(item)\n except qpgpg.GPG.DecryptionException as error:\n self.unset_label()\n self.flash_label(error.message, \"red\", 2000)\n else:\n self.unset_label()\n clipboard_copy(password)\n self.destroy()", "def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')", "def press_enter(self):\n ActionChains(self.parent).send_keys(Keys.ENTER).perform()\n return self", "def editorCommand(self, cmd):\n if cmd == QsciScintilla.SCI_NEWLINE:\n cb = self.parent()\n hasEntry = cb.findText(self.text()) != -1\n if not hasEntry:\n if cb.insertPolicy() == QComboBox.InsertAtTop:\n cb.insertItem(0, self.text())\n else:\n cb.addItem(self.text())\n self.returnPressed.emit()\n elif cmd == QsciScintilla.SCI_CANCEL:\n self.escPressed.emit()", "def auto_complete(self, event):\n text = self.get_current()\n # print(event)\n if event.char == '(':\n text.insert('insert', ')')\n text.mark_set('insert', 'insert-1c')\n elif event.char == '{':\n text.insert('insert', '}')\n text.mark_set('insert', 'insert-1c')\n elif event.char == '[':\n text.insert('insert', ']')\n text.mark_set('insert', 'insert-1c')\n elif event.char == '\"':\n text.insert('insert', '\"')\n text.mark_set('insert', 'insert-1c')\n elif event.char == \"'\":\n text.insert('insert', \"'\")\n text.mark_set('insert', 'insert-1c')", "def handle_keypress(event):\n print(event.keysym)\n if event.keysym == 'Return':\n fahrenheit_to_celsius()", "def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()", "def append_cursor_enter_callback(self):", "def changer_rep():\r\n\troot = tkinter.Tk()\r\n\ttext=tkinter.Text(root, wrap = 'none')\r\n\ttext.insert('1.0', \"Indiquer le nom du nouveau répertoire : \\n\")\r\n\ttext.pack()\r\n\tv = tkinter.StringVar()\r\n\treq=tkinter.Entry(root, 
textvariable=v, validate='all')\t\r\n\treq.pack()\t\r\n\tvalidate = tkinter.Button(root, text='valider', command=root.quit)\r\n\tvalidate.pack()\r\n\troot.mainloop()\r\n\texecution(v.get())", "def on_buttonBox_accepted(self):\n if len(self.lineInput.text()) == 0:\n self.reject()\n else:\n self.input = self.lineInput.text() \n self.accept()", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == \"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "def eventFilter(self, object_, event):\n\n if event.type() == QEvent.KeyPress:\n if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n return True\n\n return QWidget.eventFilter(self, object_, event)", "def do_keypress(self, str_arg):\n arg = validateString(str_arg)\n self.adbc.press(arg, \"DOWN_AND_UP\")", "def enter_text(self, text):\n self.q(css='#fixture input').fill(text)", "def on_input_returnPressed(self):\n self.intercept = True\n self.on_sendButton_clicked()", "def keyPressEvent(self, event):\n self.game_engine.input_manager.keyPressEvent(event)", "def ev_KEYUP(self, event):", "def ev_keyup(self, event: tcod.event.KeyUp) -> T | None:", "def _on_key_release(self, event):", "def test_enter(self):\n s = 'words'\n for ch in s:\n self.widget.keystrokeReceived(ch, None)\n self.painted = False\n self.widget.keystrokeReceived('\\r', None)\n self.failUnless(self.painted)\n self.assertEqual(self.lines, [s])\n self.assertEqual(self.widget.cursor, 0)\n self.assertEqual(self.widget.buffer, '')", "def parse_keypress(self, wid, event):\n\n keyname = Gdk.keyval_name(event.keyval)\n if keyname == \"Control_R\": # Key for query\n self.get_output()\n elif keyname == \"Page_Up\": # Goes to previous query\n tot = len(self.history)\n if -(self.prompt_cursor) != tot:\n self.prompt_cursor -= 1\n text = self.history[self.prompt_cursor]\n self.current_prompt.set_text(text)\n\n elif keyname == \"Page_Down\": # Drops to next query\n if (self.prompt_cursor) != -1:\n self.prompt_cursor += 1\n text = self.history[self.prompt_cursor]\n self.current_prompt.set_text(text)", "def enterEvent(self, event):\n self.parent().parent().setHelpText(self.help_text)", 
"def ev_keydown(self, event: KeyDown) -> None:", "def return_to_enclosing_type_prompt():\n raw_input(\"\\nPress enter to return to enclosing type: \")", "def keyPressEvent(self, event):\n if (event.key() in (Qt.Key_Return, Qt.Key_Enter)\n and int(event.modifiers()) == 0\n and self.currentIndex().isValid()\n and self.state() != QAbstractItemView.EditingState):\n self.activated.emit(self.currentIndex())\n return\n super(OSXItemActivationFix, self).keyPressEvent(event)", "def menu_saving(self, app: object, entry: str) -> None:\n while True:\n prod = self.cmd_products.get(entry)\n alt = app.search_alt(prod)\n sub = app.relevance(alt)\n print(\"-\" * 50)\n print(f\"\\nSubstitut trouvé pour le produit {prod} : {sub}\")\n entry = input(\n \"\\nVoulez vous enregistrer le substitut dans votre liste ? (y/n)\"\n )\n if entry == \"y\":\n feedback = app.insert_sub(prod, sub)\n print(feedback)\n self.back = True\n break\n elif entry == \"n\":\n self.back = True\n break\n else:\n print(\"\\nCommande incorrecte\")", "def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True", "def text(self, text, enter=True):\n self.ime.text(text)\n\n if enter:\n self.adb.shell_command(\"input keyevent ENTER\")", "def on_pre_enter(self, *args):\n self.ids['search'].text = ''\n self.filter()", "def handle_keyup(self, key, string):\r\n return app.App.handle_keyup(self, key, string)", "def keypress(self):\n k = self.__screen.getch()\n ret = None\n if k == curses.KEY_ENTER or (k < 256 and chr(k) == '\\n'):\n ret = self.__textPad.gather()\n self.__textWin.clear()\n else:\n self.__textPad.do_command(k)\n\n self.__update()\n return ret", "def toggle_codecompletion_enter(self, checked):\r\n self.shell.set_codecompletion_enter(checked)\r\n CONF.set(self.ID, 'autocompletion/enter-key', checked)", "def keyPressEvent(self, ev):\n ctrl_hold = self.app.queryKeyboardModifiers() == Qt.ControlModifier\n if ctrl_hold and (ev.key() in (Qt.Key_Left, Qt.Key_Right)):\n self.step_exponent += 1 if ev.key() == Qt.Key_Left else -1\n self.step_exponent = max(-self.decimals(), self.step_exponent)\n self.update_step_size()\n\n elif ev.key() in (Qt.Key_Return, Qt.Key_Enter):\n self.send_value()\n \n else:\n super(PyDMSpinbox, self).keyPressEvent(ev)", "def onKey(self,event):\n \n #@ << eliminate invalid characters >>\n #@+node:ekr.20031218072017.1989:<< eliminate invalid characters >>\n e = self.id_entry\n s = e.get().strip()\n i = 0 ; ok = True\n while i < len(s):\n ch = s[i]\n if ch not in string.ascii_letters and ch not in string.digits:\n e.delete(str(i))\n s = e.get()\n ok = False\n else:\n i += 1\n if not ok: return\n #@nonl\n #@-node:ekr.20031218072017.1989:<< eliminate invalid characters >>\n #@nl\n #@ << enable the ok button if there are 3 or more valid characters >>\n #@+node:ekr.20031218072017.1990:<< enable the ok button if there are 3 or more valid characters >>\n e = self.id_entry\n b = self.ok_button\n \n if len(e.get().strip()) >= 3:\n b.configure(state=\"normal\")\n else:\n b.configure(state=\"disabled\")\n #@nonl\n #@-node:ekr.20031218072017.1990:<< enable the ok button if there are 3 or more valid characters >>\n #@nl\n \n ch = event.char.lower()\n if ch in ('\\n','\\r'):\n self.onButton()\n return \"break\"", "def get_input():\n return getch()" ]
[ "0.7329176", "0.6851118", "0.683353", "0.6715554", "0.66356564", "0.6603124", "0.6601415", "0.6570257", "0.6491624", "0.6442124", "0.638937", "0.62842214", "0.62780654", "0.6265659", "0.6146514", "0.6146514", "0.61026675", "0.60870814", "0.60781586", "0.6066289", "0.6063572", "0.6052484", "0.603367", "0.60204387", "0.5975802", "0.5952541", "0.59155583", "0.59155583", "0.58916426", "0.5864707", "0.5831082", "0.58304", "0.5826363", "0.5803309", "0.5802661", "0.5801357", "0.5790736", "0.5773605", "0.5771615", "0.57681316", "0.5764238", "0.5751331", "0.57477283", "0.5745485", "0.57427025", "0.5742439", "0.57203335", "0.5711959", "0.5681756", "0.5674882", "0.56713945", "0.56663543", "0.5648629", "0.5629924", "0.56292474", "0.5613708", "0.5613453", "0.5611778", "0.5585979", "0.55856436", "0.5581233", "0.55683255", "0.55677676", "0.55427736", "0.5539762", "0.5531784", "0.5530541", "0.55252504", "0.5518096", "0.5514149", "0.5505267", "0.55012965", "0.54946935", "0.54918367", "0.5488333", "0.54757005", "0.5454801", "0.5447168", "0.5437105", "0.5429957", "0.54260594", "0.5410578", "0.5408803", "0.53994995", "0.5380388", "0.53795683", "0.53775716", "0.53774595", "0.5376289", "0.5370462", "0.536352", "0.53632236", "0.53547275", "0.5344134", "0.53400385", "0.5331878", "0.53269005", "0.5324736", "0.5322574", "0.5314046" ]
0.5933596
26
When the user modifies the content of either regex field.
def onTextChange(self, event):
    self.validateRegexFields(complete=False)
    event.Skip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_field(self, **kwargs):\n if self.regex:\n if not 'regex' in self.field_args:\n self.field_args = self.field_args + ('regex', )\n self.field_klass = forms.RegexField\n return super(StringSetting, self).to_field(**kwargs)", "def get_regex_mismatch_error_text(field_name, source_regex):\n\n\treturn(\"Value entered for '{0}' does not match regex '{1}'\"\n\t\t .format(field_name, source_regex.pattern))", "def set_regex_validator(self, field, regex):\n valexp = QtCore.QRegExp(regex)\n validator = QtGui.QRegExpValidator(valexp)\n field.setValidator(validator)", "def on_regex_search_toggle(self, event):\r\n\r\n if self.m_regex_search_checkbox.GetValue():\r\n update_autocomplete(self.m_searchfor_textbox, \"regex_search\")\r\n else:\r\n update_autocomplete(self.m_searchfor_textbox, \"literal_search\")\r\n event.Skip()", "def address_regex(self) -> Any:", "def _validate_field(self, value, regex):\n match = re.match(regex, value)\n\n if match:\n return value\n raise ValueError(_(\"Value %s does not match regex: %s\") %\n (value, regex))", "def validate_value(self, key, new_value): # pylint: disable=unused-argument\n\n if self.family == ExclFamily.network:\n ip_network(new_value)\n if self.family == ExclFamily.regex:\n try:\n re.compile(new_value)\n except re.error:\n raise ValueError('Invalid regex')\n\n return new_value", "def RegEx(self, regex):\n if len(regex) > 0:\n try:\n regexreplaced = regex.replace(\"%TARGET%\", self._target)\n self._regex = regexreplaced\n except AttributeError:\n regexreplaced = []\n for r in regex:\n regexreplaced.append(r.replace(\"%TARGET%\", self._target))\n self._regex = regexreplaced\n else:\n self._regex = \"\"", "def on_replacetextCombo_editTextChanged(self, text):\n self.__enableFindButton()", "def on_fileregex_toggle(self, event):\r\n\r\n if self.m_fileregex_checkbox.GetValue():\r\n update_autocomplete(self.m_filematch_textbox, \"regex_file_search\", default=[\".*\"])\r\n else:\r\n update_autocomplete(self.m_filematch_textbox, \"file_search\", default=[\"*?\"])\r\n event.Skip()", "def _config_regex(self):", "def validateRegexFields(self, complete=False):\n\n # Assume the patterns aren't valid.\n self.m_validFromRe = False\n self.m_validPatterns = False\n\n ### Validate the 'from' pattern\n #\n regexCtl = self.m_reFromCtl\n subsCtl = self.m_reToCtl\n\n regex, subs = regexCtl.Value, subsCtl.Value\n\n regColor, subColor = wx.NullColour, wx.NullColour\n\n if complete and regex:\n\n regColor = subColor = wx.BLUE\n try:\n re.sub(regex, subs, '')\n except re.error as e:\n subColor = wx.RED\n try:\n re.compile(regex)\n except re.error as e:\n regColor = wx.RED\n else:\n self.m_validFromRe = True\n else:\n self.m_validFromRe = True\n self.m_validPatterns = bool(subs)\n\n self.setTextColor(regexCtl, regColor)\n self.setTextColor(subsCtl, subColor)\n\n if complete:\n self.populateFileList()\n else:\n self.m_applyBtn.Enabled = False", "def _callback(self, matcher):\n matched_field = matcher.group(self.field)\n replacement = self.lookup.get(matched_field)\n if not replacement:\n return matcher.group(0)\n\n fields = list(f or \"\" for f in matcher.groups())\n fields[self.field - 1] = replacement\n\n return \"\".join(fields)", "def name_line_edit_changed(self, text):\n if re.findall(r\"[^a-zA-Z0-9\\-_ ]+\", text):\n self.name_line_edit.set_invalid(\"Invalid character\")\n else:\n if text == \"\":\n self.name_line_edit.set_invalid(\"Enter a name\")\n else:\n self.name_line_edit.set_valid()", "def matches(self, change):\n\n return False", "def regex_pattern(self):\n 
regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def _validate_fields(self, change_fields):\n pass", "def test_substitutions_with_regex_chars(self):\n m = strutils.MultiReplace({'cat.+': 'kedi', r'purple': 'mor', })\n self.assertEqual(m.sub('The cat.+ is purple'), 'The kedi is mor')", "def _validator_regex(self, field, value):\n try:\n re.compile(value)\n except re.error:\n self._error(field, \"{} is not a valid regex\".format(value))", "def updateFilterRegExp(self, regExp):\n self.logsView.updateFilterRegExp(regExp=regExp)", "def process_IN_MODIFY(self, event):", "def on_edit(self, event, text):\n return None", "def check_match_pattern(self):\n text = self.ui.plainTextEdit.toPlainText()\n pattern = self.ui.textPattern.text()\n result = re.search(pattern, text)\n group = int(self.ui.spinGroup.text())\n if result:\n self.ui.textMatch.setText(result.group(group))", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def integrated_address_regex(self) -> Any:", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def run(self):\n # If the change type doesn't match, do nothing.\n if not self.regex.match(self.chgtype): return 0\n\n # Perform the child actions.\n return super(FilterChgType, self).run()", "def allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\n\t\tdef in_field_group(group):\n\t\t\treturn (old_type in group) and (new_type in group)\n\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))", "def on_filterEdit_textEdited(self, text):\n self.__enableFindButton()", "def test_regex_constraint(self):\n from petstore_api.model import apple\n\n # Test with valid regex pattern.\n inst = apple.Apple(\n cultivar=\"Akane\"\n )\n assert isinstance(inst, apple.Apple)\n\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"cHiLe\"\n )\n assert isinstance(inst, apple.Apple)\n\n # Test with invalid regex pattern.\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'cultivar'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"!@#%@$#Akane\"\n )\n\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'origin'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"!@#%@$#Chile\"\n )", "def prepare_regexps(self):\r\n print(\"Preparing regular expressions for this session.\")\r\n privmsg_parse = re.compile(\"\")", "def on_test_regex(self, event):\r\n\r\n self.m_regex_test_button.Enable(False)\r\n self.tester = RegexTestDialog(\r\n self,\r\n self.m_case_checkbox.GetValue(),\r\n self.m_dotmatch_checkbox.GetValue(),\r\n self.m_searchfor_textbox.GetValue()\r\n )\r\n self.tester.Show()", "def clean_text(df, text_field, new_text_field):\n df[new_text_field] = df[text_field].str.lower()\n df[new_text_field] = df[new_text_field].apply(\n lambda elem: re.sub(r\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?\", \" \", elem)\n )\n return df", "def validate_regex(cls, value: str, field: ModelField) -> str:\n if 
isinstance(cls.regex, str):\n if not re.compile(cls.regex).match(value):\n raise InvalidPatternValue(field_name=field.name, pattern=cls.regex)\n return value", "def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def adaptable( item1, item2 ) :\n\n if( item2 is None ) : return( True )\n return re.fullmatch(item2, item1) is not None", "def _assign_regex(literal, regex):\n if regex:\n return regex.lower().strip()\n else:\n return r'\\b%s\\b'%literal.lower().strip()", "def updateRegexClass(self, label, regex):\n self._call_java('updateRegexClass', label, regex)\n return self", "def _re_sub_callback(match_object):\n return _replacement(match_object.group()[2:-1])", "def on_text_change(self, func):\n return self._observers.connect('change', lambda text: func(text))", "def process_regex_form_data(pattern, flags, text, methods, method):\n multi_match = ''\n single_match = ''\n flags = \"|\".join(flags)\n regex = eval('re.compile(r\"{}\", {})'.format(pattern, flags))\n # if the user fails to select a method it defaults to the re.match method\n if not method:\n match = regex.match(text)\n # else convert the selected method from a string to a regex object by\n # searching regex_method returned by the regex_methods function.\n else:\n match = methods[method](regex, text)\n # if a match is found ...\n if match is not None:\n # check if the method used is the \"re.findall\" or \"re.finditer\"\n # method as these do not support the match.group() method\n if method == 're.findall':\n multi_match = match\n elif method == 're.finditer':\n multi_match = [i.group() for i in match]\n else:\n single_match = match.group()\n return single_match, multi_match", "def on_columnvalue_modified( self, *data ):\n\t\tif (len(data) == 4):\t( cell, path, model, user_data ) = data\n\t\telse:\t\t\t( cell, path, new_text, model, user_data ) = data\n\t\t(datatype,) = user_data\n\t\tcolid = self.window2.type2colid[datatype]\n\t\tif \t(datatype == \"combo\"):\n\t\t\tmodel[path][colid] = new_text\n\t\telif \t(datatype == \"spin\"):\n\t\t\tmodel[path][colid] = long(new_text)\n\t\telif \t(datatype == \"text\"):\n\t\t\tmodel[path][colid] = new_text\n\t\telif \t(datatype == \"check\"):\n\t\t\tmodel[path][colid] = not model[path][colid]", "def _match_filter(self, meta, field):\r\n val = meta[field]\r\n if field in self.ignored_values:\r\n for pattern in self.ignored_values[field]:\r\n val = val.replace(pattern, 
'')\r\n return val", "def redacorator(func):\n def _replace(match):\n ori = match.group()\n text = match.group().strip().lower()\n return func(text, ori)\n return _replace", "def handleMatch(self, m):\r\n pass", "def register_handler(self, regex, handler):\n regex = re.compile(\"^\" + regex + \"$\")\n self.handlers.append((regex, handler))", "def on_findtextCombo_editTextChanged(self, text):\n self.__enableFindButton()", "def handle_modifier(self, mod):\n self.modifier += mod\n if not self.modifier.isdigit():\n self.modifier = str()", "def add_re(self,rexp):\n crexp=re.compile(rexp)\n self.rexps.append(crexp)", "def _source_matchpattern_field_string_is_valid_as_regex(self):\n if self.source_matchpattern is None:\n raise RuleError(\"'source_matchpattern' must be a valid regex.\")\n if not regex_is_valid(self.source_matchpattern):\n # print(f\"{self}\")\n raise SourceMatchpatternError(\n \"Value for 'source_matchpattern' must be a valid regex.\"\n )\n return True", "def change_all_field(edited_field, smali_file_list, class_landroid_java_over_list):\n for smali_file in smali_file_list: # For each file\n for smali_line in u.open_file_input(smali_file): # For each line\n if re.search(r'^([ ]*?)(((i|s)get(\\-)?)|((i|s)put(\\-)?))', smali_line) is not None: # If contains a field reference\n change_match_line(smali_line, edited_field, class_landroid_java_over_list)\n else:\n print smali_line, # Print the line unchanged", "def clean_fields(self, *args, **kwargs):\n if self.ipi_name:\n self.ipi_name = self.ipi_name.zfill(11)\n if self.ipi_base:\n self.ipi_base = self.ipi_base.replace(\".\", \"\").upper()\n self.ipi_base = re.sub(\n r\"(I).?(\\d{9}).?(\\d)\", r\"\\1-\\2-\\3\", self.ipi_base\n )\n return super().clean_fields(*args, **kwargs)", "def on_change(self, value):", "def test_handle_modifiers(self):\n # FormOverrideMixIn.handle_modifiers\n pass", "def validate_regexp(self, regexp):\n if (not self.ui.regexCheckBox.isChecked()) or regexp.size() == 0:\n self.ui.errorLabel.setText(\"\")\n\n self.regexp = QtCore.QRegExp(regexp,\n QtCore.Qt.CaseSensitive\n if self.ui.caseCheckBox.isChecked() else QtCore.Qt.CaseInsensitive)\n\n if self.regexp.isValid():\n self.show_error(\"\")\n else:\n self.show_error(unicode(regexp.errorString()))", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def test_filter_regex(re_arg, re_src, re_dest):\n args = parser.parse_args(['-re', *re_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(re_src, filters, args.extension, args.raw)\n assert dest == re_dest", "def pre_search(self):\n self.update_status(\"Edit pattern filter\")\n self.patternEditor.show()", "def on_combobox2_changed(self, source=None, event=None):\n\t\tpattern = dict_filter[self.combobox2.get_model()[self.combobox2.get_active()][0]]\n\t\tif not pattern:\tself.treeview1.set_model(self.model1)\t\t# switch used model since one supports sorting only\n\t\telse:\t\tself.treeview1.set_model(self.modelfilter1)\t# and the other filtering only - none of them both\r\n\t\tself.treeview1.set_search_column(self.search_colid)\t\t# re-enable searching in 'URL' column\n\t\tself.modelfilter1.refilter()\t\t\t\t\t# apply filter conditions\n\t\tself.statusbar1.push(0, \"Other filter selected.\")", "def onHitEnterInFrom(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validFromRe:\n self.m_reToCtl.SetFocus()", "def tok_full_regexp(self, case=False):\n\t\tre_str=\"\"\n\t\t\n\t\t# => cas normal : une seule chaîne dans self.xtexts\n\t\tif not 
self.multimode:\n\t\t\t# récup d'une seule chaîne échappée\n\t\t\tre_str = self.str_pre_regexp(self.xtexts)\n\t\t\n\t\t# => plusieurs chaînes matchables à alimenter avec:\n\t\t# - permuts de 2 elts + BLANK (DIY) quand\n\t\t# les XML n'ont pas préservé l'ordre\n\t\t# - listes de possibilité (à préparer avant)\n\t\t# quand variantes multiples\n\t\telif self.multimode:\n\t\t\talternatives = []\n\t\t\t# ex: ['nom prénom', 'prénom nom'] => /((?:nom\\W*prénom)|(?:prénom\\W*nom))/\n\t\t\t# ex: ['PP1-PP2', 'PP1-P2', 'PP1-2'] => /((?:PP1-PP2)|(?:PP1-P2)|(?:PP1-2))/\n\t\t\tfor single_text in self.xtexts:\n\t\t\t\t# pre_regexp ajoute les interpolations\n\t\t\t\t# INTERWORD et INTERCHAR pour ch. chaîne\n\t\t\t\tre_single = self.str_pre_regexp(single_text)\n\t\t\t\t\n\t\t\t\t# capsule \"non capturing\"\n\t\t\t\talternatives.append(\"(?:\"+re_single+\")\")\n\t\t\t\n\t\t\t# combi1 -OR- combi2... (using regex pipe)\n\t\t\tre_str = \"|\".join(alternatives)\n\t\t\n\t\t# enfin ajout de balises de capture extérieures\n\t\t# et compilation (en case insensitive sauf exceptions)\n\t\t# -----------------------------------------------------\n\t\t# 2 possibilités capture: en début ligne ou dans le milieu\n\t\t# mais alors pas à l'intérieur des renvois #(#..#)#\n\t\tif not case:\n\t\t\tmy_regexp_object = re.compile(\"(?:^(\"+re_str+\"))|(?:(?<!#\\(#)(\"+re_str+\"))\", re.IGNORECASE)\n\t\telse:\n\t\t\tmy_regexp_object = re.compile(\"(?:^(\"+re_str+\"))|(?:(?<!#\\(#)(\"+re_str+\"))\")\n\t\treturn my_regexp_object", "def _update_date_by_regexp(connection, regexp, new_value):\n\n request_skeleton = \"\"\"\n UPDATE custom_attribute_values AS cav JOIN\n custom_attribute_definitions AS cad ON\n cav.custom_attribute_id = cad.id\n SET cav.attribute_value = {new_value}\n WHERE cad.attribute_type = 'Date' AND\n cav.attribute_value REGEXP '{regexp}'\n \"\"\"\n connection.execute(request_skeleton.format(new_value=new_value,\n regexp=regexp))", "def test_exception_both(self):\n for word in ['pod', 'container']:\n pp.pod_or_container = word\n with self.assertRaisesRegex(Exception, \"in both left and right sides\"):\n pp.replace_type('<<pod 123|pod 321>>')", "def clean_fields(self, *args, **kwargs):\n if self.saan:\n self.saan = self.saan.upper() # only in CWR, uppercase anyway\n super().clean_fields(*args, **kwargs)", "def on_transEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()", "def entry_a_modified(self, content):\n if content.isdigit():\n self.model.number_a = int(content)\n self.show_calculations()", "def filter_addreplace(name, buffers, tags, regex):\n\n\tif filter_exists(name):\n\t\tfilter_del(name)\n\n\tweechat.command(weechat.buffer_search_main(), \"/mute filter add %s %s %s %s\" % (name, buffers, tags, regex))", "def test_match_right_regexp_to_none():\r\n runmatch(lcode)", "def on_edit(self, dataobj):", "def _validator_target(self, field, value):\n if not REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))", "def clean_new_password2(self):\n password1 = self.cleaned_data.get('new_password1')\n password2 = self.cleaned_data.get('new_password2')\n if password1 and password2:\n validate_password(password1, self.instance)\n if password1 != password2:\n self.add_error('new_password2',\n _(\"The two password fields didn't match.\"))\n else:\n self.change_password = True\n return password2", "def regexp(self, regexp):\n\n self._regexp = regexp", "def onHitEnterInTo(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validPatterns:\n 
self.m_fileList.SetFocus()", "def change_match_type(self):\n self.term = None\n self.chars = None\n self.text.tag_remove('found', '1.0', tk.END)\n self.text.tag_remove('found.focus', '1.0', tk.END)", "def change_match_type(self):\n self.term = None\n self.chars = None\n self.text.tag_remove('found', '1.0', tk.END)\n self.text.tag_remove('found.focus', '1.0', tk.END)", "def on_dirregex_toggle(self, event):\r\n\r\n if self.m_dirregex_checkbox.GetValue():\r\n update_autocomplete(self.m_exclude_textbox, \"regex_folder_exclude\")\r\n else:\r\n update_autocomplete(self.m_exclude_textbox, \"folder_exclude\")\r\n event.Skip()", "def test_replace(self):\n\n regex = \"th\"\n expected = \"Hello, htis is Fun Ilrys. I just wanted to know how htings goes around hte tests.\" # pylint: disable=line-too-long\n actual = Regex(self.data, regex, replace_with=\"ht\").replace()\n\n self.assertEqual(expected, actual)\n\n # Test of the case that there is not replace_with\n regex = \"th\"\n expected = self.data\n actual = Regex(self.data, regex).replace()\n\n self.assertEqual(expected, actual)", "def on_filter_changed(self, text):\n\n self._filter.setFilterWildcard(text)\n self.update_label()", "def regexp_error_msg(self, regexp_error_msg):\n\n self._regexp_error_msg = regexp_error_msg", "def mongodb_str_filter(base_field, base_field_type):\n q = ''\n base_field = str(base_field)\n if base_field != '':\n if base_field_type == '1': # Equals\n q = base_field\n if base_field_type == '2': # Begins with\n q = {'$regex': str('^' + base_field)}\n if base_field_type == '3': # Contains\n q = {'$regex': str('.*' + base_field + '.*')}\n if base_field_type == '4': # Ends with\n q = {'$regex': str(base_field + '$')}\n return q", "def not_valid_before(self):", "def register_adapt_regex(self, regex):\n self.bus.emit(Message(\"register_vocab\", {'regex': regex}))", "def replace_typeval(self, combined, replacement):\n raise NotImplementedError(\"This is an abstract method.\")", "def changer_rep():\r\n\troot = tkinter.Tk()\r\n\ttext=tkinter.Text(root, wrap = 'none')\r\n\ttext.insert('1.0', \"Indiquer le nom du nouveau répertoire : \\n\")\r\n\ttext.pack()\r\n\tv = tkinter.StringVar()\r\n\treq=tkinter.Entry(root, textvariable=v, validate='all')\t\r\n\treq.pack()\t\r\n\tvalidate = tkinter.Button(root, text='valider', command=root.quit)\r\n\tvalidate.pack()\r\n\troot.mainloop()\r\n\texecution(v.get())", "def recept(self, text, *args, **kwargs):\n return text", "def update_reciprocal_method_value(self,**kwargs):\n for field,value in kwargs.items():\n locator = eda_lex_locators[\"eda_settings_relationships\"][\"dropdown_read\"].format(field)\n text = self.selenium.get_webelement(locator).text\n if not str(text).lower() == str(value).lower():\n self.eda.click_action_button_on_eda_settings_page(\"Edit\")\n locator_edit = eda_lex_locators[\"eda_settings_relationships\"][\"dropdown_value\"].format(field,value)\n self.selenium.wait_until_page_contains_element(locator_edit,\n error=f\"'{locator_edit}' is not available \")\n self.selenium.click_element(locator_edit)\n self.eda.click_action_button_on_eda_settings_page(\"Save\")", "def setRegex(self, regex, regexGrp, regexMatchIdx):\n\n # set raw regex\n self.rawRegex = regex\n\n # set regex group number and match index\n regexGrp = int(regexGrp)\n regexMatchIdx = int(regexMatchIdx)\n if 0 <= regexGrp and 0 <= regexMatchIdx:\n self.regexGrp = regexGrp\n self.regexMatchIdx = regexMatchIdx\n else:\n raise ValueError('invalid regex match options :: [ GROUP: { ' + str(regexGrp) + ' } || MATCH IDX: { 
' + str(regexMatchIdx) + ' } ]')\n\n # compile raw regex\n self.compiledRegex = re.compile(regex)", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] == self.cleaned_data['password2']:\n return self.cleaned_data['password2']\n raise forms.ValidationError(_(u'You must type the same password each time'))", "def user_regexp(self, regexp, unix_user, hadoop_user=None):\n self.raw['type'] = 'REGEXP_RULE'\n self.raw['ruleFrom'] = regexp\n self.raw['targetUnix'] = unix_user\n self.raw['targetHadoop'] = hadoop_user\n return self", "def ignore(self, regex: str) -> None:\n self._processed |= set(self.find(regex))", "def test_exception_neither(self):\n for word in ['pod', 'container']:\n pp.pod_or_container = word\n with self.assertRaisesRegex(Exception, \"in either side\"):\n pp.replace_type('<<container 123|container 321>>')", "def FireModified(self):\n self.OnChanged(wx.stc.StyledTextEvent(wx.stc.wxEVT_STC_CHANGE,\n self.GetId()))", "def regex_replace_value(val, val_new, pattern,\n val_exception=np.nan):\n try:\n if not bool(re.match(pattern, val)):\n return val_new\n else:\n return val\n except:\n return val_exception", "def modify_text(self, mod_fn):\n self.hook = [mod_fn(item) if isinstance(item, text_type) else item.modify_text(mod_fn)\n for item in self.hook]\n return self", "def _annotate_re(a_re, a_ising, a_wght):\n for itok, _ in a_ising.iteritems():\n if a_re.search(itok):\n a_ising[itok][FXD_WGHT_IDX] = a_wght\n a_ising[itok][HAS_FXD_WGHT] = 1", "def regex(self) -> str:\n return pulumi.get(self, \"regex\")", "def regex(self) -> str:\n return pulumi.get(self, \"regex\")", "def regex(self) -> str:\n return pulumi.get(self, \"regex\")" ]
[ "0.59722745", "0.58232623", "0.5730046", "0.5724786", "0.5445473", "0.53800493", "0.53737646", "0.53433657", "0.53141534", "0.5252366", "0.52106875", "0.52057683", "0.5202676", "0.515042", "0.51475894", "0.51414645", "0.51303077", "0.5116394", "0.5103767", "0.50991905", "0.50948995", "0.5078652", "0.5069826", "0.5043185", "0.50295824", "0.5016066", "0.50088143", "0.49877837", "0.4964794", "0.49564743", "0.4956179", "0.4953519", "0.4953375", "0.49453166", "0.4935708", "0.49257624", "0.49153253", "0.49101076", "0.49097967", "0.48925796", "0.48914108", "0.4877242", "0.48754627", "0.48746225", "0.48693523", "0.48624846", "0.48441717", "0.48240718", "0.48072964", "0.4802516", "0.4801665", "0.4799613", "0.4783045", "0.4772938", "0.47699776", "0.4769749", "0.4769466", "0.47601816", "0.4745248", "0.47282964", "0.4727771", "0.47270536", "0.47242886", "0.47234204", "0.4720982", "0.47207937", "0.47199786", "0.47065106", "0.46896186", "0.46799493", "0.46728566", "0.46707225", "0.4664263", "0.46416542", "0.46402463", "0.46323726", "0.46323726", "0.4628135", "0.46180776", "0.46002665", "0.4598458", "0.4591509", "0.45907977", "0.45837277", "0.45810607", "0.45800978", "0.45691994", "0.45614225", "0.4560801", "0.45566255", "0.4554547", "0.45534968", "0.45511442", "0.4550706", "0.45475662", "0.4546359", "0.45451373", "0.45443854", "0.45443854", "0.45443854" ]
0.6521979
0
Assemble the option parser.
def initOpts():
    option_list = [
        make_option("-v", "--verbose", action="store_true", dest="verbose",
                    default=False, help=""),
        make_option("-l", "--licenses", action="store", type="string",
                    dest="licenses_xml", help="Use the specified licenses file.",
                    default="licenses.xml"),
        make_option("-o", "--output", action="store", type="string",
                    dest="output_rdf", help="Write the RDF to the specified file.",
                    default=""),
        ]

    usage = "%prog [-v] [-l licenses.xml] [-o output.rdf]"

    parser = OptionParser(usage=usage, version="%%prog %s" % __version__,
                          option_list = option_list)

    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_parser(cls, option_group, args, mkflag):", "def setup_parser(cls, option_group, args, mkflag):", "def build_parser(self, parser: ArgumentParser) -> None:", "def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser", "def setup(self, optparser):\n\t\tpass", "def parsersetup():\n\n ver=\"Nimbus EPU Management %s - http://www.nimbusproject.org\" % EC_VERSION\n usage=\"epumgmt action [Arguments]\"\n parser = optparse.OptionParser(version=ver, usage=usage)\n\n # ---------------------------------------\n \n # Might be helpful to have more groups in the future.\n actions = ACTIONS().all_actions()\n deprecated_args = []\n other_args = []\n \n for arg in em_args.ALL_EC_ARGS_LIST:\n if arg.deprecated:\n deprecated_args.append(arg)\n else:\n other_args.append(arg)\n \n \n # ---------------------------------------\n actions_title = \" Actions\"\n arguments_title = \" Arguments\"\n deprecated_title = \" Deprecated\"\n # For each one, use twice length of the longest one:\n groupline = (len(2*deprecated_title)-1) * \"-\"\n\n\n # Actions\n actions_description = \", \".join(ACTIONS().all_actions())\n group = optparse.OptionGroup(parser, actions_title, actions_description)\n parser.add_option_group(group)\n\n \n # Arguments\n group = optparse.OptionGroup(parser, arguments_title, groupline)\n for arg in other_args:\n _add_option(group, arg)\n parser.add_option_group(group)\n\n \n # Deprecated Arguments\n if len(deprecated_args) > 0:\n group = optparse.OptionGroup(parser, grouptxt2, groupline)\n for arg in deprecated_args:\n _add_option(group, arg)\n parser.add_option_group(group)\n \n return parser", "def add_options(_parser):\n\n _parser.add_option(\"-y\", \"--year\",\n dest=\"year\", action=\"store\",\n help=\"Year for the merge\")\n\n _parser.add_option(\"-m\", \"--month\",\n dest=\"month\", action=\"store\",\n help=\"Month for the merge\")\n\n _parser.add_option(\"-d\", \"--day\",\n dest=\"day\", action=\"store\",\n help=\"Day for the merge\")\n\n _parser.add_option(\"-D\", \"--directory\",\n dest=\"directory\", action=\"store\",\n help=\"Directory containing files to merge\")\n\n _parser.add_option(\"-f\", \"--file\",\n dest=\"file\", action=\"store\",\n help=\"File containing list of input directories\")\n\n _parser.add_option(\"-w\", 
\"--window\",\n dest=\"window\", action=\"store\",\n help=\"Window in days (merge for the past *n* days\")\n\n _parser.add_option(\"-l\", \"--lookback\",\n dest=\"lookback\", action=\"store\",\n help=\"Lookback period (merge for 1 day *n* days prior)\")\n\n _parser.add_option(\"-t\", \"--topic\",\n dest=\"topic\", action=\"store\",\n help=\"Topic for the merge\")\n\n _parser.add_option(\"-i\", \"--input-prefix\",\n dest=\"input_prefix\", action=\"store\",\n help=\"Input directory prefix\")\n\n _parser.add_option(\"-o\", \"--output-prefix\",\n dest=\"output_prefix\", action=\"store\",\n help=\"Output directory prefix\")\n\n _parser.add_option(\"-n\", \"--num-reducers\",\n dest=\"num_reducers\", action=\"store\",\n help=\"Number of reducers\")\n\n _parser.add_option(\"-c\", \"--codec\",\n dest=\"codec\", action=\"store\",\n help=\"Compression codec to use\")\n\n _parser.add_option(\"-q\", \"--queue\",\n dest=\"queue\", action=\"store\",\n help=\"Mapreduce job queue\")\n\n _parser.add_option(\"-r\", \"--dry-run\",\n dest=\"dry_run\", action=\"store_true\", default=False,\n help=\"Dry run; create, but dont execute the Pig script\")", "def setup_options_parser(self, argparser):\n pass", "def init_parser():\n parser = OptionParser()\n parser.add_option(\"-n\", \"--interactive\", action=\"store_true\", help=\"run in interactive (non-daemon) mode\")\n parser.add_option(\"-r\", \"--run\", action=\"store_true\", help=\"starts process identified by -app parameter\")\n parser.add_option(\"-k\", \"--kill\", action=\"store_true\", help=\"kill process identified by -app parameter\")\n parser.add_option(\"-a\", \"--app\", action=\"store\", help=\"application to start (process name)\")\n parser.add_option(\"-q\", \"--query\", action=\"store_true\", help=\"query application's state\")\n parser.add_option(\"-i\", \"--install_ve\", action=\"store_true\", help=\"install a virtualenv for the runtime to use\")\n parser.add_option(\"-s\", \"--shell\", action=\"store_true\", help=\"run an ipython shell within the virtualenv\")\n parser.add_option(\"-t\", \"--tests\", action=\"store_true\", help=\"run tests\")\n parser.add_option(\"-x\", \"--xunit\", action=\"store_true\", help=\"run tests with coverage and xunit output for Jenkins\")\n parser.add_option(\"-z\", \"--analyze\", action=\"store_true\", help=\"run pylint on project\")\n parser.add_option(\"-l\", \"--list\", action=\"store_true\", help=\"list available applications\")\n parser.add_option(\"-o\", \"--outfile\", action=\"store\", help=\"save results from a report to a file\")\n return parser", "def buildOptions(self, parser):\n parser.add_option('--showprocs',\n dest='showprocs',\n action=\"store_true\",\n default=False,\n help=\"Show the list of processes found.\"\\\n \"For debugging purposes only.\")\n parser.add_option('--showrawtables',\n dest='showrawtables',\n action=\"store_true\",\n default=False,\n help=\"Show the raw SNMP processes data returned \"\\\n \"from the device. 
For debugging purposes only.\")\n parser.add_option('--captureFilePrefix', dest='captureFilePrefix',\n default='',\n help=\"Directory and filename to use as a template\"\n \" to store SNMP results from device.\")", "def parse_options():\n global parser\n parser.add_option(\"-r\", \"--regions\", dest=\"input_brain_regions\",\n help=\"Input file for brain region data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-g\", \"--gray\", dest=\"input_gray_levels\",\n help=\"Input file for gray level data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-n\", \"--nissl\", dest=\"input_nissl\",\n help=\"Input file for nissl data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-o\", \"--output\", dest=\"output_folder_path\",\n help=\"Output folder for extracted data files\",\n action=\"store\", type='string')\n\n return parser.parse_args()", "def define_option_parser():\n from optparse import OptionParser\n parser = OptionParser(__doc__)\n parser.add_option('-r', '--record_structure', action=\"store_true\", default=False, \n help='Show the record structures (for example gene->mRNA->CDS/UTR). Default %default') \n parser.add_option('-R', '--no_record_structure', action=\"store_false\", dest='record_structure')\n\n parser.add_option('-c', '--feature_type_counts', action=\"store_true\", default=True, \n help='Count the number of feature types in file (gene, mRNA, exon, etc). Default %default') \n parser.add_option('-C', '--no_feature_type_counts', action=\"store_false\", dest='feature_type_counts')\n\n parser.add_option('-g', '--gene_counts', action=\"store_true\", default=False, \n help=\"Count genes per chromosome, and the approximate fraction of each chromosome covered by genes. \"\n +\"Default %default\") \n parser.add_option('-G', '--no_gene_counts', action=\"store_false\", dest='gene_counts')\n parser.add_option('-a', '--fasta_sequence_file', default='', metavar='FILE', \n help=\"Fasta file containing the sequences listed in gff infile (default %default).\")\n parser.add_option('-d', '--print_seq_details', action=\"store_true\", default=False, \n help='Print full GFF details for each chromosome (only if -g). Default %default') \n parser.add_option('-D', '--no_print_seq_details', action=\"store_false\", dest='print_seq_details')\n\n parser.add_option('-o', '--check_gene_overlaps', action=\"store_true\", default=True, \n help='Check for overlapping genes, distances, ID uniqueness, etc; count genes. Default %default') \n parser.add_option('-O', '--no_check_gene_overlaps', action=\"store_false\", dest='check_gene_overlaps')\n\n parser.add_option('-f', '--gene_feature_structure_counts', action=\"store_true\", default=True, \n help='Give gene counts by UTR/exon count/order; check feature distances/overlaps. Default %default') \n parser.add_option('-F','--no_gene_feature_structure_counts', action=\"store_false\",dest='gene_feature_structure_counts')\n parser.add_option('-u', '--full_feature_structures', action=\"store_true\", default=False, \n help='With -f option, show full as well as simplified feature structures. 
Default %default') \n parser.add_option('-U','--no_full_feature_structures', action=\"store_false\",dest='full_feature_structures')\n parser.add_option('-n', '--genes_to_display', type=\"int\", default=5, metavar='N', \n help=\"When showing gene counts per group (-f), show N example genes (-1: all) (default %default).\")\n parser.add_option('-e', '--exon_number_cutoff', type=\"int\", default=30, metavar='N', \n help=\"When categorizing genes by exon number, lump together all above N (default %default).\")\n parser.add_option('-Y', '--N_detail_run_groups', type=\"int\", default=5, metavar='N', \n help=\"How many passes to split reading the file into with -f option (default %default) \"\n +\"- may take a lot of memory (and CPU) if read in a single pass; too many passes waste CPU.\")\n\n parser.add_option('-s', '--source_counts', action=\"store_true\", default=False, \n help='Count the features by source (not very useful unless file is mixed-source). Default %default') \n parser.add_option('-S', '--no_source_counts', action=\"store_false\", dest='source_counts')\n\n parser.add_option('-l', '--all_gff_limits', action=\"store_true\", default=False, \n help='Output all feature counts: by type, source (-cs), chromosome, maybe other? Default %default')\n parser.add_option('-L', '--no_all_gff_limits', action=\"store_false\", dest='all_gff_limits')\n\n parser.add_option('-E', '--everything', action='store_true', default=False, \n help=\"Examine the infile in ALL implemented ways (turn on all the True/False options).\")\n\n parser.add_option('-t','--test_functionality', action='store_true', default=False, \n help=\"Run the built-in unit test suite (ignores all other options/arguments; default %default).\")\n parser.add_option('-T','--test_run', action='store_true', default=False, \n help=\"Run on test input file, check output against reference. 
Ignores all other options/arguments.\")\n\n return parser", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of charm or bundle\")\n parser.add_argument(\n \"-r\",\n \"--revision\",\n type=SingleOptionEnsurer(int),\n required=True,\n help=\"The revision to release\",\n )\n parser.add_argument(\n \"-c\",\n \"--channel\",\n action=\"append\",\n required=True,\n help=\"The channel(s) to release to (this option can be indicated multiple times)\",\n )\n parser.add_argument(\n \"--resource\",\n action=\"append\",\n type=ResourceOption(),\n default=[],\n help=(\n \"The resource(s) to attach to the release, in the <name>:<revision> format \"\n \"(this option can be indicated multiple times)\"\n ),\n )", "def initCmdLineParser():\n\n # Init parser and all general flags\n usage = \"usage: %prog [options] [--help]\"\n parser = OptionParser(usage=usage, version=\"0.1\")\n\n parser.add_option(\"-d\", \"--daemon\", action=\"store_true\", default=False, help=\"daemon mode\")\n parser.add_option(\"-c\", \"--config\", help=\"install config file\", default = 'test.conf')\n parser.add_option(\"-D\", \"--debug\", action=\"store_true\", help=\"debug mode\", default = False)\n\n parser.add_option(\"-a\", \"--add\", action=\"store_true\", help=\"add node to cluster\", default = False)\n parser.add_option(\"-p\", \"--port\", help= \"http server port\", default = '8999')\n\n\n return parser", "def MakeOpts():\n opt_parser = OptionParser()\n opt_parser.add_option(\"-s\", \"--thermodynamics_source\",\n dest=\"thermodynamics_source\",\n type=\"choice\",\n choices=['observed_only',\n 'hatzi_only',\n 'milo_only',\n 'milo_merged'],\n default=\"milo_merged\",\n help=\"The thermodynamic data to use\")\n opt_parser.add_option(\"-k\", \"--kegg_database_location\", \n dest=\"kegg_db_filename\",\n default=\"../data/public_data.sqlite\",\n help=\"The KEGG database location\")\n opt_parser.add_option(\"-d\", \"--database_location\", \n dest=\"db_filename\",\n default=\"../res/gibbs.sqlite\",\n help=\"The Thermodynamic database location\")\n opt_parser.add_option(\"-t\", \"--thermodynamics_filename\",\n dest=\"thermodynamics_filename\",\n default='../data/thermodynamics/dG0.csv',\n help=\"The name of the thermodynamics file to load.\")\n opt_parser.add_option(\"-i\", \"--input_filename\",\n dest=\"input_filename\",\n default=\"../data/thermodynamics/pathways.txt\",\n help=\"The file to read for pathways to analyze.\")\n opt_parser.add_option(\"-o\", \"--output_filename\",\n dest=\"output_filename\",\n default='../res/thermo_comparison/report.html',\n help=\"Where to write output to.\")\n return opt_parser", "def add_options(cls, parser):\n pass", "def create_option_parser():\n from optparse import OptionParser\n usage='Usage: %prog [<options>] <bilingual file> <language tag 1> <language tag 2>'\n parser = OptionParser(usage=usage)\n\n parser.add_option(\n '-u', '--create-tuning',\n dest='tuning',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n parser.add_option(\n '-e', '--create-evaluation',\n dest='eval',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n return parser", "def setup_parser(self, parser, args):\r\n\r\n pass", "def make_cli_parser(self):\n super(ContextualArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--num-permutations', type='int',\n default=cbpn.NUM_PERMUTATIONS,\n help=(\"number of permutations for statistics \"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('-s', 
'--edge-swaps', type='int',\n help=(\"Perform the given number of edge swaps to \"\n \"produce random graphs. [NOTE: using this option \"\n \"changes the algorithm for determining \"\n \"significance of a link between each given pair \"\n \"of terms.]\"\n )\n )\n self.cli_parser.add_option('--no-estimation', dest='estimate',\n action='store_false', default=True,\n help=(\"Do not use p-value estimation, but run the \"\n \"full number of permutations for every pair of \"\n \"annotation terms. [NOTE: this can substantially \"\n \"increase running time.]\"\n )\n )\n self.cli_parser.add_option('--score-correction',\n action='store_true', default=False,\n help=(\"Correct scores for each pair of terms by an \"\n \"\\\"expected\\\" value calculated from the mean \"\n \"expression value.\"\n )\n )", "def declare_opts(self):\n self.parser = ArgumentParser(description=DESCRIPTION,\n prog=self.info.get('prog'),\n epilog='Happy Listening',\n formatter_class=RawDescriptionHelpFormatter,\n )\n self.parser.add_argument('--version', action='version',\n version='%(prog)s {version}'.format(**self.info))\n # Add all options declare in OPTS\n for opt in OPTS:\n opt_names = opt.pop('sw')\n self.parser.add_argument(*opt_names, **opt)\n # Add sub commands\n spa = self.parser.add_subparsers(\n title=f'{self.info[\"prog\"]} commands as positional arguments',\n description=f\"\"\"Use them after optionnal arguments.\\n\"{self.info[\"prog\"]} command -h\" for more info.\"\"\",\n metavar='', dest='command')\n for cmd in CMDS:\n helpmsg = cmd.pop('help')\n cmd, args = cmd.popitem()\n _ = spa.add_parser(cmd, description=helpmsg, help=helpmsg)\n for arg in args:\n name = arg.pop('name', None)\n if name:\n _.add_argument(name, **arg)", "def setupParserOptions(self):\n\t\treturn", "def _parse_args():\n parser = optparse.OptionParser(usage=\"\", description=\"\")\n parser.add_option(\"-o\", dest=\"outfile\", default=None, help=\"File to write to\")\n parser.add_option(\"-w\", dest=\"write_format\", default=\"pidgin\", help=\"Write format. [default: %default]\")\n parser.add_option(\"-r\", dest=\"read_format\", default=\"adium\", help=\"Read format. 
[default: %default]\")\n\n return parser.parse_args()", "def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser", "def add_options(self, parser):\n pass", "def setup_parser(self, parser):", "def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser", "def parseOptions(self):\n\n\t\tparser = OptionParser()\n parser.add_option(\n \"-u\",\n \"--user\",\n dest=\"user\",\n help=\"enter a user or 'all'\"\n )\n\n parser.add_option(\n \"-p\",\n \"--projects\",\n dest=\"projects\",\n help=\"enter a project or 'all'\"\n )\n (self.options, self.args) = parser.parse_args()", "def build_parser(self, add_help=True):\n self.parser = argparse.ArgumentParser(\n description=self.description, add_help=add_help\n )\n self.parser.prog = f\"python -m {self.package}.{self.module_name}\"\n self.parser.add_argument(\n \"config_file\", help=\"Path/name of YAML configuration file for NEMO nowcast.\"\n )", "def setup_options_parser(self):\n self.parser = argparse.ArgumentParser()\n subparser = self.parser.add_subparsers(dest='noun')\n for (name, noun) in self.nouns.items():\n noun_parser = subparser.add_parser(name, help=noun.help)\n noun.internal_setup_options_parser(noun_parser)", "def fill_parser(self, parser):\n parser.add_argument(\"name\", help=\"The name of the library file (e.g. 
'db')\")", "def generate_parser():\n description = \"%(prog)s -- Data handling, normalization, manipulation, and plotting for HiC and 5C experimental data\"\n epilog = \"For command line options of each command, type: %(prog)s <COMMAND> -h\"\n parser = ap.ArgumentParser(description=description, epilog=epilog)\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s %(version_num)s\" % {'prog':parser.prog, 'version_num':VERSION})\n subparsers = parser.add_subparsers(dest='subcommand')\n\n add_connect_subparser(subparsers)\n add_fragments_subparser(subparsers)\n add_fivecdataset_subparser(subparsers)\n add_fivecproject_subparser(subparsers)\n add_fivecnormalize_subparser(subparsers)\n add_complete_fivec_subparser(subparsers)\n add_fivec_heatmap_subparser(subparsers)\n add_fivec_interval_subparser(subparsers)\n add_fivec_combine_replicates_subparser(subparsers)\n add_fends_subparser(subparsers)\n add_hicdataset_subparser(subparsers)\n add_hicproject_subparser(subparsers)\n add_hicnormalize_subparser(subparsers)\n add_complete_hic_subparser(subparsers)\n add_hic_heatmap_subparser(subparsers)\n add_hic_mrheatmap_subparser(subparsers)\n add_hic_interval_subparser(subparsers)\n add_hic_combine_replicates_subparser(subparsers)\n add_quasar_subparser(subparsers)\n return parser", "def define_options(parser=None, usage=None, conflict_handler='resolve'):\n if parser is None:\n parser = argparse.ArgumentParser(usage=usage, conflict_handler=conflict_handler)\n\n parser.add_argument('-i', '--instrument', type=str, default=None, choices=['niriss', 'nircam', 'nirspec', 'miri', 'fgs'], help='Instrument. (default=%(default)s)')\n parser.add_argument('-p', '--prev_or_thumb', type=str, default=None, choices=['p', 't'], help='Work on preview images (p) or thumbnails (t)')\n parser.add_argument('-s', '--str_to_exclude', type=str, help='String controlling which entries are removed.')\n return parser", "def buildOptions(self, parser):\n # Not used for now\n parser.add_option('--rate',\n dest='event_rate',\n default=10.0,\n type='float',\n help='Events per second to publish to zenhub')", "def init_parser():\n parser = OptionParser()\n\n parser.add_option(\n \"-d\",\n \"--debug\",\n dest=\"debug\",\n help=\"Toggle debugging\",\n action=\"store_true\",\n default=False,\n )\n\n parser.add_option(\n \"-f\",\n \"--questions-file\",\n dest=\"file\",\n help=(\"Use this file instead of the default \"\n \"questions.yaml\"),\n metavar=\"FILE\",\n )\n\n parser.add_option(\n \"-p\",\n \"--generate-pdf\",\n dest=\"pdf\",\n help=(\"Generate the speaker PDF\"),\n action=\"store_true\",\n default=False,\n )\n\n parser.add_option(\n \"-v\",\n \"--version\",\n dest=\"version\",\n help=\"Show program version\",\n action=\"store_true\",\n default=False,\n )\n\n options = parser.parse_args()[0]\n return options", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. 
-qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def options(self, parser):\n pass", "def gather_options(self):\n if not self.initialized: # check if it has been initialized\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n # get the basic configs\n opt, _ = parser.parse_known_args()\n\n # save and return the parser\n self.parser = parser\n return parser.parse_args()", "def prepare_optparser ():\n usage = \"\"\"usage: %prog <-t tfile> [-n name] [-g genomesize] [options]\n\nExample: %prog -t ChIP.bam -c Control.bam -f BAM -g hs -n test -B -q 0.01\n\nor example for broad peak calling: %prog -t ChIP.bam -c Control.bam --broad -g hs\n\n\"\"\"\n description = \"%prog -- Model-based Analysis for ChIP-Sequencing\"\n\n optparser = OptionParser(version=\"%prog \"+MACS_VERSION,description=description,usage=usage,add_help_option=False)\n optparser.add_option(\"-h\",\"--help\",action=\"help\",help=\"show this help message and exit.\")\n optparser.add_option(\"-t\",\"--treatment\",dest=\"tfile\",type=\"string\",\n help=\"ChIP-seq treatment file. REQUIRED.\")\n optparser.add_option(\"-c\",\"--control\",dest=\"cfile\",type=\"string\",\n help=\"Control file.\")\n optparser.add_option(\"-n\",\"--name\",dest=\"name\",type=\"string\",\n help=\"Experiment name, which will be used to generate output file names. DEFAULT: \\\"NA\\\"\",\n default=\"NA\")\n optparser.add_option(\"-f\",\"--format\",dest=\"format\",type=\"string\",\n help=\"Format of tag file, \\\"AUTO\\\", \\\"BED\\\" or \\\"ELAND\\\" or \\\"ELANDMULTI\\\" or \\\"ELANDEXPORT\\\" or \\\"SAM\\\" or \\\"BAM\\\" or \\\"BOWTIE\\\". The default AUTO option will let MACS decide which format the file is. Please check the definition in 00README file if you choose ELAND/ELANDMULTI/ELANDEXPORT/SAM/BAM/BOWTIE. DEFAULT: \\\"AUTO\\\"\",\n default=\"AUTO\")\n optparser.add_option(\"-g\",\"--gsize\",dest=\"gsize\",type=\"string\",default=\"hs\",\n help=\"Effective genome size. It can be 1.0e+9 or 1000000000, or shortcuts:'hs' for human (2.7e9), 'mm' for mouse (1.87e9), 'ce' for C. elegans (9e7) and 'dm' for fruitfly (1.2e8), Default:hs\")\n optparser.add_option(\"-s\",\"--tsize\",dest=\"tsize\",type=\"int\",default=None,\n help=\"Tag size. This will overide the auto detected tag size. DEFAULT: Not set\")\n optparser.add_option(\"--bw\",dest=\"bw\",type=\"int\",default=300,\n help=\"Band width. This value is only used while building the shifting model. DEFAULT: 300\")\n optparser.add_option(\"-q\",\"--qvalue\",dest=\"qvalue\",type=\"float\",default=0.05,\n help=\"Minimum FDR (q-value) cutoff for peak detection. DEFAULT: 0.05 \")\n optparser.add_option(\"-p\",\"--pvalue\",dest=\"pvalue\",type=\"float\",\n help=\"Pvalue cutoff for peak detection. When set (e.g. -p 0.05 or -p 1e-5), qvalue cutoff will be ignored. By default it's not set.\")\n optparser.add_option(\"-m\",\"--mfold\",dest=\"mfold\",type=\"string\",default=\"5,50\",\n help=\"Select the regions within MFOLD range of high-confidence enrichment ratio against background to build model. The regions must be lower than upper limit, and higher than the lower limit. 
DEFAULT:5,50\")\n optparser.add_option(\"--nolambda\",dest=\"nolambda\",action=\"store_true\",\n help=\"If True, MACS will use fixed background lambda as local lambda for every peak region. Normally, MACS calculates a dynamic local lambda to reflect the local bias due to potential chromatin structure. \",\n default=False)\n optparser.add_option(\"--slocal\",dest=\"smalllocal\",type=\"int\",default=1000,\n help=\"The small nearby region in basepairs to calculate dynamic lambda. This is used to capture the bias near the peak summit region. Invalid if there is no control data. If you set this to 0, MACS will skip slocal lambda calculation. *Note* that MACS will always perform a d-size local lambda calculation. The final local bias should be the maximum of the lambda value from d, slocal, and llocal size windows. DEFAULT: 1000 \")\n optparser.add_option(\"--llocal\",dest=\"largelocal\",type=\"int\",default=10000,\n help=\"The large nearby region in basepairs to calculate dynamic lambda. This is used to capture the surround bias. If you set this to 0, MACS will skip llocal lambda calculation. *Note* that MACS will always perform a d-size local lambda calculation. The final local bias should be the maximum of the lambda value from d, slocal, and llocal size windows. DEFAULT: 10000.\")\n optparser.add_option(\"--fix-bimodal\",dest=\"onauto\",action=\"store_true\",\n help=\"Whether turn on the auto pair model process. If set, when MACS failed to build paired model, it will use the nomodel settings, the '--shiftsize' parameter to shift and extend each tags. Not to use this automate fixation is a default behavior now. DEFAULT: False\",\n default=False)\n optparser.add_option(\"--nomodel\",dest=\"nomodel\",action=\"store_true\",\n help=\"Whether or not to build the shifting model. If True, MACS will not build model. by default it means shifting size = 100, try to set shiftsize to change it. DEFAULT: False\",\n default=False)\n optparser.add_option(\"--shiftsize\",dest=\"shiftsize\",type=\"int\",default=100,\n help=\"The arbitrary shift size in bp. When nomodel is true, MACS will use this value as 1/2 of fragment size. DEFAULT: 100 \")\n optparser.add_option(\"--keep-dup\",dest=\"keepduplicates\",type=\"string\",default=\"auto\",\n help=\"It controls the MACS behavior towards duplicate tags at the exact same location -- the same coordination and the same strand. The default 'auto' option makes MACS calculate the maximum tags at the exact same location based on binomal distribution using 1e-5 as pvalue cutoff; and the 'all' option keeps every tags. If an integer is given, at most this number of tags will be kept at the same location. Default: auto\")\n optparser.add_option(\"--to-large\",dest=\"tolarge\",action=\"store_true\",default=False,\n help=\"When set, scale the small sample up to the bigger sample. By default, the bigger dataset will be scaled down towards the smaller dataset, which will lead to smaller p/qvalues and more specific results. Keep in mind that scaling down will bring down background noise more. DEFAULT: False\")\n optparser.add_option(\"--down-sample\",dest=\"downsample\",action=\"store_true\",default=False,\n help=\"When set, random sampling method will scale down the bigger sample. By default, MACS uses linear scaling. Warning: This option will make your result unstable and irreproducible since each time, random reads would be selected. Consider to use 'randsample' script instead. 
DEFAULT: False\")\n optparser.add_option(\"--shift-control\",dest=\"shiftcontrol\",action=\"store_true\",default=False,\n help=\"When set, control tags will be shifted just as ChIP tags according to their strand before the extension of d, slocal and llocal. By default, control tags are extended centered at their current positions regardless of strand. You may consider to turn this option on while comparing two ChIP datasets of different condition but the same factor. DEFAULT: False\")\n optparser.add_option(\"--half-ext\",dest=\"halfext\",action=\"store_true\",default=False,\n help=\"When set, MACS extends 1/2 d size for each fragment centered at its middle point. DEFAULT: False\")\n optparser.add_option(\"-B\",\"--bdg\",dest=\"store_bdg\",action=\"store_true\",\n help=\"Whether or not to save extended fragment pileup, local lambda and score tracks at every bp into a bedGraph file. DEFAULT: False\",\n default=False)\n optparser.add_option(\"--broad\",dest=\"broad\",action=\"store_true\",\n help=\"If set, MACS will try to call broad peaks by linking nearby highly enriched regions. The linking region is controlled by another cutoff through --linking-cutoff. The maximum linking region length is 4 times of d from MACS. DEFAULT: False\",default=False)\n optparser.add_option(\"--broad-cutoff\",dest=\"broadcutoff\",type=\"float\",default=0.1,\n help=\"Cutoff for broad region. This option is not available unless --broad is set. If -p is set, this is a pvalue cutoff, otherwise, it's a qvalue cutoff. DEFAULT: 0.1 \")\n optparser.add_option(\"--verbose\",dest=\"verbose\",type=\"int\",default=2,\n help=\"Set verbose level. 0: only show critical message, 1: show additional warning message, 2: show process information, 3: show debug messages. DEFAULT:2\")\n return optparser", "def fill_parser(self, parser):\n parser.add_argument(\n \"library\",\n nargs=\"?\",\n help=\"Library to fetch (e.g. 
charms.mycharm.v2.foo.); optional, default to all\",\n )", "def create_parser():\n parser = OptionParser()\n\n parser.add_option(\"-s\", \"--script\", dest=\"script\", default='pbs.sh', help=\"Output location\")\n parser.add_option(\"-p\", \"--period\", dest=\"period\", default=\"30\", help=\"qstat period\")\n\n parser.set_usage(\"\"\"%prog [options]\"\"\")\n return parser", "def add_parser_options(self):\n #pass -i for using an include list\n self.parser.add_option(\"-i\",\"--include\", action=\"store_false\", dest=\"exin\")\n #pass -e for using an exclude list, this is also the default\n self.parser.add_option(\"-e\",\"--exclude\", action=\"store_true\", dest=\"exin\", default=True)\n #set the directory to check\n self.parser.add_option(\"-d\", \"--dir\", dest=\"dir\", default=\"tempimport\")\n #pass a list of files to include, or extentions to exclude\n #example -l file1,file2,file3 do not put spaces between the items in the list\n #quote any file with a space in the name\n self.parser.add_option(\"-l\", \"--list\", dest=\"list\")", "def configure_parser(sub_parsers):\n\n parser = sub_parsers.add_parser(\n 'ants',\n description='Solve a traveling salesman problem using ant colony optimization',\n help='Ant colony optimization for the traveling salesman problem')\n\n parser.add_argument(\n '-r',\n '--rho',\n type=float,\n default=.5,\n help='Evaporation rate (default 0.5)')\n parser.add_argument(\n '-a',\n '--alpha',\n type=float,\n default=.5,\n help='Relative importance of the pheromone (default 0.5)')\n parser.add_argument(\n '-b',\n '--beta',\n type=float,\n default=.5,\n help='Relative importance of the heuristic information (default 0.5)')\n parser.add_argument(\n '-q',\n '--q',\n type=float,\n default=1.,\n help='Constant Q. Used to calculate the pheromone, laid down on an edge (default 1)')\n parser.add_argument(\n '-n',\n '--iteration-number',\n type=int,\n default=10,\n help='Number of iterations to execute (default 10)')\n parser.add_argument(\n '-o',\n '--two-opt',\n action='store_true',\n default=False,\n help='Enable to use 2-opt local search after each iteration (default off)')\n parser.add_argument(\n '-t',\n '--tsp-file',\n type=str,\n default=path.join(path.abspath(path.dirname(inspect.getfile(inspect.currentframe()))), 'resources/burma14.tsp'),\n help='Path of the tsp file that shall be loaded (default loads the built-in burma14.tsp)')\n\n parser.add_argument(\n 'ant_number',\n type=int,\n help='Number of ants used for solving')\n\n parser.set_defaults(func=_run_aco4tsp)", "def set_options(optparser):\n optparser.usage += ' [application ...]'\n\n optparser.add_option('-a', '--apache', action='store_const', const=generate_apache_rules, dest='generate', help='generate apache rules', default=generate_apache_rules)\n optparser.add_option('-l', '--lighttpd', action='store_const', const=generate_lighttpd_rules, dest='generate', help='generate lighttpd rules')\n optparser.add_option('-n', '--nginx', action='store_const', const=generate_nginx_rules, dest='generate', help='generate nginx rules')", "def standard_parser(description, epilog=None, per_file_options=True):\n parser = OptionParser(description=description, epilog=epilog,\n formatter=TitledHelpFormatter())\n parser.add_option(\"--version\", action=\"store_true\", dest=\"output_version\",\n default=False, help=\"output version number to output \"\n \"file, or stdout if no output file is given\")\n parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\",\n default=False, help=\"verbose outout\")\n 
parser.add_option(\"-s\", \"--silent\", action=\"store_true\", dest=\"silent\",\n default=False, help=\"silent running\")\n if per_file_options:\n parser.add_option(\"-m\", \"--metrics\", action=\"store_true\",\n dest=\"metrics\", default=False, help=\"output asset \"\n \"metrics\")\n parser.add_option(\"--log\", action=\"store\", dest=\"output_log\", default=None,\n help=\"write log to file\")\n\n group = OptionGroup(parser, \"Asset Generation Options\")\n group.add_option(\"-j\", \"--json_indent\", action=\"store\", dest=\"json_indent\",\n type=\"int\", default=0, metavar=\"SIZE\",\n help=\"json output pretty printing indent size, defaults \"\n \"to 0\")\n\n # TODO - Asset Generation Options currently disabled\n #\n #group.add_option(\"-6\", \"--base64-encoding\", action=\"store_true\", dest=\"b64_encoding\", default=False,\n # help=(\"encode long float and int attributes in base64, defaults to disabled %s\" %\n # \"- [ currently unsupported ]\"))\n #group.add_option(\"-c\", \"--force-collision\", action=\"store_true\", dest=\"force_collision\", default=False,\n # help=\"force collision generation - [ currently unsupported ]\")\n #group.add_option(\"-r\", \"--force-render\", action=\"store_true\", dest=\"force_render\", default=False,\n # help=\"force rendering generation - [ currently unsupported ]\")\n\n group.add_option(\"--keep-unused-images\", action=\"store_true\", dest=\"keep_unused_images\", default=False,\n help=\"keep images with no references to them\")\n\n group.add_option(\"-I\", \"--include-type\", action=\"append\", dest=\"include_types\", default=None, metavar=\"TYPE\",\n help=\"only include objects of class TYPE in export.\")\n group.add_option(\"-E\", \"--exclude-type\", action=\"append\", dest=\"exclude_types\", default=None, metavar=\"TYPE\",\n help=\"exclude objects of class TYPE from export. \"\n \"Classes currently supported for include and exclude: \"\n \"geometries, nodes, animations, images, effects, materials, lights, \"\n \"physicsmaterials, physicsmodels and physicsnodes. \"\n \"CAUTION using these options can create incomplete assets which require fixup at runtime. \")\n parser.add_option_group(group)\n\n group = OptionGroup(parser, \"Asset Location Options\")\n group.add_option(\"-u\", \"--url\", action=\"store\", dest=\"asset_url\", default=\"\", metavar=\"URL\",\n help=\"asset URL to prefix to all asset references\")\n group.add_option(\"-a\", \"--assets\", action=\"store\", dest=\"asset_root\", default=\".\", metavar=\"PATH\",\n help=\"PATH of the asset root\")\n group.add_option(\"-d\", \"--definitions\", action=\"append\", dest=\"definitions\", default=None, metavar=\"JSON_FILE\",\n help=\"definition JSON_FILE to include in build, \"\n \"this option can be used repeatedly for multiple files\")\n parser.add_option_group(group)\n\n if per_file_options:\n group = OptionGroup(parser, \"File Options\")\n group.add_option(\"-i\", \"--input\", action=\"store\", dest=\"input\", default=None, metavar=\"FILE\",\n help=\"source FILE to process\")\n group.add_option(\"-o\", \"--output\", action=\"store\", dest=\"output\", default=\"default.json\", metavar=\"FILE\",\n help=\"output FILE to write to\")\n parser.add_option_group(group)\n\n # TODO - Database Options are currently disabled\n #\n #group = OptionGroup(parser, \"Database Options\")\n #group.add_option(\"-A\", \"--authority\", action=\"store\", dest=\"authority\", default=None,\n # metavar=\"HOST:PORT\",\n # help=(\"Authority of the database in the form HOST:PORT. 
%s\" %s\n # \"If undefined, database export is disabled.\"))\n #group.add_option(\"-D\", \"--database\", action=\"store\", dest=\"database\", default=\"default\",\n # metavar=\"NAME\", help=\"NAME of the document database\")\n #group.add_option(\"-P\", \"--put-post\", action=\"store_true\", dest=\"put_post\", default=False,\n # help=\"put or post the asset to the authority database\")\n #group.add_option(\"-O\", \"--document\", action=\"store\", dest=\"document\", default=\"default.asset\",\n # metavar=\"NAME\", help=\"NAME of the document\")\n #parser.add_option_group(group)\n\n return parser", "def setup_parser():\n\n psr_desc=\"cfdi engine service interface\"\n psr_epi=\"select a config profile to specify defaults\"\n\n psr = argparse.ArgumentParser(\n description=psr_desc, epilog=psr_epi)\n\n psr.add_argument('-nmp', action='store_true', dest='nmp',\n help='unique process approach (useful in development)')\n\n psr.add_argument('-d', action='store_true', dest='debug',\n help='print debug information')\n\n psr.add_argument('-c', '--config', action='store',\n dest='config',\n help='load an specific config profile')\n\n psr.add_argument('-p', '--port', action='store',\n dest='port',\n help='launches service on specific port')\n\n return psr.parse_args()", "def make_option_parser():\n\n parser = OptionParser(usage = \"%prog -o ./simulation_results\",\n description = \"This script simulates microbiome \" +\n \"change over time using Ornstein-Uhlenbeck (OU) models. These are \" +\n \"similar to Brownian motion models, with the exception that they \" +\n \"include reversion to a mean. Output is a tab-delimited data table \" +\n \"and figures.\",\n version = __version__)\n\n required_options = OptionGroup(parser, \"Required options\")\n\n required_options.add_option('-o','--output', type=\"string\",\n help='the output folder for the simulation results')\n\n parser.add_option_group(required_options)\n\n optional_options = OptionGroup(parser, \"Optional options\")\n\n optional_options.add_option('--pert_file_path',\\\n default = os.path.abspath(resource_filename('karenina.data','set_xyz_lambda_zero.tsv')),\\\n type = \"string\",\\\n help = 'file path to a perturbation file specifying parameters for' +\n ' the simulation results [default: %default]')\n\n optional_options.add_option('--treatment_names',\\\n default=\"control,destabilizing_treatment\",type=\"string\",\\\n help=\"Comma seperated list of treatment named [default:%default]\")\n\n optional_options.add_option('-n','--n_individuals',\\\n default=\"35,35\",type=\"string\",\\\n help='Comma-separated number of individuals to simulate per treatment.'+\\\n 'Note: This value must be enclosed in quotes. Example: \"35,35\". [default: %default]')\n\n optional_options.add_option('-t', '--n_timepoints',default=10, type=\"int\",\\\n help='Number of timepoints to simulate. (One number, which is the ' +\n 'same for all treatments) [default: %default]')\n\n optional_options.add_option('-p','--perturbation_timepoint',\\\n default=5,type=\"int\",\\\n help='Timepoint at which to apply a perturbation. Must be less than ' +\n '--n_timepoints [default: %default]')\n\n optional_options.add_option('-d','--perturbation_duration',\\\n default=100,type=\"int\",\\\n help='Duration that the perturbation lasts. [default: %default]')\n\n optional_options.add_option('--interindividual_variation',\n default=0.01,type=\"float\",help='Starting variability between ' +\n 'individuals. 
[default: %default]')\n\n optional_options.add_option('--delta',default=0.25,type=\"float\",\n help='Starting delta parameter for Brownian motion and ' +\n 'Ornstein-Uhlenbeck processes. A higher number indicates more ' +\n 'variability over time. [default: %default]')\n\n optional_options.add_option('-l','--L',default=0.20,type=\"float\",\n help='Starting lambda parameter for Ornstein-Uhlenbeck processes. A ' +\n 'higher number indicates a greater tendancy to revert to the mean ' +\n 'value. [default: %default]')\n\n optional_options.add_option('--fixed_start_pos',default=None,type=\"string\",\n help='Starting x,y,z position for all points, as comma separated ' +\n 'floating point values, e.g. 0.0,0.1,0.2. If not supplied, starting ' +\n 'positions will be randomized based on the interindividual_variation ' +\n 'parameter [default: %default]')\n\n optional_options.add_option('-v','--verbose', action=\"store_true\", dest=\"verbose\", default=False,\n help='-v, allows for verbose output' +\n ' [default: %default]')\n\n parser.add_option_group(optional_options)\n\n return parser", "def fill_parser(self, parser):\n parser.add_argument(\n \"library\",\n nargs=\"?\",\n help=\"Library to publish (e.g. charms.mycharm.v2.foo.); optional, default to all\",\n )", "def prepare_optparser ():\n usage = \"usage: %prog -c mysample.cfg -s A01A -1 A01_1.fq -2 A02_2.fq\"\n description = \"Please set the sample name. e.g. L04A, L04C, L04T.\"\n optparser = OptionParser(version = \"0.0.1\", description = description, usage = usage, add_help_option = False)\n optparser.add_option(\"-h\", \"--help\", action = \"help\", help = \"Show this help message and exit.\")\n optparser.add_option(\"-c\", \"--config\", dest = \"config\", default = \"config.cfg\" ,type = \"string\",\n help = \"Set the config File.\")\n optparser.add_option(\"-s\", \"--samplename\", dest = \"samplename\" ,type = \"string\",\n help = \"Set the samplename.\")\n optparser.add_option(\"-1\", \"--fastq1\", dest = \"fastq1\", type = \"string\",\n help = \"input fastq file paired 1\")\n optparser.add_option(\"-2\", \"--fastq2\", dest = \"fastq2\", type = \"string\",\n help = \"input fastq file paired 2\")\n optparser.add_option(\"-d\", \"--dataprocess\", dest = \"dataprocess\", default = \"1111111111\",type = \"string\",\n help = \"Need point 6 digit number, eg. 
111111: Conduct Genome Process, fastq_mapping, Add Read Group, Reorder Contig, Mark Duplicates, split_ntrim step one by one;100000 only conduct Genome Process; 000001:Only conduct split_ntrim step\")\n optparser.add_option(\"-i\", \"--in_bam\", dest = \"in_bam\" ,type = \"string\",\n help = \"You can set this to your bam file path.(If fastq1 is empty, required!)\")\n optparser.add_option(\"-o\", \"--out_dir\", dest = \"out_dir\" ,type = \"string\", default = \"vcf\",\n help = \"Set the vcf file out_dir.[vcf]\")\n return(optparser)", "def add_parse_options(cls, parser):\n # Decoder params\n parser.add_argument(\"-beam_size\", default=1, type=int, help=\"Beam size\")\n parser.add_argument(\"-lm_weight\", default=0.0, type=float, help=\"LM weight in decoding\")\n parser.add_argument(\"-lm_path\", default=\"/share/data/speech/shtoshni/research/asr_multi/\"\n \"code/lm/models/best_models/run_id_301/lm.ckpt-250000\", type=str,\n help=\"LM ckpt path\")\n parser.add_argument(\"-cov_penalty\", default=0.0, type=float,\n help=\"Coverage penalty\")", "def fill_parser(self, parser):\n parser.add_argument(\"filepath\", type=useful_filepath, help=\"The charm or bundle to upload\")\n parser.add_argument(\n \"--release\",\n action=\"append\",\n help=\"The channel(s) to release to (this option can be indicated multiple times)\",\n )\n parser.add_argument(\n \"--resource\",\n action=\"append\",\n type=ResourceOption(),\n default=[],\n help=(\n \"The resource(s) to attach to the release, in the <name>:<revision> format \"\n \"(this option can be indicated multiple times)\"\n ),\n )", "def internal_setup_options_parser(self, argparser):\n self.setup_options_parser(argparser)\n subparser = argparser.add_subparsers(dest='verb')\n for (name, verb) in self.verbs.items():\n vparser = subparser.add_parser(name, help=verb.help)\n verb.setup_options_parser(vparser)", "def MakeOpts():\n parser = ArgumentParser()\n\n parser.add_argument(\"-o\", \"--host\", dest=\"host\", default=\"hldbv02\",\n help=\"The hostname for the MySQL database\")\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='debug mode, store results in dummy DB')\n \n xml_group = parser.add_mutually_exclusive_group(required=True)\n xml_group.add_argument(\"-x\", \"--xml_filename\", default=None,\n help=\"The filename for a single XML result file\")\n xml_group.add_argument(\"-a\", \"--xml_dir\", default=None,\n help=\"The directory from which to import the latest XML results file\")\n \n parser.add_argument(\"-p\", \"--plate\", default=None, type=int, required=True,\n help=\"The plate number (usually between 1-10) in the robot script\")\n parser.add_argument('exp_id_csv', nargs=1,\n help='the name of the CVS file where the exp_ids are')\n\n return parser", "def make_cli_parser(self):\n super(BplnArgParser, self).make_cli_parser()\n links_opt = self.cli_parser.add_option('--selected-links',\n help=(\"A CSV-formatted file containing pairs of \"\n \"terms to test. Tests will be done to decide \"\n \"if the annotation term from the first column \"\n \"\\\"is linked to\\\" the annotation term from the \"\n \"second column. [NOTE: Selecting this option \"\n \"restricts the program to only test the matches \"\n \"designated in the file.] [NOTE: This option \"\n \"conflicts with '--selected-terms' and \"\n \"'--selected-terms-with-all'.]\"\n )\n )\n anns_opt = self.cli_parser.add_option('--selected-terms',\n help=(\"A file containing annotation terms to test \"\n \"linkage to each other. 
The file should contain one \"\n \"term per line. Selecting this option restricts the \"\n \"program to only testing the given terms against \"\n \"each other. [NOTE: This option conflicts with \"\n \"'--selected-links' and \"\n \"'--selected-terms-with-all'.]\"\n )\n )\n anns_all_opt = self.cli_parser.add_option(\n '--selected-terms-with-all',\n help=(\"A file containing annotation terms to test \"\n \"linkage to all other terms (one-against-all and \"\n \"all-against-one). The file should contain one \"\n \"term per line. Selecting this option restricts \"\n \"the program to only testing the given terms \"\n \"against all other terms. [NOTE: \"\n \"This option conflicts with '--selected-links' and \"\n \"'--selected-terms'.]\"\n )\n )\n self.cli_parser.register_conflict(\n (links_opt, anns_opt, anns_all_opt))", "def build_argparser(self):\n firstletters = ''\n for name, (categ, rest) in self.data.items():\n firstletters += name[0]\n\n self.argparser = argparse.ArgumentParser(\n usage='m3 x {} [arguments]'.format(self.name))\n\n for name, (categ, rest) in self.data.items():\n argargs = {}\n if rest.get('help'):\n argargs['help'] = rest['help']\n if rest.get('type') == 'flag':\n argargs['action'] = 'store_true'\n argargs['required'] = False\n elif 'default' not in rest:\n argargs['required'] = True\n if firstletters.count(name[0]) == 1:\n self.argparser.add_argument('-' + name[0],\n '--' + name, **argargs) # noqa: T484\n else:\n self.argparser.add_argument('--' + name, **argargs) # noqa:T484", "def new_option_parser():\n from amuse.units.optparse import OptionParser\n result = OptionParser()\n result.add_option(\"-N\", dest=\"N\", type=\"int\",\n default=10000,\n help=\"no. of particles [%default]\")\n result.add_option(\"--theta\", dest=\"theta\", type=\"float\",\n default=0.3,\n help=\"Opening Angle [%default]\")\n result.add_option(\"--M_min\", unit=units.MSun, dest=\"M_min\", type=\"float\",\n default=0.1 | units.MSun,\n help=\"Min. star mass [%default]\")\n result.add_option(\"--M_max\", unit=units.MSun, dest=\"M_max\", type=\"float\",\n default=100 | units.MSun,\n help=\"Max. 
star mass [%default]\")\n result.add_option(\"-r\", unit=units.parsec, dest=\"r\", type=\"float\",\n default=3 | units.parsec,\n help=\"Size of star cluster [%default]\")\n result.add_option(\"--t_end\", unit=units.Myr, dest=\"t_end\", type=\"float\",\n default=10 | units.Myr,\n help=\"End time of simulation [%default]\")\n result.add_option(\"--tg_time_step_frac\", dest=\"tg_time_step_frac\", type=\"float\",\n default=0.1,\n help=\"Fraction of gravity timestep for SE timestep[%default]\")\n result.add_option(\"--tse_time_step_frac\", dest=\"tse_time_step_frac\", type=\"float\",\n default=0.5,\n help=\"Fraction of t_dyn for gravity timestep [%default]\")\n result.add_option(\"--bridge_time_step_frac\", dest=\"bridge_time_step_frac\", type=\"float\",\n default=1 / 20.,\n help=\"Fraction of bridge timestep [%default]\")\n result.add_option(\"--imf\", dest=\"imf\", type=\"string\",\n default='salpeter',\n help=\"Choose IMF: salpeter, miller_scalo, otherexp [%default]\")\n result.add_option(\"--scheme\", dest=\"code\", type=\"int\",\n default=0,\n help=\"Gravity code: (0) hybrid, (1) all N-body, (2) all tree-code [%default]\")\n result.add_option(\"--m_cut\", unit=units.MSun, dest=\"m_cut\", type=\"float\",\n default=10 | units.MSun,\n help=\"Mass splitting parameter [%default]\")\n result.add_option(\"--workers\", dest=\"workers\", type=\"int\",\n default=1,\n help=\"Number of Worker threads to use for each code [%default]\")\n\n return result", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser", "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def init_parser(self):\n usage = \"%s [options] [subcommand]\" % (self.command)\n description = \"An SBTools test plug-in.\"\n\n parser = SBToolOptionParser(self, self.sbtools, usage, description=description)\n return parser", "def initialize_parser():\n ftypes = [\n \"png\",\n \"jpg\",\n \"jpeg\",\n \"pdf\",\n \"ps\",\n \"eps\",\n \"rgba\",\n \"svg\",\n \"tiff\",\n \"tif\",\n \"pgf\",\n \"svgz\",\n \"raw\",\n ]\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-s\",\n \"--savefig\",\n action=\"store\",\n default=False,\n choices=ftypes,\n help=\"Save figure to a file\",\n )\n return parser", "def build_parser(description):\n parser = argparse.ArgumentParser(description = description)\n #requiredNamed = parser.add_argument_group('required named arguments')\n parser.add_argument('-p', '--profile', default='default',\n choices=get_profile_names(),\n help='botocore profile name for AWS creds and other vars.')\n parser.add_argument('-r', '--region', default=None,\n help='AWS region to use')\n #parser.add_argument('--search-regions', action='store_true', default=False,\n # help='search regions for VPC with given vpc_name')\n 
#parser.add_argument('--quiet', action='store_true', default=False,\n # help='prevent status messages to STDOUT')\n\n # create a subparser for our plugins to attach to.\n subparser = parser.add_subparsers(\n title = 'subcommands',\n description = 'valid subcommands',\n help = '--help for additional subcommand help'\n )\n\n plugins = load_entry_points('botoform.plugins')\n load_parsers_from_plugins(subparser, plugins)\n\n return parser", "def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser", "def init_parser():\n parser = argparse.ArgumentParser(\n description='Backup application code and data.')\n parser.add_argument('-a', '--app-id', required=True,\n help='the application ID to run the backup for')\n parser.add_argument('--source-code', action='store_true',\n default=False, help='backup the source code too. Disabled by default.')\n parser.add_argument('-d', '--debug', required=False, action=\"store_true\",\n default=False, help='display debug messages')\n parser.add_argument('--skip', required=False, nargs=\"+\",\n help='skip the following kinds, separated by spaces')\n\n return parser", "def _GenParser():\n usage = ('%prog [options]\\n'\n 'Post a build request to the try server for the given revision.\\n')\n parser = optparse.OptionParser(usage=usage)\n parser.add_option('-H', '--host',\n help='Host address of the try server.')\n parser.add_option('-P', '--port', type='int',\n help='HTTP port of the try server.')\n parser.add_option('-u', '--user', default=getpass.getuser(),\n dest='user',\n help='Owner user name [default: %default]')\n parser.add_option('-e', '--email',\n default=os.environ.get('TRYBOT_RESULTS_EMAIL_ADDRESS',\n os.environ.get('EMAIL_ADDRESS')),\n help=('Email address where to send the results. 
Use either '\n 'the TRYBOT_RESULTS_EMAIL_ADDRESS environment '\n 'variable or EMAIL_ADDRESS to set the email address '\n 'the try bots report results to [default: %default]'))\n parser.add_option('-n', '--name',\n default='try_job_http',\n help='Descriptive name of the try job')\n parser.add_option('-b', '--bot',\n help=('IMPORTANT: specify ONE builder per run is supported.'\n 'Run script for each builders separately.'))\n parser.add_option('-r', '--revision',\n help=('Revision to use for the try job; default: the '\n 'revision will be determined by the try server; see '\n 'its waterfall for more info'))\n parser.add_option('--root',\n help=('Root to use for the patch; base subdirectory for '\n 'patch created in a subdirectory'))\n parser.add_option('--patch',\n help='Patch information.')\n return parser", "def fill_parser(self, parser):\n parser.add_argument(\n \"charm_name\",\n metavar=\"charm-name\",\n help=\"The charm name to associate the resource\",\n )\n parser.add_argument(\"resource_name\", metavar=\"resource-name\", help=\"The resource name\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\n \"--filepath\",\n type=SingleOptionEnsurer(useful_filepath),\n help=\"The file path of the resource content to upload\",\n )\n group.add_argument(\n \"--image\",\n type=SingleOptionEnsurer(str),\n help=\"The digest of the OCI image\",\n )", "def __init__(self, name, title=None, help=None,\n dynamic_group_owner='',\n driver_option=''):\n self.name = name\n self.title = \"%s options\" % name if title is None else title\n self.help = help\n self.dynamic_group_owner = dynamic_group_owner\n self.driver_option = driver_option\n\n self._opts = {} # dict of dicts of (opt:, override:, default:)\n self._argparse_group = None\n self._driver_opts = {} # populated by the config generator", "def Generator_Option_Parser(argv, extra_opt, ignore_infile = True):\n opts = [(\"states\" , int, None , True , True),\n (\"symbols\" , int, None , True , True),\n (\"tape\" , int, 10000, False, True),\n (\"steps\" , int, 10000, False, True),\n (\"infile\" , str, None , False, True),\n (\"outfile\" , str, None , False, True),\n (\"log_number\", int, None , False, True)] + extra_opt\n ignore_opts = []\n if ignore_infile:\n ignore_opts.append(\"infile\")\n opts, args = Option_Parser(argv, opts, help_flag = True, no_mult = True,\n ignore_opts = ignore_opts)\n\n # The furthest that the machine can travel in n steps is n+1 away from the\n # origin. 
It could travel in either direction so the tape need not be longer\n # than 2 * max_steps + 3\n if opts[\"tape\"] > 2 * opts[\"steps\"] + 3:\n opts[\"tape\"] = 2 * opts[\"steps\"] + 3\n\n # Default output filename is based off of parameters.\n if not opts[\"outfile\"]:\n opts[\"outfile\"] = \"%dx%d.out\" % (opts[\"states\"], opts[\"symbols\"])\n opts[\"outfilename\"] = opts[\"outfile\"]\n opts[\"outfile\"] = open_outfile(opts[\"outfilename\"])\n if not opts[\"outfile\"]:\n sys.exit(1)\n\n opts[\"infilename\"] = opts[\"infile\"]\n if not ignore_infile:\n opts[\"infile\"] = open_infile(opts[\"infilename\"])\n\n return opts, args", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def parse_options(args):\n parser = argparse.ArgumentParser(description='Contrail/EC procedure wrapper script ')\n\n parser.add_argument('-v', '--verbose', dest='debug', action='store_true',\n help='Enable verbose mode')\n\n parser.add_argument('-p', '--path', dest='path_name',\n help=\"Full path to the manifest file\")\n\n subparsers = parser.add_subparsers(title='Fire off EC jobs based on the path to manifest file',\n description='Select one command',\n dest='command')\n\n parser_parse = subparsers.add_parser('parse',\n description='parse and execute build for manifest')\n opt = parser.parse_args(args)\n return opt", "def MakeOpts():\n opt_parser = OptionParser()\n #opt_parser.add_option(\"-d\", \"--sqlite_db_filename\",\n # dest=\"sqlite_db_filename\",\n # default=\"../res/tecan.sqlite\",\n # help=\"The filename of the Sqlite database\")\n opt_parser.add_option(\"-p\", \"--plate_num\",\n type='int',\n dest=\"plate_num\",\n default=None,\n help=\"The number for the plate that is to be exported\")\n opt_parser.add_option(\"-e\", \"--exp_id\",\n dest=\"exp_id\",\n default=None,\n help=\"The expID for the data\")\n opt_parser.add_option(\"-r\", \"--reading_label\",\n dest=\"reading_label\",\n default=None,\n help=\"The Reading Label for the data\")\n opt_parser.add_option(\"-c\", \"--csv\",\n action=\"store_true\",\n dest='csv',\n default=False,\n help=\"Format the output as CSV\")\n opt_parser.add_option(\"-o\", \"--output_fname\",\n dest='output_fname',\n default=None,\n help=\"Filename for the output\")\n return opt_parser", "def optparse_init(self):\n\n from optparse import OptionParser, OptionGroup\n usage = \"Usage: %prog [options] input_file(s) [output]\"\n p = OptionParser(usage, version=\"%prog \"+ __version__)\n p.add_option(\"-p\", \"--profile\", dest='profile', type='choice', choices=profile_list,\n help=\"Tile cutting profile (%s) - default 'mercator' (Google Maps compatible)\" % \",\".join(profile_list))\n p.add_option(\"-r\", \"--resampling\", dest=\"resampling\", type='choice', choices=resampling_list,\n help=\"Resampling method (%s) - default 'average'\" % \",\".join(resampling_list))\n p.add_option(\"-f\", \"--tile-format\", dest=\"tile_format\", type='choice', choices=tile_formats_list,\n help=\"Image format of generated tiles (%s) - default 'png'\" % \",\".join(tile_formats_list))\n p.add_option('-s', '--s_srs', dest=\"s_srs\", metavar=\"SRS\",\n help=\"The spatial reference system used 
for the source input data\")\n p.add_option('-z', '--zoom', dest=\"zoom\",\n help=\"Zoom levels to render (format:'2-5' or '10').\")\n p.add_option('-e', '--resume', dest=\"resume\", action=\"store_true\",\n help=\"Resume mode. Generate only missing files.\")\n p.add_option('-a', '--srcnodata', dest=\"srcnodata\", metavar=\"NODATA\",\n help=\"NODATA transparency value to assign to the input data\")\n p.add_option('-i', '--init-dest', dest=\"init_dest\",\n help=\"Colour used to initialize output, only for 'jpeg' tile format\")\n p.add_option('', '--tilesize', dest=\"tilesize\",\n help=\"Size of the tiles - default 256\")\n p.add_option('', '--osm', dest=\"tms_osm\", action=\"store_true\",\n help=\"tms or osm numbering - default tms\")\n p.add_option('', '--mbtiles', dest=\"mbtiles\", action=\"store_true\",\n help=\"mbtiles - tiles creation to mbtiles file\")\n p.add_option('', '--mbtiles_to_disk', dest=\"mbtiles_todisk\", action=\"store_true\",\n help=\"mbtiles tiles- write mbtiles tiles to a directory\")\n p.add_option('', '--mbtiles_from_disk', dest=\"mbtiles_fromdisk\", action=\"store_true\",\n help=\"mbtiles tiles- create mbtiles file from tile directory\")\n p.add_option('', \"--te\", dest=\"te_bounds\",\n help=\"bounds to extract (coordinates in the output SRS): xmin ymin xmax ymax OR xmin,ymin,xmax,ymax\")\n p.add_option(\"-v\", \"--verbose\", dest=\"verbose\",action=\"store_true\",\n help=\"Print status messages to stdout\")\n # KML options\n g = OptionGroup(p, \"KML (Google Earth) options\", \"Options for generated Google Earth SuperOverlay metadata\")\n g.add_option(\"-k\", \"--force-kml\", dest='kml', action=\"store_true\",\n help=\"Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326. For a dataset with different projection use with caution!\")\n g.add_option(\"-n\", \"--no-kml\", dest='kml', action=\"store_false\",\n help=\"Avoid automatic generation of KML files for EPSG:4326\")\n g.add_option(\"-u\", \"--url\", dest='url',\n help=\"URL address where the generated tiles are going to be published\")\n g.add_option('-d', '--kml-depth', dest=\"kml_depth\",\n help=\"How many levels to store before linking, default 1\")\n g.add_option('--kmz', dest=\"kmz\", action=\"store_true\",\n help=\"Compress KML files into KMZ format, default for 'gearth' profile\")\n g.add_option('--no-kmz', dest=\"kmz\", action=\"store_false\",\n help=\"Do not compress KML files into KMZ format, default for 'mercator', 'geodetic' and 'raster' profiles\")\n p.add_option_group(g)\n\n # HTML options\n g = OptionGroup(p, \"Web viewer options\", \"Options for generated HTML viewers a la Google Maps\")\n g.add_option(\"-w\", \"--webviewer\", dest='webviewer', type='choice', choices=webviewer_list,\n help=\"Web viewer to generate (%s) - default 'all'\" % \",\".join(webviewer_list))\n g.add_option(\"-t\", \"--title\", dest='title',\n help=\"Title of the map\")\n g.add_option(\"-c\", \"--copyright\", dest='copyright',\n help=\"Copyright for the map\")\n g.add_option(\"-g\", \"--googlekey\", dest='googlekey',\n help=\"Google Maps API key from http://code.google.com/apis/maps/signup.html\")\n g.add_option(\"-y\", \"--yahookey\", dest='yahookey',\n help=\"Yahoo Application ID from http://developer.yahoo.com/wsregapp/\")\n p.add_option_group(g)\n\n # TODO: MapFile + TileIndexes per zoom level for efficient MapServer WMS\n #g = OptionGroup(p, \"WMS MapServer metadata\", \"Options for generated mapfile and tileindexes for MapServer\")\n #g.add_option(\"-i\", \"--tileindex\", dest='wms', 
action=\"store_true\"\n # help=\"Generate tileindex and mapfile for MapServer (WMS)\")\n # p.add_option_group(g)\n\n p.set_defaults(verbose=False, profile=\"mercator\", kml=False, url=None,\n copyright='', resampling='average', resume=False, tilesize=None,mbtiles=False,tms_osm=False,\n mbtiles_todisk=False,mbtiles_fromdisk=False,te_bounds='',\n googlekey='INSERT_YOUR_KEY_HERE', yahookey='INSERT_YOUR_YAHOO_APP_ID_HERE')\n\n self.parser = p", "def buildOptions(self):\n ZenScriptBase.buildOptions(self)\n self.parser.add_option('--userid',\n dest=\"userid\",default=\"\",\n help=\"name of user who is acking the event\")\n \n self.parser.add_option('--evid',\n dest=\"evids\", action=\"append\",\n help=\"event id that is acked\")\n\n self.parser.add_option('--state', type='int',\n dest=\"state\", default=1,\n help=\"event id that is acked [default: ack]\")", "def add_parser_options(cls, parser):\n for arg in cls.configurables():\n getattr(cls, arg).add_argument(parser)", "def parse_options() -> Namespace:\n\n opt_parser = OptionParser(\n \"liftoff\",\n [\n \"script\",\n \"config_path\",\n \"procs_no\",\n \"gpus\",\n \"per_gpu\",\n \"no_detach\",\n \"verbose\",\n \"copy_to_clipboard\",\n \"time_limit\", # This should be removed in favour of start_by\n \"start_by\",\n \"end_by\",\n \"optimize\",\n \"args\",\n \"filters\",\n \"results_path\",\n \"name\",\n \"max_runs\",\n \"shuffle\",\n ],\n )\n return opt_parser.parse_args()", "def _create_option_parser():\n parser = OptionParser(usage=\"\"\"%prog <INPUT_FILE_OR_DIR>\n Fix end-of-line characters (use -h or --help for help)\"\"\")\n parser.add_option(\n '-e', '--eol',\n dest='eol',\n metavar='eol-format',\n help='specify the desired end-of-line format (win/CRLF, unix/LF, mac/CR)'\n )\n parser.add_option(\n '--auto-convert',\n dest='auto_convert',\n default=False,\n action='store_true',\n help='specify whether to auto-convert end-of-line (disabled by default)'\n )\n return parser", "def build_arg_parser(self):\n super(App, self).build_arg_parser()\n\n exgroup = self.parser.add_mutually_exclusive_group()\n exgroup.add_argument('--format', action='store', choices=[\"zip\", \"tar\"], help='write files in the given format.')\n exgroup.add_argument('--filename', action='store', help='Override default filename and/or extension')", "def options_parse():\n parser = argparse.ArgumentParser()\n\n # Options for model parameters setup (only change if model training was changed)\n parser.add_argument('--num_filters', type=int, default=64,\n help='Filter dimensions for DenseNet (all layers same). Default=64')\n parser.add_argument('--num_classes_ax_cor', type=int, default=79,\n help='Number of classes to predict in axial and coronal net, including background. Default=79')\n parser.add_argument('--num_classes_sag', type=int, default=51,\n help='Number of classes to predict in sagittal net, including background. Default=51')\n parser.add_argument('--num_channels', type=int, default=7,\n help='Number of input channels. 
Default=7 (thick slices)')\n parser.add_argument('--kernel_height', type=int, default=5, help='Height of Kernel (Default 5)')\n parser.add_argument('--kernel_width', type=int, default=5, help='Width of Kernel (Default 5)')\n parser.add_argument('--stride', type=int, default=1, help=\"Stride during convolution (Default 1)\")\n parser.add_argument('--stride_pool', type=int, default=2, help=\"Stride during pooling (Default 2)\")\n parser.add_argument('--pool', type=int, default=2, help='Size of pooling filter (Default 2)')\n\n sel_option = parser.parse_args()\n\n return sel_option", "def _GetOptionsParser():\n\n parser = optparse.OptionParser(__doc__)\n\n parser.add_option('--filePath',\n dest='filePath',\n action='store',\n help='js or css file path')\n\n return parser", "def option_parser():\n usage = '''\n $ ./crawler -d5 <url>\n Here in this case it goes till depth of 5 and url is target URL to\n start crawling.\n '''\n version = '0.0.1'\n\n parser = optparse.OptionParser(usage=usage, version=version)\n\n parser.add_option(\"-l\", \"--links\", action=\"store_true\",\n default=False, dest=\"links\", help=\"links for target url\")\n\n parser.add_option(\"-d\", \"--depth\", action=\"store\", type=\"int\",\n default=30, dest=\"depth\", help=\"Maximum depth traverse\")\n opts, args = parser.parse_args()\n\n if len(args) < 1:\n parser.print_help()\n raise SystemExit(1)\n\n return opts, args", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def build_option_parser(self, description, version):\r\n parser = argparse.ArgumentParser(\r\n description=description,\r\n add_help=False, )\r\n parser.add_argument(\r\n '--version',\r\n action='version',\r\n version=__version__, )\r\n parser.add_argument(\r\n '-v', '--verbose', '--debug',\r\n action='count',\r\n dest='verbose_level',\r\n default=self.DEFAULT_VERBOSE_LEVEL,\r\n help=_('Increase verbosity of output and show tracebacks on'\r\n ' errors. 
Can be repeated.'))\r\n parser.add_argument(\r\n '-q', '--quiet',\r\n action='store_const',\r\n dest='verbose_level',\r\n const=0,\r\n help=_('Suppress output except warnings and errors'))\r\n parser.add_argument(\r\n '-h', '--help',\r\n action=HelpAction,\r\n nargs=0,\r\n default=self, # tricky\r\n help=_(\"Show this help message and exit\"))\r\n # Global arguments\r\n parser.add_argument(\r\n '--os-auth-strategy', metavar='<auth-strategy>',\r\n default=env('OS_AUTH_STRATEGY', default='keystone'),\r\n help=_('Authentication strategy (Env: OS_AUTH_STRATEGY'\r\n ', default keystone). For now, any other value will'\r\n ' disable the authentication'))\r\n parser.add_argument(\r\n '--os_auth_strategy',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-auth-url', metavar='<auth-url>',\r\n default=env('OS_AUTH_URL'),\r\n help=_('Authentication URL (Env: OS_AUTH_URL)'))\r\n parser.add_argument(\r\n '--os_auth_url',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-tenant-name', metavar='<auth-tenant-name>',\r\n default=env('OS_TENANT_NAME'),\r\n help=_('Authentication tenant name (Env: OS_TENANT_NAME)'))\r\n parser.add_argument(\r\n '--os_tenant_name',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-tenant-id', metavar='<auth-tenant-id>',\r\n default=env('OS_TENANT_ID'),\r\n help=_('Authentication tenant ID (Env: OS_TENANT_ID)'))\r\n\r\n parser.add_argument(\r\n '--os-username', metavar='<auth-username>',\r\n default=utils.env('OS_USERNAME'),\r\n help=_('Authentication username (Env: OS_USERNAME)'))\r\n parser.add_argument(\r\n '--os_username',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-user-id', metavar='<auth-user-id>',\r\n default=env('OS_USER_ID'),\r\n help=_('Authentication user ID (Env: OS_USER_ID)'))\r\n\r\n parser.add_argument(\r\n '--os-password', metavar='<auth-password>',\r\n default=utils.env('OS_PASSWORD'),\r\n help=_('Authentication password (Env: OS_PASSWORD)'))\r\n parser.add_argument(\r\n '--os_password',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-region-name', metavar='<auth-region-name>',\r\n default=env('OS_REGION_NAME'),\r\n help=_('Authentication region name (Env: OS_REGION_NAME)'))\r\n parser.add_argument(\r\n '--os_region_name',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-token', metavar='<token>',\r\n default=env('OS_TOKEN'),\r\n help=_('Defaults to env[OS_TOKEN]'))\r\n parser.add_argument(\r\n '--os_token',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--service-type', metavar='<service-type>',\r\n default=env('OS_NETWORK_SERVICE_TYPE', default='network'),\r\n help=_('Defaults to env[OS_NETWORK_SERVICE_TYPE] or network.'))\r\n\r\n parser.add_argument(\r\n '--endpoint-type', metavar='<endpoint-type>',\r\n default=env('OS_ENDPOINT_TYPE', default='publicURL'),\r\n help=_('Defaults to env[OS_ENDPOINT_TYPE] or publicURL.'))\r\n\r\n parser.add_argument(\r\n '--os-url', metavar='<url>',\r\n default=env('OS_URL'),\r\n help=_('Defaults to env[OS_URL]'))\r\n parser.add_argument(\r\n '--os_url',\r\n help=argparse.SUPPRESS)\r\n\r\n parser.add_argument(\r\n '--os-cacert',\r\n metavar='<ca-certificate>',\r\n default=env('OS_CACERT', default=None),\r\n help=_(\"Specify a CA bundle file to use in \"\r\n \"verifying a TLS (https) server certificate. 
\"\r\n \"Defaults to env[OS_CACERT]\"))\r\n\r\n parser.add_argument(\r\n '--insecure',\r\n action='store_true',\r\n default=env('NEUTRONCLIENT_INSECURE', default=False),\r\n help=_(\"Explicitly allow neutronclient to perform \\\"insecure\\\" \"\r\n \"SSL (https) requests. The server's certificate will \"\r\n \"not be verified against any certificate authorities. \"\r\n \"This option should be used with caution.\"))\r\n\r\n return parser", "def make_cli_parser(self):\n super(SaArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--steps', type='int',\n default=mcmc.defaults.NUM_STEPS,\n help=(\"the number of steps to Anneal. \"\n\t\t\t\t\"[default: %default]\")\n )\n self.cli_parser.add_option('--temperature', type='int',\n default=mcmc.defaults.TEMPERATURE,\n help=(\"the starting temperature to anneal from. \"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('--end_temperature', type='int',\n default=mcmc.defaults.END_TEMPERATURE,\n help=(\"the temperature to end annealing.\"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('--activity-threshold',\n type='float',\n default=mcmc.defaults.ACTIVITY_THRESHOLD,\n help=(\"set the (differential) expression threshold at \"\n \"which a gene is considered active [default: \"\n \"%default=-log10(0.05)]\")\n )\n self.cli_parser.add_option('--free-parameters',\n action='store_true',\n help=(\"parameters will be adjusted randomly, rather \"\n \"than incrementally\")\n )\n self.cli_parser.add_option('--disable-swaps', action='store_true',\n help=(\"disables swapping links as an option for \"\n \"transitions\")\n )\n self.cli_parser.add_option('--transition-ratio', type='float',\n default=0.9,\n help=(\"The target ratio of proposed link transitions \"\n \"to proposed parameter transitions [default: \"\n \"%default]\"\n )\n )\n self.cli_parser.add_option('--parameters-outfile',\n default=mcmc.defaults.PARAMETERS_OUTFILE,\n help=(\"the file to which the parameters results should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--transitions-outfile',\n default=mcmc.defaults.TRANSITIONS_OUTTFILE,\n help=(\"the file to which the transitions data should \"\n \"be written [default: %default]\")\n )\n self.cli_parser.add_option('--detailed-transitions',\n action='store_true',\n help=(\"Transitions file includes full information about \"\n \"each step's state.\")\n )\n self.cli_parser.add_option('--bzip2', action='store_true',\n help=\"compress transitions file using bzip2\"\n )", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def _parse_options(self):\n parser = argparse.ArgumentParser(prog=self._program,\n formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, width=132))\n parser.add_argument(\"--debug\", action='store_true', default=self._debug, help=\"The debug flag. (Default: {0})\".format(self._debug))\n parser.add_argument(\"--drives\", default=None, help=\"The drives to display. (Default: {0})\".format(self._drives))\n parser.add_argument(\"--exclude\", default=None, help=\"The drives to exclude. (Default: {0})\".format(self._exclude))\n parser.add_argument(\"--force_spt\", action='store_true', help=\"Force using spt (debug). 
(Default: {0})\".format(self._force_spt))\n parser.add_argument(\"--json\", action='store_true', default=self._json_format, help=\"Enable JSON format. (Default: {0})\".format(self._json_format))\n parser.add_argument(\"--long\", action='store_true', default=self._long_format, help=\"Enable long format. (Default: {0})\".format(self._long_format))\n parser.add_argument(\"--noencs\", action='store_false', default=self._include_enclosures, help=\"Exclude enclosures. (Default: {0})\".format(not self._include_enclosures))\n parser.add_argument(\"--noheader\", action='store_false', default=self._report_header, help=\"Exclude headers. (Default: {0})\".format(not self._report_header))\n parser.add_argument(\"--power_on_hours\", action='store_true', default=self._power_on_hours, help=\"Include power on hours. (Default: {0})\".format(not self._power_on_hours))\n # Filters for spt:\n parser.add_argument(\"--firmware_version\", default=None, help=\"The firmware version. (Default: {0})\".format(self.firmware_version))\n parser.add_argument(\"--product_name\", default=None, help=\"The product name. (Default: {0})\".format(self.product_name))\n parser.add_argument(\"--vendor_name\", default=None, help=\"The vendor name. (Default: {0})\".format(self.vendor_name))\n parser.add_argument(\"--serial_number\", default=None, help=\"The serial number. (Default: {0})\".format(self.serial_number))\n parser.add_argument(\"--sas_address\", default=None, help=\"The SAS address. (Default: {0})\".format(self.target_port))\n parser.add_argument(\"--target_port\", default=None, help=\"The target port. (Default: {0})\".format(self.target_port))\n parser.add_argument(\"--use_lsscsi\", action='store_true', help=\"Find devices via lsscsi. (Default: {0})\".format(self._use_lsscsi))\n parser.add_argument(\"--spt_path\", default=None, help=\"The spt tool path. 
(Default: {0})\".format(self.tool))\n\n args = parser.parse_args()\n\n self._debug = args.debug\n if self._debug:\n self.log_level = logging.DEBUG\n self._json_format = args.json\n self._long_format = args.long\n if args.drives:\n self._drives = args.drives.split(',')\n if args.exclude:\n self._exclude = args.exclude.split(',')\n if not args.noencs:\n self._include_enclosures = False\n if not args.noheader:\n self._report_header = False\n if args.power_on_hours:\n self._power_on_hours = True\n if args.firmware_version:\n self.firmware_version = args.firmware_version\n if args.product_name:\n self.product_name = args.product_name\n if args.vendor_name:\n self.vendor_name = args.vendor_name\n if args.serial_number:\n self.serial_number = args.serial_number\n if args.sas_address:\n self.target_port = args.sas_address\n if args.target_port:\n self.target_port = args.target_port\n if args.force_spt:\n self._force_spt = args.force_spt\n if args.use_lsscsi:\n self._use_lsscsi = args.use_lsscsi\n if args.spt_path:\n self.tool = args.spt_path", "def build_parser() -> ArgumentParser:\n parser = ArgumentParser(prog=\"bartender\")\n parser.add_argument(\"--version\", action=\"version\", version=version(\"bartender\"))\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n serve_sp = subparsers.add_parser(\"serve\", help=\"Serve web service\")\n serve_sp.set_defaults(func=serve)\n\n perform_sp = subparsers.add_parser(\"perform\", help=\"Async Redis queue job worker\")\n perform_sp.add_argument(\n \"--config\",\n default=Path(\"config.yaml\"),\n type=Path,\n help=\"Configuration with schedulers that need arq workers\",\n )\n perform_sp.add_argument(\n \"--destination\",\n nargs=\"+\",\n help=\"\"\"Name of destinations to run workers for.\n Each destination must have `scheduler.type:arq`.\n By default runs workers for all destinations with `scheduler.type:arq`.\"\"\",\n dest=\"destination_names\",\n )\n perform_sp.set_defaults(func=perform)\n\n add_generate_token_subcommand(subparsers)\n\n return parser", "def parser(cls, *, with_showtb=False):\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='produce more output')\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help='produce less output')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='do not actually make changes')\n\n if with_showtb:\n parser.add_argument('--traceback', action='store_true',\n default=False, help='do not hide tracebacks')\n\n return parser", "def addOption(self, parser):\n pass", "def __add_arguments__(cls, parser):", "def parseOptions():\n \n parser=op.OptionParser(usage=\"Usage %prog SERVER\"\n ,version=\"%prog 1.0\",description=\"Sets up wordpress.\"\n +\"SERVER is the base url for the server, this should be your domain name \"\n +\"which points to your machine's IP, or your machine's IP if you don't have \"\n +\"a domain name. 
This script should probably be run with sudo as it will \"\n +\"likely have to edit and read files which aren't editable or perhaps \"\n +\"not even readable by standard users.\")\n \n parser.add_option(\"--dry-run\",dest=\"dryRun\",action=\"store_true\",default=False\n ,help=\"If set will not actually do anything, only print out what it would \"\n +\"have done [not default]\")\n return parser.parse_args()", "def add_options(cls, parser):\n\n group = parser.add_argument_group(\"Transform/Docify\")\n group.add(\n \"--doc_length\",\n \"-doc_length\",\n type=int,\n default=200,\n help=\"Number of tokens per doc.\",\n )\n group.add(\n \"--max_context\",\n \"-max_context\",\n type=int,\n default=1,\n help=\"Max context segments.\",\n )", "def configure_parser(parser):\n qibuild.parsers.cmake_build_parser(parser)\n qibuild.parsers.project_parser(parser)\n group = parser.add_argument_group(\"make options\")\n group.add_argument(\"--rebuild\", \"-r\", action=\"store_true\", default=False)\n group.add_argument(\"--coverity\", action=\"store_true\", default=False,\n help=\"Build using cov-build. Ensure you have \"\n \"cov-analysis installed on your machine.\")\n group.add_argument(\"--num-workers\", \"-J\", dest=\"num_workers\", type=int,\n help=\"Number of projects to be built in parallel\")", "def __init__(self):\n self.__parser=OptionParser(option_class=MyOption)\n self.__source=''\n self.__dest=''\n self.__all=False\n self.__inv=False\n self.__tree=''\n self.__authpath=''\n self.__verbose=False", "def setup_parser():\n parser = HelpfulParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('infile', type=str, help=\"input data file\")\n\n parser.add_argument('-u', '--usage', action=\"help\",\n help=\"show this help message and exit\")\n parser.add_argument('-h', '--host', metavar='HOST', type=str,\n default='localhost', help='Server hostname')\n parser.add_argument('-p', '--port', metavar='PORT', type=int,\n default='3000', help='Server port')\n parser.add_argument('-U', '--user', metavar='USER', type=str,\n default=None, help='Username')\n parser.add_argument('-P', '--passwd', metavar='PW', type=str,\n default=None, help='Password')\n parser.add_argument('-n', '--nspace', metavar='NS', type=str,\n default='test', help='Namespace')\n parser.add_argument('-s', '--set', metavar='SET', type=str,\n default='osm', help='Set name')\n return parser", "def parse_options():\n parent_parser = argparse.ArgumentParser(add_help=False)\n # We create top level parser\n parser = argparse.ArgumentParser(\n parents=[parent_parser],\n epilog=help_parser.PARSER_EPILOG\n % {'cli': 'node', 'option': '--update'}\n + help_parser.COMMAND_EPILOG,\n formatter_class=RawTextHelpFormatter)\n\n parser.add_argument('-u', '--user', dest='username')\n parser.add_argument('-p', '--password', dest='password')\n parser.add_argument('-v', '--version', action='version', version=version)\n\n parser.add_argument(\n '-i', '--id', dest='experiment_id', type=int,\n help='experiment id submission')\n\n list_group = parser.add_mutually_exclusive_group()\n\n list_group.add_argument(\n '-e', '--exclude', action='append', \n dest='exclude_nodes_list',\n help='exclude nodes list') \n\n list_group.add_argument(\n '-l', '--list', action='append',\n dest='nodes_list',\n help='nodes list')\n\n command_group = parser.add_mutually_exclusive_group(required=True)\n\n command_group.add_argument(\n '-sta', '--start', action='store_true',\n help='start command')\n\n command_group.add_argument(\n '-sto', 
'--stop', action='store_true',\n help='stop command')\n\n command_group.add_argument(\n '-r', '--reset', action='store_true',\n help='reset command')\n\n command_group.add_argument(\n '-up','--update', dest='path_file',\n help='flash firmware command with path file')\n\n return parser", "def build_parser ():\n\n parser = argparse.ArgumentParser (description = __doc__)\n\n parser.add_argument (\n '-v', '--verbose', dest='verbose', action='count',\n help='increase output verbosity', default=0\n )\n parser.add_argument (\n '-l', '--live', dest='get_live_data', action='store_true',\n help='get live data from OSM database',\n )\n parser.add_argument (\n '-e', '--edit', action='store_true',\n help='edit the OSM database',\n )\n parser.add_argument (\n '-u', '--user', dest='my_edits', action='store_true',\n help='only report about my edits',\n )\n parser.add_argument (\n '--min-length', dest=\"min_length\", type=float, default=1000.0,\n help='way must be longer than this to get a ref (in m) (default=1000)',\n )\n parser.add_argument (\n '--batch-size', dest=\"batch_size\", type=int, default=10,\n help='apply OSM edits in changesets of this size (default=10)',\n )\n return parser", "def build_option_parser(parser):\n parser.add_argument(\n '--os-network-api-version',\n metavar='<network-api-version>',\n default=utils.env(\n 'OS_NETWORK_API_VERSION',\n default=DEFAULT_NETWORK_API_VERSION),\n help='Network API version, default=' +\n DEFAULT_NETWORK_API_VERSION +\n ' (Env: OS_NETWORK_API_VERSION)')\n return parser", "def build_option_parser(self, description, version):\n parser = argparse.ArgumentParser(\n description=description,\n add_help=False, )\n parser.add_argument(\n '--version',\n action='version',\n version=__version__, )\n parser.add_argument(\n '-v', '--verbose', '--debug',\n action='count',\n dest='verbose_level',\n default=self.DEFAULT_VERBOSE_LEVEL,\n help=_('Increase verbosity of output and show tracebacks on'\n ' errors. 
You can repeat this option.'))\n parser.add_argument(\n '-q', '--quiet',\n action='store_const',\n dest='verbose_level',\n const=0,\n help=_('Suppress output except warnings and errors.'))\n parser.add_argument(\n '-h', '--help',\n action=HelpAction,\n nargs=0,\n default=self, # tricky\n help=_(\"Show this help message and exit.\"))\n parser.add_argument(\n '-r', '--retries',\n metavar=\"NUM\",\n type=check_non_negative_int,\n default=0,\n help=_(\"How many times the request to the Neutron server should \"\n \"be retried if it fails.\"))\n # FIXME(bklei): this method should come from keystoneauth1\n self._append_global_identity_args(parser)\n\n return parser", "def ParseOptions(cls, options, config_object):", "def options(self, parser, env):\n pass", "def configure_parser(parser):\n qisys.parsers.default_parser(parser)\n parser.add_argument(\"manifest_xml\")\n parser.add_argument(\"version\", nargs=\"?\")", "def setup_parser():\n PARSER = argparse.ArgumentParser(description='Running GSI')\n\n PARSER.add_argument('analysis_datetime', type=str, help=\"analysis_datetime\")\n PARSER.add_argument('gsi_dir', type=str, help=\"gsi_dir\")\n PARSER.add_argument('gsi_processor', type=int, help=\"gsi_processor\")\n PARSER.add_argument('cycle_interval', type=int, help=\"cycle_interval\")\n PARSER.add_argument('model_vertical_level', type=int, help=\"model_vertical_level\")\n PARSER.add_argument('background_data', type=str, help=\"background_data\")\n PARSER.add_argument('crtm_root', type=str, help=\"crtm_root\")\n PARSER.add_argument('gsi_root', type=str, help=\"gsi_root\")\n \n PARSER.add_argument('--f_prepbufr', type=str, dest=\"f_prepbufr\", default='')\n PARSER.add_argument('--f_1bamua', type=str, dest=\"f_1bamua\", default='')\n PARSER.add_argument('--f_1bhrs4', type=str, dest=\"f_1bhrs4\", default='')\n PARSER.add_argument('--f_1bmhs', type=str, dest=\"f_1bmhs\", default='')\n PARSER.add_argument('--f_gpsro', type=str, dest=\"f_gpsro\", default='')\n PARSER.add_argument('--f_radwnd', type=str, dest=\"f_radwnd\", default='')\n PARSER.add_argument('--f_refInGSI', type=str, dest=\"f_refInGSI\", default='')\n PARSER.add_argument('--model_core', type=str, dest=\"model_core\", default='ARW')\n PARSER.add_argument('--cv_option', type=str, dest=\"cv_option\", default='NAM')\n PARSER.add_argument('--computing_platform', type=str, dest=\"computing_platform\", default='LINUX_PBS')\n PARSER.add_argument('--new_run', type=str, dest=\"new_run\", default='True')\n PARSER.add_argument('--outer_loop', type=int, dest=\"outer_loop\", default=2)\n PARSER.add_argument('--inner_loop', type=int, dest=\"inner_loop\", default=50)\n PARSER.add_argument('--if_clean', type=str, dest=\"if_clean\", default='no')\n\n '''\n python Main_Script.py 2017082112 /mnt/WRF/gsi_test/practice_11 4 1 50 /mnt/WRF/wrf_1FMTHf/wrfinput_d01 /opt/miniconda2/envs/wrf/crtm-2.2.3/CRTM_2.2.3 /opt/miniconda2/envs/wrf/comGSIv3.5_EnKFv1.1 --f_prepbufr /opt/miniconda2/envs/wrf/bufr_stuff/bin/test.bufr\n return PARSER.parse_args(['2017082112', '/home/szhang/gsi_directory/practice_10', \n 4, 1, 50,\n '/home/szhang/gsi_directory/practice_10/background_data', \n '/home/szhang/gsi_directory/practice_10/crtm_root', \n '/home/szhang/gsi_directory/practice_10/gsi_root', \n '--f_prepbufr', '/home/szhang/gsi_directory/practice_10/f_prepbufr'])\n '''\n return PARSER.parse_args()" ]
[ "0.7635837", "0.7635837", "0.7205048", "0.71621025", "0.7039156", "0.7033386", "0.702205", "0.6990235", "0.69513005", "0.6902962", "0.68967104", "0.6892518", "0.6840839", "0.6837334", "0.68262494", "0.6812211", "0.6810176", "0.6762378", "0.67567515", "0.67565316", "0.6741107", "0.6739755", "0.6729433", "0.6721733", "0.67190635", "0.67001945", "0.66763306", "0.6663974", "0.6663634", "0.66482484", "0.6639496", "0.6639401", "0.66337043", "0.6633503", "0.6628216", "0.6619177", "0.661351", "0.6582466", "0.65807503", "0.65799636", "0.6554797", "0.6551018", "0.6549183", "0.65309083", "0.65254587", "0.6522199", "0.65199625", "0.65131867", "0.650897", "0.65031284", "0.64944345", "0.6490752", "0.64882934", "0.6479451", "0.64746124", "0.64704186", "0.6469359", "0.64612216", "0.6430085", "0.6421517", "0.6404052", "0.6402925", "0.6401517", "0.6399882", "0.63923657", "0.6387728", "0.6382988", "0.63774866", "0.6373769", "0.6373709", "0.6367486", "0.63657194", "0.6365447", "0.63636094", "0.63542444", "0.6354237", "0.63523793", "0.63508385", "0.634936", "0.63393", "0.63291955", "0.63147986", "0.63133705", "0.63086385", "0.6305851", "0.6303311", "0.6297126", "0.6294804", "0.62936395", "0.62935317", "0.629318", "0.6287834", "0.62851155", "0.6282474", "0.62822133", "0.6281885", "0.6274804", "0.6272261", "0.6271641", "0.62713933" ]
0.6769619
17
Run the makerdf script.
def main(): optparser = initOpts() (options, args) = optparser.parse_args() output = StringIO.StringIO() assembleRDF(file(options.licenses_xml), output, options.verbose) if options.output_rdf: file(options.output_rdf, 'w').write(output.getvalue()) else: print output.getvalue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(): \n # Parse Arguments\n args = parse_arguments()\n\n # Print outdir\n print(\"Writing output to \" + args.outdir)\n\n # Print start statement\n print('Starting script for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)\n\n # Put all the files in a function that will further handle the files as dataframe\n create_df(args.file, args.outdir)\n\n # Script is finished\n print('All done for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)", "def go(self):\n fof_file = files.get_fof_file(self['run'])\n plot_file = fof_file.replace('.fits','.png')\n\n with open(self.args.fit_config) as fobj:\n fit_conf = yaml.load(fobj)\n\n text=_fof_script_template % {\n 'fof_file':fof_file,\n 'plot_file':plot_file,\n 'fit_config':self.args.fit_config,\n 'meds_files':self.meds_files,\n 'extra_psf_fwhm': fit_conf['fofs']['extra_psf_fwhm_arcsec']\n }\n\n fof_script=files.get_fof_script_path(self['run'])\n print('writing fof script:',fof_script)\n with open(fof_script,'w') as fobj:\n fobj.write(text)\n os.system('chmod 755 %s' % fof_script)", "def main():\n s = content.DataFiles()\n \n date_list = generate.get_list_dates(2016, 2016, 500)\n prod_list = list(s.get_collist_by_name(os.path.join(content.data_fldr,'food','garden_produce.csv'), 'name')[0])\n \n tbl_cust = generate.TableGenerator(8, ['STRING','PEOPLE', 'PEOPLE', 'PLACE'], ['Customer ID', 'First Name', 'Surname', 'Country'])\n tbl_cust.save_table('customers.csv')\n cust_list = list(s.get_collist_by_name('customers.csv', 'Customer ID')[0])\n \n tbl_sales = generate.TableGenerator(25, [date_list, cust_list, prod_list, 'CURRENCY'], ['Date of sale', 'Customer ID', 'Product', 'Amount'])\n tbl_sales.save_table('sales.csv')", "def main():\n parser = ArgumentParser(\n description=\"Write an XDMF file for post-processing results in HDF5.\")\n parser.add_argument(dest=\"file_name\", metavar=\"<filename>\",\n help=\"path to an HDF5 file for which XDMF metadata should be written\")\n parser.add_argument(\"-t\", \"--type\", dest=\"type\", metavar=\"<type>\",\n choices=['single', 'multiple'], default=\"multiple\", help=\"type of HDF5 file\")\n parser.add_argument(\"-a\", \"--analysis\", dest=\"analysis\", metavar=\"<analysis>\",\n choices=['static', 'temporal'], default=\"temporal\", help=\"type of analysis\")\n parser.add_argument(\"-m\", \"--mesh-path\", dest=\"mesh_path\", metavar=\"<mesh-path>\",\n default=\"/ModelData\", help=\"internal HDF5 file path to the mesh\")\n parser.add_argument(\"-r\", \"--results-path\", dest=\"results_path\", metavar=\"<results-path>\",\n default=\"/ResultsData\", help=\"internal HDF5 file path to the results\")\n parser.add_argument(\"--require-results\",\n dest = \"require_results\",\n action = \"store_const\",\n default = False,\n const = True,\n help = \"Ignore outputs that have mesh data but lack results.\")\n print('\\nCreate XDMF:')\n args = parser.parse_args()\n if args.type == \"multiple\" and args.analysis == \"temporal\":\n WriteMultifileTemporalAnalysisToXdmf(args.file_name,\n args.mesh_path,\n args.results_path)\n elif args.type == \"single\" and args.analysis == \"temporal\":\n WriteSinglefileTemporalAnalysisToXdmf(args.file_name,\n args.mesh_path,\n args.results_path,\n require_results = args.require_results)\n else:\n raise RuntimeError(\"Unsupported command line options.\")", "def main():\n argparser = argparse.ArgumentParser(description=\"Convert plot to table\")\n\n argparser.add_argument(\"pdf\", action=\"store\", help=\"pdf file\",\n default=None, nargs=\"*\")\n\n 
args = argparser.parse_args()\n\n if len(args.pdf) == 0:\n open_gui()\n else:\n process_pdf(args.pdf[0])\n\n generate_data()", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def main():\n parser = optparse.OptionParser()\n parser.add_option(\"-c\", \"--clear\", action=\"store_true\", dest=\"clear\",\n help=\"clear out all generated reports\")\n parser.add_option(\"-n\", \"--num\", action=\"store\", type=\"int\", dest=\"num\",\n help=\"number of data points to generate\")\n parser.add_option(\"-m\", \"--min\", action=\"store\", type=\"float\", dest=\"min\",\n help=\"minimum of polynomial range\")\n parser.add_option(\"-f\", \"--fun\", action=\"store\", type=\"string\", dest=\"fun\",\n help=(\"Python expression (function of x)\"))\n (options, _) = parser.parse_args()\n if options.clear:\n clear_data()\n else:\n report_id = generate_id()\n if report_id is None:\n print \"Too many tests exist already\"\n else:\n gen = DataGen(options.min, options.fun, options.num)\n gen.generate_data()\n gen.write_ref(report_id)\n gen.write_rand(report_id)", "def main():\n mip = parametros()\n mir = Reporte(CURRENT_PATH, mip.debug, mip.overwrite)\n pdfs = mir.obtener()\n if pdfs:\n print(\"Obteniendo nuevos pdf:\")\n for pdf in pdfs:\n print(f\"* {pdf}\")\n\n for file in glob.glob(f\"{CURRENT_PATH}/resources/pdf/*.pdf\"):\n data = mir.parser(file)\n mir.escribir(data)", "def run_report_generation(**kwargs):\n out = run_python_script_helper(\n os.path.dirname(__file__), \"report_generation_example.py\", **kwargs\n )\n return out", "def main():\n args = setup_args()\n header_info = extract_header_info_from_probes(args.probe)\n\n for gene in header_info.keys():\n # check there is a folder for gene, else create it\n gene_out_dir = os.path.join(args.output_path, gene)\n if not os.path.exists(gene_out_dir):\n os.mkdir(gene_out_dir)\n\n gene_msa_fname = os.path.join(gene_out_dir, '{}_msa.fa'.format(gene))\n gene_ref = os.path.join(os.path.abspath(args.gene_refs), gene + '.fa')\n generate_msa_for_gene(gene, header_info[gene], gene_ref, gene_msa_fname)", "def main(argv):\n data = load_data(\"../car_sales.json\")\n summary = process_data(data)\n print(summary)\n summary_with_brakes = \"\"\n summary_with_lines = \"\"\n for item in summary:\n summary_with_brakes += item + '<br/>'\n summary_with_lines += item + '\\n'\n print(summary_with_brakes)\n # TODO: turn this into a PDF report\n table_data = cars_dict_to_table(data)\n reports.generate(\"/tmp/cars.pdf\", \"Cars\", summary_with_brakes, table_data)\n # TODO: send the PDF report as an email attachment\n recipient = \"{}@example.com\".format(os.environ.get('USER'))\n message = emails.generate('[email protected]', recipient, 'Sales summary for last month', summary_with_lines, \"/tmp/cars.pdf\")\n emails.send(message)", "def script_generator(self):\n analyze_tool = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py\"\n ex_options = self.global_setting.get('analyze_options', str())\n py = self.global_setting.get('python', sys.executable)\n if os.access(py, os.X_OK):\n content = \"set -e \\n\" \n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s plot_curve *.log.json \"%(py, analyze_tool)\n content += \"--keys loss loss_cls loss_pts_init \"\n content += \"loss_pts_refine \"\n content += \"--out losses.pdf %s &> analyze.log \\n\"%(ex_options)\n\n content += \"touch analyze.done \\n\"\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", 
"def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def main():\n \n ## read parameters from command line\n parser = argparse.ArgumentParser() \n parser.add_argument(\"-d\", \"--density\", help=\"Density\")\n parser.add_argument(\"-k\", \"--kappa\", help=\"Bending rigidity\")\n parser.add_argument(\"-t\", \"--time\", help=\"Timestep\") \n parser.add_argument(\"-s\", \"--save\", help=\"Save options\", action=\"store_true\") \n args = parser.parse_args()\n\n ## load data \n database = \"/local/duman/SIMULATIONS/long_filaments\"\n datafolder = database + \"/density_\" + args.density + \"/kappa_\" + args.kappa\n infile = datafolder + \"/VORTEX/data.hdf5\"\n lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity \\\n = load_data(infile, int(args.time))\n \n ## plot data\n savebase = \"~/RolfData/many_filaments_5\" \n if args.save:\n sfolder = savebase + \"/plots/VORTEX\" \n os.system('mkdir -p ' + sfolder)\n else:\n sfolder = \"~/Desktop\"\n plot_data(sfolder, lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity, args.save)", "def do_run(template_file, wkdir=None):\r\n\r\n # Imports\r\n from opan.utils import make_timestamp\r\n from opan.const import atomSym\r\n\r\n # If wkdir specified, try changing there first\r\n if not wkdir == None:\r\n old_wkdir = os.getcwd()\r\n os.chdir(wkdir)\r\n ## end if\r\n\r\n # Pull in the template\r\n with open(template_file) as f:\r\n template_str = f.read()\r\n ## end with\r\n\r\n # Create working folder, enter\r\n dir_name = time.strftime(dir_fmt)\r\n os.mkdir(dir_name)\r\n os.chdir(dir_name)\r\n\r\n # Set up and create the log, and log wkdir\r\n setup_logger()\r\n logger = logging.getLogger(log_names.loggername)\r\n logger.info(\"Jensen calc series started: \" + time.strftime(\"%c\"))\r\n logger.info(\"Working in directory: \" + os.getcwd())\r\n\r\n # Proofread the template\r\n proof_template(template_str)\r\n\r\n # Log the template file contents\r\n logger.info(\"Template file '\" + template_file + \"' contents:\\n\\n\" + \\\r\n template_str)\r\n\r\n # Log the metals and nonmetals to be processed, including those\r\n # nonmetals for which the monocations will be calculated.\r\n logger.info(\"Metals: \" + \", \".join([atomSym[a].capitalize() \\\r\n for a in metals]))\r\n logger.info(\"Non-metals: \" + \", \".join([atomSym[a].capitalize() \\\r\n for a in nonmetals]))\r\n logger.info(\"Cations calculated for non-metals: \" + \\\r\n \", \".join([atomSym[a].capitalize() for a in cation_nms]))\r\n\r\n # Log the geometry scale-down factor, if used\r\n if fixed_dia_sep:\r\n logger.info(\"Using fixed initial diatomic separation of \" + \\\r\n str(init_dia_sep) + \" Angstroms.\")\r\n else:\r\n logger.info(\"Using geometry scale-down factor: \" + str(geom_scale))\r\n ## end if\r\n\r\n # Store the starting time\r\n start_time = time.time()\r\n\r\n # Create the data repository\r\n repo = 
h5.File(repofname, 'a')\r\n\r\n # Log notice if skipping atoms\r\n if skip_atoms:\r\n logger.warning(\"SKIPPING ATOM COMPUTATIONS\")\r\n else:\r\n # Loop atoms (atomic calculations)\r\n for at in metals.union(nonmetals):\r\n run_mono(at, template_str, repo)\r\n repo.flush()\r\n ## next at\r\n ## end if\r\n\r\n # Loop atom pairs (diatomics) for run execution\r\n for m in metals:\r\n for nm in nonmetals:\r\n # Run the diatomic optimizations\r\n run_dia(m, nm, 0, template_str, repo)\r\n\r\n # Ensure repository is updated\r\n repo.flush()\r\n\r\n # Run the diatomic monocation optimizations for hydrides, oxides\r\n if nm in cation_nms:\r\n run_dia(m, nm, 1, template_str, repo)\r\n ## end if\r\n\r\n # Ensure repository is updated\r\n repo.flush()\r\n\r\n # Clear any residual temp files from failed comps\r\n clear_tmp(atomSym[m].capitalize() + atomSym[nm].capitalize())\r\n\r\n ## next nm\r\n ## next m\r\n\r\n # Close the repository\r\n repo.close()\r\n\r\n # Exit the working directory; if wkdir not specified then just go to\r\n # parent directory; otherwise restore the old wd.\r\n if wkdir == None:\r\n os.chdir('..')\r\n else:\r\n os.chdir(old_wkdir)\r\n ## end if\r\n\r\n # Log end of execution\r\n logger.info(\"Calc series ended: \" + time.strftime(\"%c\"))\r\n logger.info(\"Total elapsed time: \" + \\\r\n make_timestamp(time.time() - start_time))", "def run_script(input_dir, output_dir):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 1. load dataset \"\"\"\n print(\"loading data ......\")\n print(\"+++++++Read the surface shape data+++++++\")\n shape_file_name = input_dir + \"aligned_shapes.mat\"\n mat = loadmat(shape_file_name)\n y_design = mat['aligned_shape']\n n, l, m = y_design.shape\n print(\"The dimension of shape matrix is \" + str(y_design.shape))\n print(\"+++++++Read the sphere coordinate data+++++++\")\n template_file_name = input_dir + \"template.mat\"\n mat = loadmat(template_file_name)\n coord_mat = mat['template']\n # d = coord_mat.shape[1]\n print(\"+++++++Read the design matrix+++++++\")\n design_data_file_name = input_dir + \"design_data.txt\"\n design_data = np.loadtxt(design_data_file_name)\n # read the covariate type\n var_type_file_name = input_dir + \"var_type.txt\"\n var_type = np.loadtxt(var_type_file_name)\n print(\"+++++++Construct the design matrix: normalization+++++++\")\n x_design = read_x(design_data, var_type)\n p = x_design.shape[1]\n print(\"The dimension of design matrix is \" + str(x_design.shape))\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing\"\"\"\n gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step3. 
Save all the results\"\"\"\n gpvals_file_name = output_dir + \"global_pvalue.txt\"\n np.savetxt(gpvals_file_name, gpvals)\n lpvals_fdr_file_name = output_dir + \"local_pvalue_fdr.txt\"\n np.savetxt(lpvals_fdr_file_name, lpvals_fdr)\n clu_pvals_file_name = output_dir + \"cluster_pvalue.txt\"\n np.savetxt(clu_pvals_file_name, clu_pvals)", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, 
-1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def main():\n \n # Build arguments and grab arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file1\", help=\"Input 1st input filename\")\n parser.add_argument(\"file2\", help=\"Input 2nd input filename\")\n args = parser.parse_args()\n\n\n #do the distance calculation\n distances, mean, stdv = find_distances(args.file1, args.file2)\n np.savetxt(str(args.file1)+\"_\"+str(args.file2)+\"_comparison\"+\".txt\", np.c_[mean, stdv], fmt=\"%s\", delimiter=\",\", header=\"mean, stdv\")\n plot = plot_distances(distances,args.file1, args.file2)\n plt.savefig((str(args.file1)+\"_\"+str(args.file2)+\"distance_histrogram.pdf\"))", "def runStudy(catName,energyStr,truePdfName,dataFileNames,sigMasses):\n\n dataTree = root.TChain()\n for i in dataFileNames:\n dataTree.Add(i+\"/outtree\"+catName)\n dataTree.SetCacheSize(10000000);\n dataTree.AddBranchToCache(\"*\");\n\n truePdfFunc = None\n if truePdfName == \"Bernstein\" or truePdfName == \"Chebychev\" or truePdfName == \"Polynomial\" or truePdfName == \"SumExp\" or truePdfName == \"SumPow\" or truePdfName == \"Laurent\" or truePdfName == \"ExpTimesBernstein\" or truePdfName == \"ExpTimesChebychev\" or truePdfName == \"ExpTimesPolynomial\":\n truePdfFunc = getattr(fitOrderChooser,\"makePDFBak\"+truePdfName)\n else:\n truePdfFunc = getattr(makeCards,\"makePDFBak\"+truePdfName)\n\n dimuonMass = root.RooRealVar(\"dimuonMass\",\"m [GeV/c^{2}]\",110.,160.)\n dimuonMass.setBins(50)\n dimuonMass.setRange(\"exprange\",120,160)\n dimuonMass.setRange(\"whole\",110,160)\n dimuonMass.setRange(\"low\",110,120) # Silly ranges for old fit functionality\n dimuonMass.setRange(\"high\",130,160)\n dimuonMass.setRange(\"signal\",120,130)\n dimuonMass.setRange(\"signalfit\",110,140)\n dimuonMass.setRange(\"annaRegion\",123.5,127.5)\n dimuonMassArgSet = root.RooArgSet(dimuonMass)\n wTrue = root.RooWorkspace(\"wTrue\")\n wTrueImport = getattr(wTrue,\"import\")\n\n canvas = root.TCanvas(\"canvas\"+catName+energyStr+truePdfName)\n tlatex = root.TLatex()\n tlatex.SetNDC()\n tlatex.SetTextFont(root.gStyle.GetLabelFont())\n tlatex.SetTextSize(0.04)\n\n # Hack to Make makePDFBakOld work\n 
minMassZ = 88.\n maxMassZ = 94.\n dimuonMassZ = root.RooRealVar(\"dimuonMass\",\"dimuonMass\",minMassZ,maxMassZ)\n\n ### Load data\n \n realData = root.RooDataSet(\"realData\"+catName+energyStr,\n \"realData\"+catName+energyStr,\n dataTree,root.RooArgSet(dimuonMass)\n )\n realDataHist = realData.binnedClone(\"realDataHist\"+catName+energyStr)\n nData = realData.sumEntries()\n realDataZ = root.RooDataSet(\"realDataZ\"+catName+energyStr,\n \"realDataZ\"+catName+energyStr,\n dataTree,root.RooArgSet(dimuonMassZ)\n )\n\n ### Make Bak Pdfs\n\n trashParamList, trashBakNormTup, trashDebug, trueOrder = truePdfFunc(truePdfName+catName+energyStr,realData,dimuonMass,110,160,wTrueImport,dimuonMassZ,realDataZ)\n truePdf = wTrue.pdf(\"bak\")\n truePdf.SetName(truePdfName)\n truePdf.SetTitle(\"True PDF \")\n\n nDataVar = root.RooFit.RooConst(nData)\n nBakVar = root.RooRealVar(\"nBak\",\"N_{B}\",nData/2.,nData*2)\n truePdfE = root.RooExtendPdf(truePdfName+\"E\",\"True PDF Extended\",truePdf,nBakVar)\n\n # Make sure Voigt params are set to True vals and constant\n if truePdfName == \"Old\":\n for xTrue in rooArgSet2List(truePdf.getParameters(realData)):\n if not (\"voit\" in xTrue.GetName()):\n continue\n for xToy in rooArgSet2List(trueToyPdf.getParameters(realData)):\n trueMatch = re.match(r\".*(_voit.*)\",xTrue.GetName()) \n toyMatch = re.match(r\".*(_voit.*)\",xToy.GetName()) \n assert(trueMatch)\n if not toyMatch:\n continue\n trueBaseName = trueMatch.group(1)\n toyBaseName = toyMatch.group(1)\n if not ( trueBaseName == toyBaseName ):\n continue\n xToy.setVal(xTrue.getVal())\n xTrue.setConstant(True)\n xToy.setConstant(True)\n\n ### Now load Signal PDFs\n nSigVarBounds = nData/2.\n nSigVar = root.RooRealVar(\"nSig\",\"N_{S}\",-nSigVarBounds,nSigVarBounds)\n sigPdfs = []\n sigPdfEs = []\n wSigs = []\n for hmass in sigMasses:\n wSig = root.RooWorkspace(\"signal\"+catName+energyStr+str(hmass))\n makeCards.makePDFSigNew(catName+energyStr,\"sig_ggH\",dimuonMass,float(hmass),\n getattr(wSig,\"import\")\n )\n sigPdf = wSig.pdf(\"ggH\")\n sigPdf.SetName(\"sigPDF_\"+str(hmass)+\"_\"+catName+energyStr)\n sigPdfs.append(sigPdf)\n wSigs.append(wSig)\n sigPdfE = root.RooExtendPdf(sigPdf.GetName()+\"E\",sigPdf.GetTitle()+\" Extended\",sigPdf,nSigVar)\n sigPdfEs.append(sigPdfE)\n\n ## Load the 1*SM N signal events\n nSigSMs = []\n for hmass in sigMasses:\n nSigSMs.append(getSMSigCounts(catName,hmass,energy=energyStr))\n\n result = {}\n\n ### Do S+B Fits\n for hmass,sigPdf,sigPdfE,nSigSM in zip(sigMasses,sigPdfs,sigPdfEs,nSigSMs):\n truePdfPlusSigPdf = root.RooAddPdf(\"truePdfPlusSigPdf\"+catName+energyStr,\"\",root.RooArgList(truePdfE,sigPdfE))\n fr = truePdfPlusSigPdf.fitTo(realData,\n PRINTLEVEL,\n root.RooFit.Save(1)\n )\n #frPars = fr.floatParsFinal()\n #for i in range(frPars.getSize()):\n # frPars[i].Print()\n #nSigVar.Print()\n\n result[hmass] = nSigVar.getError()\n #result[hmass] = nSigVar.getError()/nSigSM\n\n # Debug plot for fit to data\n frame = dimuonMass.frame()\n chi2RealDataVar = truePdfPlusSigPdf.createChi2(realDataHist)\n ndfRealData = dimuonMass.getBins() - 1 # b/c roofit normalizes\n ndfRealData -= rooPdfNFreeParams(truePdfPlusSigPdf,realDataHist)\n realData.plotOn(frame)\n errVisArg = root.RooFit.VisualizeError(fr,1,True)\n errFillArg = root.RooFit.FillStyle(3001)\n truePdfPlusSigPdf.plotOn(frame,root.RooFit.Range('low,signal,high'),root.RooFit.NormRange('low,signal,high'),errVisArg,errFillArg,root.RooFit.FillColor(root.kGreen-7))\n 
truePdfPlusSigPdf.plotOn(frame,root.RooFit.Range('low,signal,high'),root.RooFit.NormRange('low,signal,high'),root.RooFit.Components(truePdf.GetName()),root.RooFit.LineStyle(2),root.RooFit.LineColor(root.kRed+1))\n truePdfPlusSigPdf.plotOn(frame,root.RooFit.Range('low,signal,high'),root.RooFit.NormRange('low,signal,high'))\n #truePdfPlusSigPdf.plotOn(frame,root.RooFit.Range('low,signal,high'),root.RooFit.NormRange('low,signal,high'),root.RooFit.Components(sigPdf.GetName()),root.RooFit.LineColor(root.kRed+1))\n \n frame.Draw()\n frame.SetTitle(\"\")\n frame.GetYaxis().SetTitle(\"Events / 1 GeV/c^{2}\")\n tlatex.SetTextAlign(12)\n tlatex.DrawLatex(gStyle.GetPadLeftMargin(),0.96,\"CMS Internal\")\n tlatex.DrawLatex(0.02+gStyle.GetPadLeftMargin(),0.85,\"Ref PDF: \"+truePdfName)\n tlatex.SetTextAlign(32)\n tlatex.DrawLatex(0.99-gStyle.GetPadRightMargin(),0.96,catName+\" \"+energyStr)\n tlatex.DrawLatex(0.97-gStyle.GetPadRightMargin(),0.85,\"Ref. S+B Fit to Real Data\")\n tlatex.DrawLatex(0.97-gStyle.GetPadRightMargin(),0.80,\"Ref. GOF: {0:.2f}\".format(scipy.stats.chi2.sf(chi2RealDataVar.getVal(),ndfRealData)))\n tlatex.DrawLatex(0.97-gStyle.GetPadRightMargin(),0.75,\"Ref. #chi^{{2}}/NDF: {0:.2f}\".format(chi2RealDataVar.getVal()/ndfRealData))\n canvas.SaveAs(\"output/debug_oneSig_RealData_\"+truePdfName+\"_\"+catName+\"_\"+energyStr+\"_\"+str(hmass)+\".png\")\n\n return result", "def main():\n\n # Initial message\n taq_data_tools_responses_physical_short_long.taq_initial_message()\n\n # Tickers and days to analyze\n year = '2008'\n tickers = ['AAPL', 'GOOG']\n taus_p = [x for x in range(10, 101, 10)]\n tau = 1000\n\n # Basic folders\n taq_data_tools_responses_physical_short_long.taq_start_folders(year)\n\n # Run analysis\n taq_data_plot_generator(tickers, year, tau, taus_p)\n\n print('Ay vamos!!!')\n\n return None", "def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)", "def build(self) -> None:\n\n print(\"Genereting files..\")\n self.doc = self.doc + r'\\end{document}'\n\n f = open(\"latex\\\\\" + self.report_name + '.tex', 'w')\n f.write(self.doc)\n f.close()\n\n os.chdir('latex')\n\n cmd = ['pdflatex', '-interaction', 'nonstopmode', self.report_name + '.tex']\n #cmd = ['pdflatex', '-interaction', self.report_name + '.tex']\n\n for i in range(2):\n proc = subprocess.Popen(cmd)\n proc.communicate()\n retcode = proc.returncode\n if not retcode == 0:\n os.chdir('..')\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n os.unlink(self.report_name + '.aux')\n os.unlink(self.report_name + '.lof')\n os.unlink(self.report_name + '.log')\n os.unlink(self.report_name + '.lot')\n os.unlink(self.report_name + '.out')\n os.unlink(self.report_name + '.toc')\n\n os.chdir('..')", "def create_gar(self):\n print('Maketh the report!')\n # Date setup\n date = datetime.today().strftime('%Y-%m-%d')\n year = datetime.today().strftime('%Y')\n\n # Page setup\n geometry_options = {\"tmargin\": \"2cm\",\n \"lmargin\": \"1.8cm\",\n \"rmargin\": \"1.8cm\",\n \"headsep\": \"1cm\"}\n\n doc = pylatex.Document(page_numbers=False,\n geometry_options=geometry_options)\n\n header = self.produce_header_footer()\n\n doc.preamble.append(header)\n 
doc.change_document_style(\"header\")\n\n #\n # DOCUMENT BODY/CREATION\n with doc.create(pylatex.Section('GeneSippr Analysis Report', numbering=False)):\n doc.append('GeneSippr!')\n\n with doc.create(pylatex.Subsection('GeneSeekr Analysis', numbering=False)) as genesippr_section:\n with doc.create(pylatex.Tabular('|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|')) as table:\n # Header\n table.add_hline()\n table.add_row(self.genesippr_table_columns)\n for sample_name in self.samples:\n table_data = [sample_name]\n for data in self.genesippr_headers:\n try:\n print(sample_name, data, self.report_data['genesippr'][sample_name][data])\n table_data.append(self.report_data['genesippr'][sample_name][data])\n except KeyError:\n pass\n table.add_row(table_data)\n self.create_caption(genesippr_section, 'a', \"+ indicates marker presence : \"\n \"- indicates marker was not detected\")\n\n # Create the PDF\n doc.generate_pdf('{}_{}_{}'\n .format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date), clean_tex=False)\n print('{}_{}_{}'.format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date))\n # for report_name in self.report_data:\n # for sample_name in self.samples:\n # for header, value in self.report_data[report_name][sample_name].items():\n # print(report_name, sample_name, header, value)", "def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()", "def main( argv = None ):\n\n if not argv: argv = sys.argv\n\n # setup command line parser\n parser = optparse.OptionParser( version = \"%prog version: $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $\", \n usage = globals()[\"__doc__\"] )\n\n ## add common options (-h/--help, ...) 
and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n infile = open(args[0], 'r')\n min_rpkm = float(args[1])\n outfile = open(args[2] + '.distance', 'w')\n\n # try to detect if relative or absolute path\n if argv[1][0] == '/' or argv[1][0] == '~':\n ABS_PATH = True\n else:\n ABS_PATH = False\n\n # build output matrix\n for line in infile:\n if line.startswith(\"#\"): continue\n if line.startswith(\"gene_id\"): \n header = line[:-1].split('\\t')[2::]\n num_samples=len(header)\n outfile.write(\" %s\\n\" % num_samples)\n\n # initialize output matrix\n the_matrix=[]\n for i in range(num_samples):\n the_matrix.append([0.0]*num_samples)\n continue\n\n la = map(float, line.rstrip('\\n').split('\\t')[2::])\n if max(la) < min_rpkm:\n continue\n la = map(lambda x: x + 0.01, la) # to handle any zero values, add 0.01 to every RPKM\n avg_rpkm = float(sum(la))/len(la)\n ratios = map(lambda x: log(x/avg_rpkm, 2), la)\n for i in range(num_samples):\n for j in range(num_samples):\n the_matrix[i][j] += abs( ratios[i] - ratios[j] )\n\n # write distance matrix\n for i in range(num_samples):\n outfile.write( \"%-10s\" % header[i] )\n for j in range(num_samples):\n outfile.write( ' ' + str( the_matrix[i][j] ) )\n outfile.write( '\\n' )\n infile.close(); outfile.close()\n\n # create tmp directory & work there - different syntax though if absolute vs relative path\n # make commands file for fitch & run\n commands_file = open( argv[3] + '.commands', 'w')\n TMP_DIR = \"\".join([choice(letters) for x in xrange(10)]); getoutput('mkdir %s' % TMP_DIR)\n if ABS_PATH:\n commands_file.write( '%s\\nG\\nJ\\n23\\n5000\\nP\\n0\\n2\\nY\\n' % (argv[3] + '.distance') )\n commands_file.close()\n getoutput('cd %s; fitch < %s; rm outfile; mv outtree %s; cd ..' % ( TMP_DIR, argv[3] + '.commands', argv[3] ) )\n else:\n commands_file.write( '../%s\\nG\\nJ\\n23\\n5000\\nP\\n0\\n2\\nY\\n' % (argv[3] + '.distance') )\n commands_file.close()\n getoutput('cd %s; fitch < ../%s; rm outfile; mv outtree ../%s; cd ..' 
% ( TMP_DIR, argv[3] + '.commands', argv[3] ) )\n getoutput('rmdir %s' % TMP_DIR )\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def setup(self):\n super(__class__, self).setup()\n # construct command line call\n setup_script = '%s/tfMRI.py' % \\\n os.environ['ABCDTASKPREPDIR']\n arg1 = self.kwargs['path']\n arg2 = self.kwargs['sourcedata_root']\n arg3 = self.kwargs['subject']\n arg4 = self.kwargs['session']\n anat_metadata = self.config.get_bids('t1w_metadata')\n # get make/software information\n make = anat_metadata['Manufacturer']\n if make == 'GE':\n reg = re.compile(r'.*(DV2[56]).*')\n software_version = reg.match(anat_metadata[\n 'SoftwareVersions']).group(1)\n else:\n software_version = 'NA'\n cmd = ' '.join((setup_script, arg1, arg2, arg3, arg4, make,\n software_version))\n print(cmd)\n\n log_dir = self._get_log_dir()\n out_log = os.path.join(log_dir, self.__class__.__name__ + '_setup.out')\n err_log = os.path.join(log_dir, self.__class__.__name__ + '_setup.err')\n result = self.call(cmd, out_log, err_log)", "def main():\n \n MiscUtil.PrintInfo(\"\\n%s (RDK v%s; %s): Starting...\\n\" % (ScriptName, rdBase.rdkitVersion, time.asctime()))\n \n (WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()\n \n # Retrieve command line arguments and options...\n RetrieveOptions()\n \n # Process and validate command line arguments and options...\n ProcessOptions()\n \n # Perform actions required by the script...\n CalculateRMSD()\n \n MiscUtil.PrintInfo(\"\\n%s: Done...\\n\" % ScriptName)\n MiscUtil.PrintInfo(\"Total time: %s\" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))", "def main():\n\n\toptions = parse_arguments()\n\tcodon_counts = parse.codon_freq_table(options.codon)\n\tgenetic_code = parse.genetic_code(options.codon_table, options.gene_code)\n\n\tdc = degenerate_codons(genetic_code=genetic_code,codon_counts=codon_counts)\n\tdc.compute_results()\n\tdc.output(options.output_format)", "def run_makefakedata(self):\n cl_mfd = self._build_MFD_command_line()\n\n check_ok = self.check_cached_data_okay_to_use(cl_mfd)\n if check_ok is False:\n utils.run_commandline(cl_mfd)\n if not np.all([os.path.isfile(f) for f in self.sftfilenames]):\n raise IOError(\n f\"It seems we successfully ran {self.mfd},\"\n f\" but did not get the expected SFT file path(s): {self.sftfilepath}.\"\n f\" What we have in the output directory '{self.outdir}' is:\"\n f\" {os.listdir(self.outdir)}\"\n )\n logger.info(f\"Successfully wrote SFTs to: {self.sftfilepath}\")\n logger.info(\"Now validating each SFT file...\")\n for sft in self.sftfilenames:\n lalpulsar.ValidateSFTFile(sft)", "def dms_maker(self, force_rerun=False):\n log.debug('{}: running surface representation maker...'.format(self.id))\n\n if not self.receptorpdb_path:\n return ValueError('Please run protein_only_and_noH')\n\n dms = op.join(self.dock_dir, '{}_receptor.dms'.format(self.id))\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=dms):\n cmd = 'dms {} -n -w 1.4 -o {}'.format(self.receptorpdb_path, dms)\n os.system(cmd)\n\n self.dms_path = dms\n\n if ssbio.utils.is_non_zero_file(dms):\n self.dms_path = dms\n log.debug('{}: successful dms execution'.format(self.dms_path))\n else:\n log.critical('{}: dms_maker failed to run on receptor file'.format(self.receptorpdb_path))", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = 
sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def main():\n args = utils.read_arguments(__doc__)\n documents = []\n filenames = list(traverse_directory(args[\"input_dirpath\"],'*clean*.txt'))\n labels_dirname = args[\"labels_dirpath\"]\n labels_from_json = get_all_labels_from_json(labels_dirname)\n for filename in tqdm(filenames):\n with AnnotatedIBMFactory(filename) as instance_extractor:\n filename_key = filename.split(\"/\")[-1]\n document = instance_extractor.build_document(\n labels_from_json[filename_key])\n documents.append(document)\n utils.pickle_to_file(documents, args['output_file'])", "def main():\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])", "def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('filename', type=argparse.FileType('r'), nargs='+', help='The list of files to generate strdl documentation for')\n args = parser.parse_args()\n for file in args.filename:\n strdl_gen.generate_file(strdl_parser.parse(file))", "def run():\n print(\"clewsy CLEWs Model Building Script.\")\n print(\"When using clewsy please reference:\")\n print(\"T. Niet and A. 
Shivakumar (2020): clewsy: Script for building CLEWs models.\")\n main(sys.argv[1:])", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Starting crazy calculations...\")\n createlookuptable(args.imagefolder)\n _logger.info(\"Script ends here\")", "def writeScripts():\n msepR = \"\"\"#!/usr/bin/env Rscript\n args = commandArgs(TRUE)\n mutGene = args[1]\n mutFile = args[2]\n wtFile = args[3]\n\n mutData = read.table(mutFile)\n wtData = read.table(wtFile)\n xMin = min(mutData$V2, wtData$V2)\n if (xMin > 0) {\n xMin = 0\n }\n xMax = max(mutData$V2, wtData$V2)\n if (xMax < 0) {\n xMax = 0\n }\n xMin = 1.4*xMin\n xMax = 1.4*xMax\n \n yMax = 1.1*max(density(mutData$V2)$y, density(wtData$V2)$y)\n \n pdf(paste(mutGene, \".msep.pdf\", sep = \"\"), height = 5, width = 5)\n plot(density(mutData$V2), col = \"red\", xlim = c(xMin, xMax), ylim = c(0, yMax), main = mutGene, xlab = \"\")\n par(new = T)\n plot(density(wtData$V2), xlim = c(xMin, xMax), ylim = c(0, yMax), main = \"\", xlab = \"\", ylab = \"\")\n dev.off()\n \"\"\"\n \n backgroundR = \"\"\"#!/usr/bin/env Rscript\n args = commandArgs(TRUE)\n mutGene = args[1]\n realFile = args[2]\n nullFile = args[3]\n\n realData = read.table(realFile)$V1\n nullData = read.table(nullFile)$V1\n nullData = nullData[!is.nan(nullData)]\n minVal = min(realData, nullData)\n if (minVal > 0) {\n minVal = 0\n }\n maxVal = max(realData, nullData)\n if (maxVal < 0) {\n maxVal = 0\n }\n minVal = 1.4*minVal\n maxVal = 1.4*maxVal\n\n pdf(paste(mutGene, \".background.pdf\", sep = \"\"), height = 5, width = 5)\n plot(density(nullData), main = mutGene, xlim = c(minVal, maxVal))\n abline(v = realData[1], col = \"red\")\n dev.off()\n \"\"\"\n \n f = open(\"msep.R\", \"w\")\n f.write(msepR)\n f.close\n f = open(\"background.R\", \"w\")\n f.write(backgroundR)\n f.close\n system(\"chmod 755 msep.R background.R\")", "def main(argv):\n print(\"Market Data Generator\")\n\n now_nanos.sim_time = 0\n args = parse_args(argv)\n print(\"Output file '{}' in {} format\".format(args.outputfile.name,\n 'CSV' if args.csv else 'binary'))\n\n if args.csv:\n name = args.outputfile.name\n args.outputfile.close()\n args.outputfile = open(name, \"w\")\n\n mids = gen_book(args)\n colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black']\n\n for (i, series) in enumerate(mids):\n pyplot.plot(range(args.samples), numpy.array(series), colors[i % len(colors)])\n pyplot.show()", "def main():\n df_data = import_clean_process()\n plot_data_matplotlib(df_data)\n return", "def start():\n\n\tif check_file_location(output_file):\n\t\tpass \n\telse:\n\t\tresult = process_file(input_file, output_file, df_chunk_file)\n\t\tif result != 1:\n\t\t\traise (\"Error ! 
Cannot generate file\")", "def script_generator(self):\n\n self._get_free_tcp_port()\n\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--resume_from latest.pth \"\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def run_script(self):\n pass", "def main():\n\n NUM_TRAIN = noise.init_train_thresh\n NUM_TEST = 20\n XDIM = 1\n\n # Train the emulator\n x_train = np.random.uniform(size=(NUM_TRAIN, XDIM))\n y_train = np.array([noise(x) for x in x_train])\n\n # Output error estimates\n noise.output_err = True\n\n # Get values from the trained emulator\n x_emu = np.random.uniform(size=(NUM_TEST, XDIM))\n\n y_emu = np.zeros_like(x_emu)\n y_err = np.zeros_like(x_emu)\n\n for i, x in enumerate(x_emu):\n val, err = noise(x)\n y_emu[i] = val\n y_err[i] = err\n\n # Plot the results\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.scatter(x_train[:, 0], y_train, marker=\"+\", label=\"training values\")\n ax.errorbar(\n x_emu,\n y_emu[:, 0],\n yerr=y_err.flatten(),\n linestyle=\"None\",\n marker=\"o\",\n capsize=3,\n label=\"emulator\",\n color=\"red\",\n )\n\n ax.legend()\n\n # `__file__` is undefined when running in sphinx\n try:\n fig.savefig(__file__ + \".png\")\n except NameError:\n pass", "def start(self) -> None:\n\n self.doc = self.doc + r'''\n \\documentclass[\n 10pt, % Main document font size\n a4paper, % Paper type, use 'letterpaper' for US Letter paper\n ]{scrartcl}\n\n \\usepackage{graphicx}\n \\usepackage{epstopdf}\n \\usepackage{float}\n \\usepackage[scale=0.75]{geometry} % Reduce document margins\n \\usepackage{hyperref}\n \\usepackage{longtable}\n\n \\begin{document}\n\n \\title{Automatic Exploratory Data Analysis} % The article title\n\n \\subtitle{Study Case} % Uncomment to display a subtitle\n\n \\author{Jacob} % The article author(s) - author affiliations need to be specified in the AUTHOR AFFILIATIONS block\\\n\n \\maketitle % Print the title/author/date block\n\n \\newpage\n \\tableofcontents % Print the table of contents\n\n \\newpage\n \\listoffigures % Print the list of figures\n\n \\newpage\n \\listoftables % Print the list of tables\n '''", "def makecldf(args):\n with_dataset(args, Dataset._install)", "def run_main():\n\n parser = argparse.ArgumentParser(description=\"Scan a run directory and create files to \")\n parser.add_argument('--run-directory', dest='run_directory',\n action='store', default='',\n help='path to directory with xed files to process')\n args = parser.parse_args(sys.argv[1:])\n\n if not os.path.isdir(args.run_directory):\n sys.stderr.write(\"{0} is not a directory, exiting\\n\".format(args.run_directory))\n return 1\n 
run_name = os.path.abspath(args.run_directory)\n\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n\n if not os.path.exists('info'):\n os.mkdir('info')\n\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = \"info/{0}_{1}_files.csv\".format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = \"srm://ceph-se.osgconnect.net:8443/srm/v2/\" + \\\n \"server?SFN=/cephfs/srm/xenon/\" + \\\n entry.replace('/xenon/', '')\n csv_writer.writerow([run_name, directory, uri])", "def run(self):\n\n print \"\\n\\n\\tPlease Note: Templates are generated based off\"\n print \"\\t of the OS environment variables that are set.\"\n print \"\\t* Running ReHeat.\"\n\n self.set_creds()\n self.gen_ip() # used in template description\n self.gen_tenant_id()\n if self.reheat_error:\n return self.reheat_errmsg\n\n print \"\\t* You have opted to generate %s file[s]\" % self.template_type\n if 'all' in self.template_type:\n self.gen_heat_data()\n self.gen_heat_template()\n self.gen_compute_data()\n return self.gen_compute_template()\n elif 'heat' in self.template_type:\n self.gen_heat_data()\n return self.gen_heat_template()\n elif 'compute' in self.template_type:\n self.gen_compute_data()\n return self.gen_compute_template()\n else:\n raise Exception(\"User provided an improper template type.\")", "def main():\n summary = process_text()\n # TODO: turn this into a PDF report\n paragraph = \"<br/>\".join(summary)\n title = \"Processed Update on {}\".format(date.today().strftime('%B %d, %Y'))\n attachment = f'{path}/processed.pdf'\n reports.generate_report(attachment, title, paragraph)\n\n # TODO: send the PDF report as an email attachment\n sender = \"[email protected]\"\n receiver = \"{}@example.com\".format(os.environ.get('USER'))\n subject = \"Upload Completed - Online Fruit Store\"\n body = \"All fruits are uploaded to our website successfully. 
A detailed list is attached to this email.\"\n message = emails.generate_email(sender, receiver, subject, body, attachment)\n emails.send_email(message)", "def main():\n args = get_arguments()\n\n mode = args.mode\n sdf_path = os.path.expandvars(args.sdf_path)\n summary_file = os.path.expanduser(args.summary_file)\n assert os.path.exists(sdf_path), \"sdf-path not exists: {}\".format(sdf_path)\n\n if mode == \"SUM\":\n summary(sdf_path, summary_file)\n elif mode == \"VAL\":\n validate(sdf_path, summary_file)", "def _auto_run(args):\n\n # TDH (2020-01-13) For developement testing the following section\n # replicates the functionality of \"standard_analysis.py\" so that\n # json_results can be created and used to create the graph image\n # files.\n import benchmark_postprocessing as bmpp\n file_list = bmpp.get_benchmark_files(args.benchmark_results_dir)\n json_results = bmpp.parse_files(file_list)\n json_results = bmpp.parse_and_add_benchmark_metadata(json_results)\n run_id_list = get_unique_run_ids(json_results)\n\n # TDH (2020-01-13) - Create unqiue reports for each run ID found.\n # Even a single results directory can contain results from multiple\n # run IDs.\n for run_id in run_id_list:\n output_path = os.path.join(\n args.benchmark_results_dir,\n '{}_report'.format(run_id))\n\n # TDH: Thorough attempt to safely create the results directory and\n # provide good error reporting if something went wrong.\n try:\n os.mkdir(output_path)\n except OSError:\n logging.error('Failed to create directory for report at {}'.format(\n output_path))\n create_standard_analysis_report(output_path,\n json_results,\n run_id)", "def main():\n\tparser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n\t\t\t\t\t epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n\tparser.add_argument('dfile', type=str, help='Data file name')\n\tparser.add_argument('dhist', type=str, help='Data object name')\n parser.add_argument(\"-scale\", type=float, dest='scale', help='Scale', default=1.0)\n parser.add_argument(\"-doption\", type=str, dest='doption', help='Data draw option', default=\"colz\")\n parser.add_argument(\"-title\", type=str, dest='title', help='Plot title', default=None)\n parser.add_argument(\"-xtitle\", type=str, dest='xtitle', help='x-axis title', default=None)\n parser.add_argument(\"-ytitle\", type=str, dest='ytitle', help='y-axis title', default=None)\n parser.add_argument(\"-logx\", action='store_true', default=False, dest='logx', help='Set log scale for the horizontal axis')\n parser.add_argument(\"-logy\", action='store_true', default=False, dest='logy', help='Set log scale for the vertical axis')\n parser.add_argument(\"-o\", type=str, dest='output', help='Output file name. 
If given then the canvas is not shown.', default=\"\")\n\n\targs = parser.parse_args()\n\n ROOT.gStyle.SetOptStat(False)\n ROOT.gStyle.SetPalette(ROOT.kTemperatureMap)\n\n if args.output:\n ROOT.gROOT.SetBatch(True)\n\n df = ROOT.TFile(args.dfile)\n dh = df.Get(args.dhist)\n\n dh.Scale(args.scale)\n\n if args.title:\n dh.SetTitle(args.title)\n\n if args.xtitle:\n dh.SetXTitle(args.xtitle)\n \n if args.ytitle:\n dh.SetYTitle(args.ytitle)\n \n dh.Draw(args.doption)\n\n ROOT.gPad.SetLogx(args.logx)\n ROOT.gPad.SetLogy(args.logy)\n\n if args.output:\n ROOT.gPad.Print(args.output)\n else:\n ROOT.gPad.GetCanvas().ToggleEventStatus()\n input()", "def main():\n\n dofile = \"thebook\"\n\n #spellcheck()\n\n common_options = '--encoding=utf-8 --examples_as_exercises '\n\n # --- HTML ---\n\n common_html_options = ' '\n\n # HTML Bootstrap\n bootstrap_options = ' --html_style=bootswatch_readable --html_code_style=inherit --html_pre_style=inherit --toc_depth=2 --pygments_html_style=default --html_template=template_bootstrap_wtoc.html --html_figure_caption=bottom --html_figure_hrule=top+bottom' \n\n html(\n dofile,\n options=common_options + common_html_options + bootstrap_options,\n split=True)\n\n # One long HTML file\n #html(dofile, options=common_options + common_html_options + ' --html_style=bloodish --html_output=%s-1' % dofile, split=False)\n\n # Solarized HTML\n #html(dofile, options=common_options + common_html_options + ' --html_style=solarized3 --html_output=%s-solarized' % dofile, split=True)\n\n mksnippets()\n sys.exit(1)\n\n # --- latex ---\n\n common_latex_options = ' --latex_code_style=vrb'\n\n for version in 'paper', 'screen': # , 'A4', '2up', 'A4-2up':\n latex(\n dofile,\n latex_program='pdflatex',\n options=common_options + common_latex_options,\n version=version,\n postfix='auto')\n\n # --- Sphinx ---\n\n# sphinx_themes = ['pyramid',]\n# for theme in sphinx_themes:\n# dirname = 'sphinx-rootdir' if len(sphinx_themes) == 1 else 'sphinx-rootdir-%s' % theme\n# sphinx(\n# dofile,\n# options=common_options + '',\n# dirname=dirname,\n# theme=theme,\n# automake_sphinx_options='',\n# split=False)\n\n # Dump all Unix commands run above as a Bash script\n bash = open('tmp_make.sh', 'w')\n print 'see tmp_make.sh for an equivalent auto-generated unix script'\n bash.write('''#!/bin/bash\nset -x # display all commands in output\n\n# Safe execution of a Unix command: exit if failure\nfunction system {\n \"$@\"\n if [ $? 
-ne 0 ]; then\n echo \"make.sh: unsuccessful command $@\"\n echo \"abort!\"\n exit 1\n fi\n}\n''')\n for cmd in unix_command_recorder:\n if cmd.startswith('doconce format') or cmd.startswith('rm '):\n bash.write('\\n') # delimiter line in script\n bash.write('system ' + cmd + '\\n')\n bash.close()\n\n print 'see tmp_output.log for the output of all the commands'", "def prepare_afd_script():\n\n\t## get the variable to explain\n\tvar_to_explain = \"undef\"\n\tcmpt = 0\n\tdata_file = open(\"data/data.csv\", \"r\")\n\tfor line in data_file:\n\t\tif(cmpt == 0):\n\t\t\tline_in_array = line.split(\",\")\n\t\t\tvar_to_explain = line_in_array[0]\n\t\tcmpt += 1\n\tdata_file.close()\n\tvar_to_explain = var_to_explain.replace(\"\\\\\", \".\")\n\tif(var_to_explain[0] == \".\"):\n\t\tvar_to_explain = \"X\" + var_to_explain\n\n\t## write the script\n\ttemplate_file = open(\"scripts/afd_template.R\", \"r\")\n\tafd_script = open(\"scripts/afd_script.R\", \"w\")\n\tcmpt = 1\n\tfor line in template_file:\n\t\tif(cmpt == 108):\n\t\t\tline_to_write = \"data.lda <- lda(\"+str(var_to_explain)+\" ~ ., data=data)\"\n\t\t\tafd_script.write(line_to_write+\"\\n\")\n\t\telif(cmpt == 123):\n\t\t\t line_to_write = \"ldahist(data = data.lda.values$x[,comp], g=\"+str(var_to_explain)+\")\"\n\t\t\t afd_script.write(line_to_write+\"\\n\")\n\t\telif(cmpt == 132):\n\t\t\tline_to_write = \"text(data.lda.values$x[,1],data.lda.values$x[,2],\"+str(var_to_explain)+\",cex=0.7,pos=4,col=\\\"red\\\")\"\n\t\t\tafd_script.write(line_to_write+\"\\n\")\n\t\telse:\n\t\t\tafd_script.write(line)\n\t\tcmpt += 1\n\tafd_script.close()\n\ttemplate_file.close()", "def main():\n parser = argparse.ArgumentParser(\n description=\"Generate table summary for our experiments from our saved small data.\"\n )\n parser.add_argument(\n \"--path\",\n type=str,\n default=None,\n help=\"the path to saved testing smalldata for RLSO\",\n )\n parser.add_argument(\n \"--sigma_type\",\n type=int,\n default=0,\n help=\"How to show sigma data. 0 shows no sigma data. 1 shows sigma data in the same column. 2 shows sigma data in a separate column. \",\n )\n parser.add_argument(\n \"--titlestring\",\n type=str,\n default=None,\n help=\"String to append to our plot title. Defaults to None\",\n )\n parser.add_argument(\n \"--dim_list\",\n type=list,\n default=None,\n help=\"List of dimensions the experiments were run on.\",\n )\n parser.add_argument(\n \"--table_type\",\n type=int,\n default=0,\n help=\"Type of table. 
0 for Latex, and 1 for markdown for github.\",\n )\n args = parser.parse_args()\n\n if args.path is None:\n print(\"Path to pickle data needed!\")\n return\n\n pickle_savepath = args.path\n with open(pickle_savepath, \"rb\") as pickle_file:\n data = pickle.load(pickle_file)\n\n table_data = construct_table_data(\n data, sigma_style=args.sigma_type, dim_list_override=args.dim_list\n )\n\n if args.table_type == 0:\n table_type = \"latex_raw\"\n else:\n table_type = \"github\"\n\n print_table(\n table_data, args.titlestring, tablefmt=table_type, sigma_style=args.sigma_type\n )\n return", "def main():\n year = time.strftime(\"%Y\")\n month = time.strftime(\"%m\")\n today = time.strftime(\"%Y%m%d\")\n homedir = \"/home/\" + user + \"/raspi-sump/\"\n webchart.create_folders(year, month, homedir)\n webchart.create_chart(homedir)\n webchart.copy_chart(year, month, today, homedir)", "def run_module_text_to_gdf(args):\n GeoGenerator = dataset_builder.GeoDataGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory,\n )\n GeoGenerator.run(n_jobs=args.n_jobs, starting_block=args.starting_block)", "def __main__():\n try:\n gff_file = sys.argv[1]\n mat_file = sys.argv[2]\n except:\n print __doc__\n sys.exit(-1)\n\n genes, transcripts, exons, utr3, utr5, cds = GFFParse(gff_file) \n gene_models = CreateGeneModels(genes, transcripts, exons, utr3, utr5, cds)\n # TODO Write to matlab/octave struct instead of cell arrays.\n sio.savemat(mat_file, \n mdict=dict(genes=gene_models), \n format='5', \n oned_as='row')", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def main(sourcedatafile, targetdatafile, documentationfile, tail):\n data = load_data(sourcedatafile)\n genres = get_metadata(data)\n featurematrix = get_featurematrix(data)\n make_scatterplot(genres, featurematrix, targetdatafile)\n docfile.write(sourcedatafile, targetdatafile, documentationfile, docstring, tail, __file__)", "def main():\n subjectlist = ['hel{}'.format(i) for i in range(1, 20) if i is not 9]\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'randomise_setup_fslmerge'))\n logfile.info('Setup for randomise.')\n logfile.info('Making a 4D data set by combining images')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n for subclust_n in range(1, 4):\n outfilename = os.path.join(outdir,\n 'knnward_clst1_subclust{}_4Dfile'.format(\n subclust_n))\n mergefsl(logfile, make_file_list(subjectlist, subclust_n), outfilename)", "def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)", "def main(args):\n gt_path = args.ground_truth\n djdd_path = args.djdd\n bjdd_path = args.bjdd\n\n mse_fn = th.nn.MSELoss()\n psnr_fn = PSNR()\n\n device = \"cpu\"\n # if th.cuda.is_available():\n # device = \"cuda\"\n\n pdf = pd.DataFrame(columns=[\"filename\",\"imgid\", \"PSNR_for_DJDD\", \"MSE_for_DJDD\", \"PSNR_for_BJDD\", \"MSE_for_BJDD\"])\n\n count = 0\n msedjdd = 0.0\n psnrdjdd = 0.0\n\n msebjdd = 0.0\n psnrbjdd = 0.0\n\n for root, _, files in os.walk(gt_path):\n for idx, name in enumerate(files):\n \n # djdd image\n output_djdd = np.array(imread(os.path.join(djdd_path, name+\"_0_output.png\"))).astype(np.float32) / 
(2**8-1)\n output_djdd = th.from_numpy(np.transpose(output_djdd, [2,0,1])).to(device).unsqueeze(0)\n\n #bjdd image\n output_bjdd = np.array(imread(os.path.join(bjdd_path, name.split('.')[0]+\"_sigma_0_bayer_PIPNet.png\"))).astype(np.float32) / (2**8-1)\n output_bjdd = th.from_numpy(np.transpose(output_bjdd, [2,0,1])).to(device).unsqueeze(0)\n\n # gt image\n target = np.array(imread(os.path.join(root, name))).astype(np.float32) / (2**8-1)\n target = th.from_numpy(np.transpose(target, [2, 0, 1])).to(device).unsqueeze(0)\n\n\n target_djdd = crop_like(target, output_djdd)\n target_bjdd = crop_like(target, output_bjdd)\n\n psnr_djdd = psnr_fn(output_djdd, target_djdd).item()\n mse_djdd = mse_fn(output_djdd, target_djdd).item()\n\n psnr_bjdd = psnr_fn(output_bjdd, target_bjdd).item()\n mse_bjdd = mse_fn(output_bjdd, target_bjdd).item()\n\n psnrdjdd += psnr_djdd\n msedjdd += mse_djdd\n psnrbjdd += psnr_bjdd\n msebjdd += mse_bjdd\n\n count += 1\n\n LOG.info(f\"imgid: {idx}, PSNR_BJDD: {psnr_bjdd}, MSE_BJDD: {mse_bjdd}, PSNR_DJDD: {psnr_djdd}, MSE_DJDD: {mse_djdd}\")\n pdf = pdf.append({\n \"filename\": name,\n \"imgid\": idx,\n \"PSNR_for_DJDD\": psnr_djdd,\n \"MSE_for_DJDD\": mse_djdd,\n \"PSNR_for_BJDD\": psnr_bjdd,\n \"MSE_for_BJDD\": mse_bjdd\n }, ignore_index=True)\n # pdb.set_trace()\n\n msebjdd /= count\n psnrbjdd /= count\n\n msedjdd /= count\n psnrdjdd /= count\n\n LOG.info(\"--------------BJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrbjdd, msebjdd)\n\n LOG.info(\"--------------DJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrdjdd, msedjdd)\n pdb.set_trace()\n pdf.to_csv(\"/workspace/presentation_compare.csv\")", "def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def main():\n # Path used in assembly and previously discovered min year value.\n split_in_dir_path = \"../../data/split\"\n avg_5_in_dir_path = \"../../data/averaged_5\"\n avg_25_in_dir_path = \"../../data/averaged_25\"\n avg_50_in_dir_path = \"../../data/averaged_50\"\n dates_mat_path = \"../../data/dates_matrix/dates_matrix.npy\"\n min_year = 1962\n data_out_dir_path = \"../../data/rnn_set/data\"\n labels_out_dir_path = \"../../data/rnn_set/labels\"\n assemble_set(\n split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,\n avg_50_in_dir_path, dates_mat_path, min_year,\n data_out_dir_path, labels_out_dir_path\n )", "def run():\n\twrite_fuel_data()", "def main(self):\n try:\n self.parse_args()\n self.run()\n return 0\n except AnalysisBackendError as e:\n L.error(e)\n return 1", "def main():\n\n database = MongoDbUtil('ro').database()\n\n tag = 'Px1id'\n daemons = 
['daq_files_watcher', 'jobs_validator', 'submitter']\n colls = ['%s_%s'%(coll, tag) for coll in daemons]\n\n datas = []\n for daemon, coll in zip(daemons, colls):\n last_doc = database[coll].find().skip(database[coll].count()-1)[0]\n accum_stats = last_doc['accum_stats']\n\n vals = {}\n timestamps = []\n for key in accum_stats.keys():\n vals[key] = []\n\n for doc in database[coll].find():\n timestamps.append(doc['date'])\n for key in vals:\n vals[key].append(doc['accum_stats'][key])\n\n urls = []\n for key in vals:\n urls.append(draw(timestamps, vals[key], daemon, key))\n\n datas.append({'title': daemon, 'urls': urls})\n\n make_index_file(tag, datas)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"Fasta rDNA input\")\n parser.add_argument(\"output\", help=\"GFF annotation\")\n parser.add_argument(\"kingdom\", help=\"Choose kingdom\")\n args = parser.parse_args()\n command(args)", "def main(df):\n df1 = analysis.getCostByType(df)\n df2 = analysis.getCostByMonth(df)\n df3 = analysis.getCostByMonthAndType(df)\n writer.createSummaryStats(df)\n fileNames = ['CostByType', 'CostByMonth', 'CostByMonthAndType']\n createSpreadsheet([df1, df2, df3], fileNames)\n return", "def run(self):\n\n section = self.config['make_diagnostics_files']\n mesh_name = section.get('mesh_name')\n mesh_filename = section.get('mesh_filename')\n cores = section.getint('cores')\n with_ice_shelf_cavities = section.getboolean('with_ice_shelf_cavities')\n\n symlink(os.path.join('..', mesh_filename), 'restart.nc')\n make_diagnostics_files(self.config, self.logger, mesh_name,\n with_ice_shelf_cavities, cores)", "def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())", "def main():\n input_file_path = sys.argv[1]\n output_file_path = sys.argv[2]\n gps_df = create_df(input_file_path) # creates a data frame\n gps_df = clean_data(gps_df) # cleans the data\n print('Cleaning done')\n write_to_kml(gps_df, output_file_path) # writes to kml file", "def main():\n if len(sys.argv) == 2 and sys.argv[1] == 'train':\n trainer = FlightModelTrainer()\n trainer.train()\n return 0\n\n if len(sys.argv) == 2 and sys.argv[1] == 'graphics':\n trainer = FlightModelTrainer()\n trainer.visualize()\n return 0\n\n predictor = FlightPredictor(path_to_weather=WEATHER_TRAIN_DATA_PATH)\n result = predictor.predict(pd.read_csv(FLIGHTS_TEST_DATA_PATH))\n print('result')\n print(result)\n # result.to_csv(\"out.csv\")\n return 0", "def run(args):\n print(\"matdb Copyright (C) 2019 HALL LABS\")\n print(\"This program comes with ABSOLUTELY NO WARRANTY.\")\n print(\"This is free software, and you are welcome to redistribute it under \"\n \"certain conditions.\")\n if args is None:\n return\n\n #No matter what other options the user has chosen, we will have to create a\n #database controller for the specification they have given us.\n from matdb.database import Controller\n cdb = Controller(args[\"dbspec\"])\n\n if args[\"d\"]:\n _generic_find(cdb, \"Database Context Instances\", args[\"p\"])\n if args[\"t\"]:\n _generic_find(cdb.trainers, \"Fitter Context Instances\", args[\"p\"])", "def main(\n files: List[Path] = typer.Argument(default=None, dir_okay=False, exists=True),\n template: Optional[str] = typer.Option(\n None, '--template', 
help='Name of template file'\n ),\n logo: Optional[str] = typer.Option(None, '--logo', help='Name of logo file'),\n logo_width: Optional[str] = typer.Option(\n None, '--logo-width', help='Logo width (default 35mm)'\n ),\n highlight_style: Optional[str] = typer.Option(None, '--highlight-style',\n help='Specify coloring style to be used in highlighting source code'),\n syntax_definition: Optional[str] = typer.Option(None, '--syntax-definition',\n help='Specify a directory which contains syntax definition files'),\n no_toc: bool = typer.Option(\n False, '--no-toc', help='table of contents in PDF document'\n ),\n no_number_sections: bool = typer.Option(False, '--no-number-sections', help='no section numbering'),\n\n no_titlepage: bool = typer.Option(False, '--no-titlepage', help='title in PDF document'),\n tex_file: bool = typer.Option(\n False, '--tex', help='create TeX file instead of PDF document'\n ),\n email: Optional[str] = typer.Option(None, '--email', help='Author email'),\n company: Optional[str] = typer.Option(None, '--company', help='Name of company'),\n department: Optional[str] = typer.Option(\n None, '--department', help='Name of department'\n ),\n confidential: bool = typer.Option(\n False, '--confidential', help='indicate confidential'\n ),\n debug: bool = typer.Option(False, '--debug', help='turns debugging on'),\n pdf_engine: str = typer.Option(\n 'xelatex',\n '--pdf-engine',\n help='Specify pdf engine, one of lualatex, xelatex or tectonic ',\n ),\n _version: bool = typer.Option(\n None, '-V', '--version', callback=version_callback, help='Show version and exit'\n ),\n):\n\n if not files:\n typer.echo('Error: Must specify at least one .md file.')\n raise typer.Abort()\n\n mdfiles: List[str] = [str(md) for md in files]\n\n template = template or os.environ.get('MD2PDF_TEMPLATE')\n if template is None:\n print('No template specified')\n sys.exit(1)\n\n email = email or os.environ.get('MD2PDF_AUTHOR_EMAIL')\n footer_center = ''\n\n # command line overwrites `MD2PDF_PDF_ENGINE`. 
if both are not given\n # then `xelatex` is the default\n pdf_engine = pdf_engine or os.environ.get('MD2PDF_PDF_ENGINE') or 'xelatex'\n # check that pdf-engine is one of the following\n if pdf_engine not in ['xelatex', 'lualatex', 'tectonic']:\n print('--pdf-engine must be one of \"xelatex\", \"lualatex\", \"tectonic\"')\n sys.exit(1)\n\n ext = '.pdf'\n if tex_file:\n ext = '.tex'\n\n if len(mdfiles) == 1:\n toml_file = os.path.splitext(mdfiles[0])[0] + '.toml'\n\n if os.path.exists(toml_file):\n print(f'TOML file {toml_file} found')\n parsed_toml = toml.load(toml_file)\n default_val = parsed_toml.get('default')\n if default_val is None:\n print(f'No file names found in {toml_file}')\n else:\n mdfiles = default_val.get('files')\n\n for mdf in mdfiles:\n print(f'Compiling {mdf}')\n\n main_mdfile = os.path.realpath(mdfiles[0])\n\n outfile = Path(main_mdfile).stem + ext\n\n year = date.today().year\n\n company = company or os.environ.get('MD2PDF_COMPANY')\n department = department or os.environ.get('MD2PDF_DEPARTMENT')\n\n if company:\n if confidential:\n footer_center = f'© Copyright {year} {company}'\n else:\n footer_center = f'{year} {company}'\n\n pdcmd = PandocCmd(outfile)\n pdcmd.append(f'--template={template}')\n pdcmd.append(f'--pdf-engine={pdf_engine}')\n\n pdcmd.set_v('footer-center', footer_center)\n pdcmd.set_v('company', company)\n pdcmd.set_v('department', department)\n\n syntax_definition = syntax_definition or os.environ.get('MD2PDF_SYNTAX_DEFINITION_DIR')\n if syntax_definition is not None:\n add_syntax_definition(pdcmd, syntax_definition)\n\n pdcmd.append('--highlight-style')\n highlight_style = highlight_style or os.environ.get('MD2PDF_HIGHLIGHT_STYLE')\n if highlight_style is None:\n pdcmd.append('pygments')\n else:\n check_highlight_style(highlight_style)\n pdcmd.append(highlight_style)\n\n if not no_number_sections:\n pdcmd.append('--number-sections')\n\n if no_titlepage:\n pdcmd.set_m('titlepage', 'false')\n\n logo = logo or os.environ.get('MD2PDF_LOGO')\n pdcmd.set_v('logo', logo)\n\n logo_width = logo_width or os.environ.get('MD2PDF_LOGO_WIDTH')\n pdcmd.set_v('logo-width', logo_width)\n\n pdcmd.set_m('email', email)\n\n if not no_toc:\n pdcmd.append('--toc')\n\n pdcmd.extend(mdfiles)\n\n if debug:\n print(' '.join(pdcmd.pandoc))\n\n\n pdcmd.run()", "def makeCommands(f0,psi0,th,logFileName,mat,fnyld,fnhrd):\n from mk.library.lib import gen_tempfile\n stdoutFileName = gen_tempfile(\n prefix='stdout-mkrun')\n # stdoutFileName ='/tmp/dump'\n\n cmd = 'python main.py --fn %s -f %5.4f -p %+6.1f -t %+7.2f --fnyld %s --fnhrd %s '%(\n logFileName,f0,psi0,th,fnyld,fnhrd)\n\n if mat!=-1:\n cmd = cmd + ' --mat %i'%mat\n cmd = cmd + ' > %s'%stdoutFileName\n print 'cmd:',cmd\n return cmd", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Generating NUFEB simulation files\")\n\n # create nutrients\n light = Nutrient(1e-1, None, None, \"g\", \"nn\")\n co2 = Nutrient(float(args.co2), 1.9e-09, 44.01, \"l\", \"nn\")\n o2 = Nutrient(0.28125, 2.30e-9, 32, \"l\", \"nn\")\n sucrose = Nutrient(float(args.sucrose), 5.2e-10, 342.3, \"l\", \"nn\")\n gco2 = Nutrient(0, None, 44.01, \"g\", \"nn\")\n TEMPLATES_DIR = (Path(__file__).parent) / \"templates\"\n\n captureRate = round(1000 / args.timestep)\n # define dump parameters\n dump_list = {\n \"vtk_dump\": f\"dump atom_vtk all vtk {captureRate} dump*.vtu id type diameter vx vy vz fx fy fz \\n dump grid_vtk all grid/vtk {captureRate} dump_%_*.vti con\",\n \"image_dump\": f\"dump du_image all image 
{captureRate} image.*.jpg type diameter zoom 2 bacillus type size 1280 720 view 45 60 \\n dump_modify du_image acolor 1 green acolor 2 red\",\n \"movie_dump\": f\"dump du_mov all movie {captureRate} movie.avi type diameter zoom 1.5 bacillus type size 1280 720 view 0 0 \\n dump_modify du_mov acolor 1 green acolor 2 red\",\n \"hdf_dump\": f\"dump du_h5 all nufeb/hdf5 {captureRate} dump.h5 id type x y z vx vy vz fx fy fz radius conc reac\",\n }\n\n dumps = defaultdict(list)\n for i in range(4):\n tmp = [\"vtk_dump\", \"image_dump\", \"movie_dump\", \"hdf_dump\"]\n dumps[tmp[i]]\n\n for dump, dump_var in zip(\n [args.vtk, args.img, args.movie, args.hdf],\n [\"vtk_dump\", \"image_dump\", \"movie_dump\", \"hdf_dump\"],\n ):\n if dump is True or dump == \"True\":\n dumps[dump_var] = dump_list[dump_var]\n else:\n dumps[dump_var] = \"\"\n\n ## Species-specific parameters\n\n # check for runs folder\n if not os.path.isdir(\"runs\"):\n os.mkdir(\"runs\")\n x = float(args.dims.split(\",\")[0])\n y = float(args.dims.split(\",\")[1])\n z = float(args.dims.split(\",\")[2])\n for n in range(1, int(args.num) + 1):\n culture = Culture(args)\n atoms_list = []\n bacilli_list = []\n # Create list of atoms and bacilli for atom definition file\n for cell in culture.cells:\n atoms_list.append(cell.Atom())\n bacilli_list.append(cell.Bacillus())\n # make atom definition file\n for r in range(1, int(args.reps) + 1):\n L = [\n \" NUFEB Simulation\\r\\n\\n\",\n f\" {args.cells_init} atoms \\n\",\n f\" {len(culture.cell_types)} atom types \\n\",\n f\" {args.cells_init} bacilli \\n\\n\",\n f\" 0.0e-4 {x :.2e} xlo xhi \\n\",\n f\" 0.0e-4 {y :.2e} ylo yhi \\n\",\n f\" 0.0e-4 {z :.2e} zlo zhi \\n\\n\",\n \" Atoms \\n\\n\",\n ]\n atoms = L + atoms_list\n atoms.append(\"\\n\")\n atoms.append(\" Bacilli \\n\\n\")\n atoms = atoms + bacilli_list\n # write atom definition file\n f = open(\n f\"runs/atom_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{r}.in\",\n \"w+\",\n )\n f.writelines(atoms)\n RUN_DIR = (\n Path(\"runs\")\n / f\"Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}\"\n )\n if not os.path.isdir(RUN_DIR):\n os.mkdir(RUN_DIR)\n # os.mkdir(f'runs/Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}')\n # write initial conditions json file\n dumpfile = open(RUN_DIR / \"metadata.json\", \"w\")\n # dumpfile = open(f\"/runs/Run_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}_{args.reps}/metadata.json\",'w')\n json.dump(CellInfo, dumpfile, indent=6)\n dumpfile.close()\n ###\n\n # write Inputscript\n # open the file\n filein = open(TEMPLATES_DIR / \"bacillus.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"Bacillus.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": args.cells_init,\n \"SucRatio\": culture.SucRatio,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n \"Replicates\": args.reps,\n \"Timesteps\": args.ntimesteps,\n \"ts\": args.timestep,\n \"CYANOGroup\": culture.cyGroup,\n \"ECWGroup\": culture.ecwGroup,\n \"Zheight\": float(args.dims.split(\",\")[2]),\n \"CYANODiv\": culture.cyDiv,\n \"ECWDiv\": culture.ecwDiv,\n \"light\": light.concentration,\n \"co2\": co2.concentration,\n \"o2\": o2.concentration,\n \"sucrose\": sucrose.concentration,\n \"gco2\": gco2.concentration,\n \"CYANOMonod\": culture.cyMonod,\n \"ECWMonod\": culture.ecwMonod,\n \"CYANOcount\": culture.cyanoCount,\n \"ECWcount\": culture.ecwCount,\n \"v_ncyano\": 
culture.vcyano,\n \"v_necw\": culture.vecw,\n \"vtk_dump\": dumps[\"vtk_dump\"],\n \"image_dump\": dumps[\"image_dump\"],\n \"movie_dump\": dumps[\"movie_dump\"],\n \"hdf_dump\": dumps[\"hdf_dump\"],\n }\n )\n f = open(\n f\"./runs/Inputscript_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}.lmp\",\n \"w+\",\n )\n f.writelines(result)\n\n # write local run script\n # open the file\n filein = open(TEMPLATES_DIR / \"local.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"local.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": n,\n \"SucRatio\": culture.SucRatio,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n \"Reps\": args.reps,\n }\n )\n f = open(\n f\"./runs/local_{culture.n_cyanos}_{culture.n_ecw}_{culture.SucPct}.sh\", \"w+\"\n )\n f.writelines(result)\n # write slurm script\n # open the file\n filein = open(TEMPLATES_DIR / \"slurm_dev.txt\")\n # filein = resources.read_text(\"nufeb_tools.templates\", \"Slurm.txt\")\n # read it\n src = Template(filein.read())\n # do the substitution\n result = src.safe_substitute(\n {\n \"n\": args.cells_init,\n \"job\": f\"NUFEB_cyano{n}\",\n \"USER\": args.user,\n \"Replicates\": args.reps,\n \"SucPct\": culture.SucPct,\n \"n_cyanos\": culture.n_cyanos,\n \"n_ecw\": culture.n_ecw,\n }\n )\n _logger.info(\"Script ends here\")", "def generate(env, daos_prefix, comp_prefix, args):\n analyzer = Analyzer(env, daos_prefix, comp_prefix, args)\n analyzer.analyze_on_exit()", "def main():\n tng.api.runner()", "def main():\n\n config = None\n\n try:\n args = get_args()\n config = process_config(args.config)\n raise RuntimeError(\"Missing or invalid arguments\")\n except Exception as e:\n logging.error(\"Failed\", exc_info=e)\n\n print(\"Create the data generator.\")\n # data_loader = MnistDataLoader(config=config)\n data_loader = IrisDataLoader(config=config)\n train_data = data_loader.get_train_data()\n test_data = data_loader.get_test_data()\n\n print(\"Build the model\")\n # cnn_model = ConvModel(config=config).build_model()\n cnn_model = ANNModel(config=config).build_model()\n\n print(\"Load the best weights\")\n cnn_model.load_weights(\"experiments/{}/{}/checkpoints/{}-weights.best.hdf5\".format(\n config.evaluation.date, config.exp.name, config.exp.name))\n\n print(\"Evaluate the model\")\n print(\"Training Metrics\")\n evaluate(model=cnn_model, data=train_data)\n print(\"Testing Metrics\")\n evaluate(model=cnn_model, data=test_data)\n\n # print(\"Visualize loss and accuracy for Training and Validation data\")\n # plot_history(config=config)\n\n # print(\"Plotting ROC Curve\")\n # plot_roc(model=cnn_model, data=test_data)\n\n print(\"Classifcation Accuracy Report\")\n classification_accuracy_report(model=cnn_model, data=test_data)", "def build():\n\tconsole = Console()\n\tconsole.clear()\n\tconsole.print(BANNER)\n\tif not os.path.exists(\"dataset.yaml\"):\n\t\tclick.clear()\n\t\tconsole.print(\"Dataset config file not found\\nRun - idt init\\n\")\n\t\texit(0)\n\n\twith open('dataset.yaml') as f:\n\t\tdata = yaml.load(f, Loader=yaml.FullLoader)\n\t\n\tclick.clear()\n\tconsole.print(\"Building [bold blue]{dataset_name}[/bold blue] dataset...\\n\".format(dataset_name=data['DATASET_NAME']))\n\tfor classes in data['CLASSES']:\n\t\tclick.clear()\n\t\tconsole.print('Creating [bold blue]{name} class[/bold blue] \\n'.format(name=classes['CLASS_NAME']))\n\t\tsearch_list = classes['SEARCH_KEYWORDS'].split(\",\")\n\t\tfor keywords 
in search_list:\n\t\t\tfactory = SearchEngineFactory(keywords,data['SAMPLES_PER_SEARCH'],classes['CLASS_NAME'],data['RESIZE_METHOD'], data['DATASET_NAME'],data['IMAGE_SIZE'], data['ENGINE'],data['API_KEY'])\n\t# Remove corrupt files\n\tremove_corrupt(data['DATASET_NAME'])\n\n\t# Create a CSV with dataset info\n\tcreate_dataset_csv(data['DATASET_NAME'])\n\tclick.clear()\n\tconsole.print(\"Dataset READY!\")", "def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )", "def run():\n main()", "def run(self):\n cmds = (self.clean_docs_cmd, self.html_docs_cmd, self.view_docs_cmd)\n self.call_in_sequence(cmds)", "def main():\n run_nutanix_vm_creation_module()", "def main():\n # combineSloEff(makePlots=True, writeDB=False, runMC=False, seedNum=1)\n GPXSloEff(makePlots=True, writeDB=False)", "def script(self):", "def main():\n args = parser.parse_args()\n # create dataframe-ulog class for Attitude/Attiutde-setpoint topic\n att = DfUlg.create(\n args.filename, topics=[\"vehicle_attitude\", \"vehicle_attitude_setpoint\"]\n )\n\n with PdfPages(\"attitude.pdf\") as pdf:\n\n # roll pitch and yaw error\n add_roll_pitch_yaw(att.df)\n add_euler_error(att.df)\n\n plt.figure(0, figsize=(20, 13))\n df_tmp = att.df[\n [\n \"timestamp\",\n \"T_vehicle_attitude_setpoint_0__NF_e_roll\",\n \"T_vehicle_attitude_setpoint_0__NF_e_pitch\",\n \"T_vehicle_attitude_setpoint_0__NF_e_yaw\",\n ]\n ].copy()\n df_tmp.plot(x=\"timestamp\", linewidth=0.8)\n plot_time_series(df_tmp, plt)\n plt.title(\"Roll-Pitch-Yaw-Error\")\n plt.ylabel(\"rad\")\n pdf.savefig()\n plt.close(0)\n\n # inverted\n add_vehicle_z_axis(att.df)\n add_vehicle_inverted(att.df)\n plt.figure(1, figsize=(20, 13))\n df_tmp = att.df[\n [\"timestamp\", \"T_vehicle_attitude_0__NF_tilt_more_90\"]\n ].copy()\n df_tmp.plot(x=\"timestamp\", linewidth=0.8)\n plot_time_series(df_tmp, plt)\n plt.title(\"Inverted\")\n plt.ylabel(\"boolean\")\n pdf.savefig()\n plt.close(1)\n\n # tilt and desired tilt\n add_desired_z_axis(att.df)\n add_desired_tilt(att.df)\n add_tilt(att.df)\n\n pos_tilt = loginfo.get_param(att.ulog, \"MPC_TILTMAX_AIR\", 0)\n man_tilt = loginfo.get_param(att.ulog, \"MPC_MAN_TILT_MAX\", 0)\n plt.figure(2, figsize=(20, 13))\n df_tmp = att.df[\n [\n \"timestamp\",\n \"T_vehicle_attitude_0__NF_tilt\",\n \"T_vehicle_attitude_setpoint_0__NF_tilt_desired\",\n ]\n ].copy()\n df_tmp[\"MPC_TILTMAX_AIR\"] = pos_tilt * np.pi / 180\n df_tmp[\"MPC_MAN_TILT_MAX\"] = man_tilt * np.pi / 180\n df_tmp.plot(x=\"timestamp\", linewidth=0.8, style=[\"-\", \"-\", \"--\", \"--\"])\n\n plot_time_series(df_tmp, plt)\n plt.title(\"Tilt / Desired Tilt\")\n plt.ylabel(\"rad\")\n pdf.savefig()\n plt.close(2)\n\n print(\"attitude.pdf was created\")", "def main():\n dler = Downloader(\"nucleotide\", \"BS000695\", \"/Users/thangchu/Desktop/coronavirus/coronavirus/test.fasta\", True, 0)\n dler.run_everything()", "def write_dapall_script(self, plots=True, overwrite=False):\n # Check that the path exists, creating it if not\n if not self.calling_path.exists():\n self.calling_path.mkdir(parents=True)\n \n # Set the names for the script, stdout, and stderr files\n scriptfile = self.calling_path / 'build_dapall'\n stdoutfile = f'{scriptfile}.out'\n stderrfile = f'{scriptfile}.err'\n\n # Script file already exists, so just return\n if scriptfile.exists() and not overwrite:\n return scriptfile, stdoutfile, stderrfile\n\n # Open the script file and write the date as a commented header\n 
# line\n file = open(scriptfile, 'w')\n file.write('# Auto-generated batch file\\n')\n file.write(f'# {time.strftime(\"%a %d %b %Y %H:%M:%S\",time.localtime())}\\n')\n file.write('\\n')\n\n # Create the started touch file\n startfile = f'{scriptfile}.started'\n file.write(f'touch {startfile}\\n')\n file.write('\\n')\n\n # Command that constructs the DAPall file\n command = 'OMP_NUM_THREADS=1 '\n command += f'construct_dapall --drpver {self.drpver} -r {self.redux_path} ' \\\n f'--dapver {self.dapver} -a {self.analysis_path}'\n if self.plan_file is not None:\n command += f' --plan_file {self.plan_file}'\n if self.verbose > 0:\n command += (' -'+'v'*self.verbose )\n file.write(f'{command}\\n')\n file.write('\\n')\n\n # Add the plotting commands\n if plots:\n command = f'OMP_NUM_THREADS=1 dapall_qa --drpver {self.drpver} ' \\\n f'--redux_path {self.redux_path} --dapver {self.dapver} ' \\\n f'--analysis_path {self.analysis_path}'\n if self.plan_file is not None:\n command += f' --plan_file {self.plan_file}'\n file.write(f'{command}\\n')\n file.write('\\n')\n\n # Touch the done file\n donefile = f'{scriptfile}.done'\n file.write(f'touch {donefile}\\n')\n file.write('\\n')\n\n file.close()\n ################################################################\n\n # Return the script file, file for stdout, and file for stderr\n return scriptfile, stdoutfile, stderrfile", "def main():\n\n if not os.path.isdir('./results'):\n # results directory is needed\n os.mkdir('./results')\n\n # Run bess daemon\n print('start bess daemon')\n ret = bessctl_do('daemon start')\n if ret.returncode != 0:\n print('failed to start bess daemon')\n return 1\n\n #sleep(2)\n\n cnt_prt_q = [(2,2), (4,2), (8, 2), (2, 8), (4, 8), (8, 8), (16, 8)]\n cnt_prt_q = [(2,128),]\n # cnt_prt_q = [0]\n # Warning: SINGLE_PMD_MULTIPLE_Q is not supported any more.\n # (it needs EXCESS variable to be defined)\n exp_types = ['MULTIPLE_PMD_MULTIPLE_Q',] # 'SINGLE_PMD_MULTIPLE_Q']\n agents = ['BKDRFT', 'BESS']\n agents = ['BKDRFT',]\n for _type in exp_types:\n for agent in agents:\n results = []\n for cnt_ports, cnt_queues in cnt_prt_q:\n res = run_exp(_type, agent, cnt_ports, cnt_queues)\n results.append(res)\n generate_report_file(results,\n './results/{}_{}_results.txt'.format(_type, agent))", "def main():\n\n #\n # Generate waveform\n #\n\n print 'generating waveoform...'\n waveform = pmns_utils.Waveform('shen_135135_lessvisc')\n\n # Pick some extrinsic parameters\n ext_params = ExtParams(distance=1, ra=0.0, dec=0.0, polarization=0.0,\n inclination=0.0, phase=0.0, geocent_peak_time=0.0+5.0)\n\n # Construct the time series for these params\n waveform.make_wf_timeseries(theta=ext_params.inclination,\n phi=ext_params.phase)\n\n #\n # Generate IFO data\n #\n det1_data = DetData(waveform=waveform, ext_params=ext_params)\n\n from scipy import signal\n import pylab as pl\n\n pl.figure()\n pl.plot(det1_data.td_response.sample_times,det1_data.td_response.data)\n pl.plot(det1_data.td_signal.sample_times,det1_data.td_signal.data)\n\n pl.figure()\n f,p = signal.welch(det1_data.td_response.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n\n f,p = signal.welch(det1_data.td_signal.data, fs=1./det1_data.delta_t,\n nperseg=512)\n pl.loglog(f,np.sqrt(p))\n pl.ylim(1e-25,1e-21)\n pl.show()", "def main():\n\n print(\"Finding maximum clade credibility tree...\")\n find_mcc_tree()\n print(\"Computing clade credibilities...\")\n compute_clade_probabilities()\n if _CAN_PLOT:\n print(\"Plotting maximum clade credibility tree...\")\n 
plot_mcc_tree()\n else:\n print(\"Skipping plotting tree due to lack of PyQt4 support. :(\")\n print(\"Computing posterior mean paramter estimates...\")\n ranked_means = utils.write_means(\"indoeuropean.log\", \"parameter_means.csv\")\n print(\"Computing ranking correlations...\")\n compute_ranking_correls(ranked_means)\n print(\"Generating LaTeX table...\")\n make_table(ranked_means)\n print(\"Generating rate variation figure...\")\n make_figure(\"category_rates.eps\")", "def main():\n parsed_args = parse_args()\n dfg = DummyFileGenerator(parsed_args[0], **parsed_args[1])\n dfg.write_output_file(**parsed_args[2])", "def start(setup):\n run.run_name = setup.entry_run_name.get()\n run.num_cpus = setup.entry_cpus.get()\n run.medium_volume = setup.entry_medium.get()\n run.sim_time = setup.entry_sim_time.get()\n run.timestep = setup.entry_timestep.get()\n run.pop_size = setup.entry_pop_size.get()\n run.death_per_gen = setup.entry_num_deaths.get()\n run.iterations = setup.entry_iter.get()\n run.output_dir = setup.entry_output.get()\n run.pfba = False if setup.var_pfba.get() is 0 else True\n run.enforce_growth = False if setup.var_growth.get() is 0 else True\n run.oxigen = False if setup.var_oxigen.get() is 0 else True\n run.mutation_chance = setup.entry_mutation_chance.get()\n run.deletion_chance = setup.entry_deletion_chance.get()\n run.repeats = setup.entry_repeats.get()\n run.death_rate = setup.entry_death_rate.get()\n run.mutation_freq = setup.entry_mutation_freq.get()\n run.deletion_freq = setup.entry_deletion_freq.get()\n run.crossover_freq = setup.entry_crossover_freq.get()\n run.twopoint = False if setup.var_twopoint is 0 else True\n run.chromosome = setup.entry_chromosome.get()\n run.solver = setup.solver_var.get()\n\n\n if run.mutation_freq + run.deletion_freq + run.crossover_freq != 1:\n print(f\"Mutation: {run.mutation_freq} + Deletion: {run.deletion_freq} + Crossover: {run.crossover_freq} is not eaqual to 1\")\n return\n\n if not os.path.isdir(run.output_dir):\n print(f\"'{run.output_dir}' is not a valid directory\")\n return\n\n if run.chromosome == '':\n run.chromosome = None\n else:\n if not os.path.isfile(run.chromosome):\n print(f\"'{run.chromosome}' does not exist\")\n return\n\n objective = {}\n data_watcher = DataWatcher()\n data_watcher.set_oxygen(run.oxigen)\n data_watcher.set_enforce_growth(run.enforce_growth)\n data_watcher.set_pfba(run.pfba)\n data_watcher.set_death_rate(run.death_rate)\n culture = Culture()\n culture.register_data_watcher(data_watcher)\n\n if len(setup.widgets) < 2:\n print(\"Less than two Species added\")\n return\n\n run.objective = objective\n run.culture = culture\n\n run_page = RunPage(app.container, app)\n run_page.grid(row=0, column=0, sticky=\"nsew\")\n run.graph_page = run_page\n\n run_page.tkraise()\n\n for widget in setup.widgets:\n objective[widget.species.entry_name.get()] = widget.entry_objective.get()\n model = widget.species.entry_model.get()\n if not os.path.isfile(model):\n print(f\"Can not find file: {model}\")\n return\n\n if run.graph_page != None:\n from tkinter import END, DISABLED, NORMAL\n run.graph_page.text.config(state=NORMAL)\n run.graph_page.text.insert(END, f\"Loading Model of Species: {widget.species.entry_name.get()}\\n\")\n run.graph_page.text.config(state=DISABLED)\n\n print(f\"Loading Model of Species: {widget.species.entry_name.get()}\")\n species = Species(widget.species.entry_name.get(), model, widget.species.entry_radius.get(), widget.species.entry_dryweight.get(), run.solver.lower())\n 
culture.innoculate_species(species, widget.species.entry_innoculation.get())\n\n run.start_process()", "def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()", "def main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n\n parser = argparse.ArgumentParser(\n description=\"Produce a pedigree drawing in PDF format \"\n \"from a pedigree file with layout coordinates.\",\n conflict_handler=\"resolve\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\"--verbose\", \"-V\", action=\"count\", default=0)\n\n FamiliesLoader.cli_arguments(parser)\n\n parser.add_argument(\n \"--output\",\n \"-o\",\n metavar=\"o\",\n help=\"the output filename file\",\n default=\"output.pdf\",\n )\n\n parser.add_argument(\n \"--mode\",\n type=str,\n default=\"report\",\n dest=\"mode\",\n help=\"mode of drawing; supported modes are `families` and `report`; \"\n \"defaults: `report`\",\n )\n\n argv = parser.parse_args(argv)\n if argv.verbose == 1:\n logging.basicConfig(level=logging.WARNING)\n elif argv.verbose == 2:\n logging.basicConfig(level=logging.INFO)\n elif argv.verbose >= 3:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.WARNING)\n\n logging.getLogger(\"matplotlib\").setLevel(logging.WARNING)\n\n filename, params = FamiliesLoader.parse_cli_arguments(argv)\n families_loader = FamiliesLoader(filename, **params)\n families = families_loader.load()\n\n mode = argv.mode\n assert mode in (\"families\", \"report\")\n logger.warning(\"using mode: %s\", mode)\n if mode == \"report\":\n generator = draw_families_report(families)\n else:\n generator = draw_families(families)\n\n with PDFLayoutDrawer(argv.output) as pdf_drawer:\n\n for fig in generator:\n pdf_drawer.savefig(fig)\n plt.close(fig)", "def main():\n mdclient = PMClient(getenv('METADATA_URL', 'http://127.0.0.1:8121'))\n test_data_dir = dirname(realpath(__file__))\n object_order = [\n 'analytical_tools',\n 'journals',\n 'citations',\n 'institutions',\n 'users',\n 'contributors',\n 'proposals',\n 'instruments',\n 'transactions',\n 'files',\n 'groups',\n 'keys',\n 'keywords',\n 'values',\n 'atool_transaction',\n 'atool_proposal',\n 'citation_contributor',\n 'citation_keyword',\n 'citation_proposal',\n 'file_key_value',\n 'institution_person',\n 'instrument_custodian',\n 'instrument_group',\n 'proposal_instrument',\n 'proposal_participant',\n 'trans_key_value',\n 'user_group'\n ]\n for obj in object_order:\n mdclient.create(obj, loads(open('{0}.json'.format(join(test_data_dir, obj))).read()))", "def generate():\n local('cd doc && make clean && make html')" ]
[ "0.67287403", "0.63006973", "0.6264529", "0.6237319", "0.6058982", "0.60356236", "0.59877056", "0.59641856", "0.59610593", "0.59463936", "0.5924916", "0.59105444", "0.59030676", "0.5876134", "0.585013", "0.582454", "0.58127964", "0.5797604", "0.5779497", "0.5763903", "0.576209", "0.5749707", "0.57386994", "0.5737595", "0.57370025", "0.5731315", "0.57275367", "0.5722828", "0.5712043", "0.57071286", "0.57039475", "0.5691483", "0.56619895", "0.5661417", "0.5659542", "0.5650781", "0.5635577", "0.5631454", "0.5624339", "0.56164885", "0.5616131", "0.56096363", "0.5608703", "0.5606917", "0.56043273", "0.5600239", "0.5600105", "0.55935335", "0.55851686", "0.5581388", "0.55774504", "0.5567254", "0.5565116", "0.5557489", "0.55479014", "0.5545912", "0.5530485", "0.5530371", "0.5529328", "0.5526968", "0.55212176", "0.5498037", "0.54914546", "0.54902005", "0.5488892", "0.5488009", "0.5487804", "0.5486667", "0.5476262", "0.5476031", "0.54667646", "0.5463449", "0.5462328", "0.54541457", "0.54495776", "0.54441047", "0.54391336", "0.54389113", "0.54088384", "0.5403206", "0.53964216", "0.53951263", "0.5393268", "0.5387794", "0.53847724", "0.53845984", "0.5375227", "0.53713006", "0.5363423", "0.53629714", "0.53615224", "0.53464454", "0.534222", "0.5342058", "0.5337194", "0.53261423", "0.53240925", "0.53240323", "0.53200233", "0.53197664", "0.53196776" ]
0.0
-1
Returns the list of log paths that are not known by the log configuration associated to this instance.
def get_new_logs(log_paths,log_conf):
    if log_conf is None or log_conf.get_host() is None:
        return log_paths
    conf_logs = log_conf.get_host().get_logs()
    new_logs = [log_path for log_path in log_paths if log_path not in conf_logs]
    print 'New logs detected on %s: %s' % (log_conf.get_host().get_name(), new_logs)
    logger.info('New logs detected on %s: %s',log_conf.get_host().get_name(), new_logs)
    return new_logs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_paths(self): # pylint:disable=function-redefined\n return self._log_paths", "def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)", "def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"connect-log.log\")\n return full_list", "def filter_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._filter_excluded_paths", "def warping_paths(self):\n return self.paths", "def get_log_paths(root_dir: str) -> List[str]:\n paths = []\n if not tf.io.gfile.isdir(root_dir):\n raise ValueError(f'{root_dir} is not a directory.')\n for path, _, files in tf.io.gfile.walk(root_dir):\n if 'metadata.riegeli' in files:\n paths.append(path)\n return paths", "def missingConfigFiles(self):\n return [ conf\n for conf in self.configFiles\n if not os.path.exists(conf)\n and not os.path.isfile(conf)\n ]", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs", "def logs_directory(self):", "def logpath(self):\n return self.outpath", "def additional_log_names(self) -> List[str]:\n return self._additional_log_names", "def log_paths(self, value):\n self._log_paths = value", "def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]", "def org_apache_felix_http_path_exclusions(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_http_path_exclusions", "def get_all_path(self, conf):\n\t\tpass", "def paths(self):\r\n return self._paths", "def get_paths(self):\n return self.paths", "def log_directory(self):\n\n return self.get_raw(\"log_directory\")", "def getLogs():", "def getLogs():", "def paths(self):\n return self._paths", "def paths(self):\n return self._paths", "def GetAllLogFilePaths(ssh):\n ssh_cmd = [ssh.GetBaseCmd(constants.SSH_BIN), _FIND_LOG_FILE_CMD]\n log_files = []\n try:\n files_output = utils.CheckOutput(\" \".join(ssh_cmd), shell=True)\n log_files = FilterLogfiles(files_output.splitlines())\n except subprocess.CalledProcessError:\n logger.debug(\"The folder(%s) that running launch_cvd doesn't exist.\",\n constants.REMOTE_LOG_FOLDER)\n return log_files", "def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n message = log_f.read()\n messages[log.split(\"/\")[-1]] = message\n return messages", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def get_log_path():\n return LOG_PATH", "def GetPath () :\n return sys.hal_log_values [\"__log_path\"]", "def org_apache_felix_jetty_gzip_excluded_paths(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_jetty_gzip_excluded_paths", "def fileHandlers(self):\n fileHandlers = list()\n handlers = self.logger.handlers\n for handler in handlers:\n try:\n if handler._name.startswith(\"LogFile-\"):\n fileHandlers.append(handler)\n except:\n pass\n return 
fileHandlers", "def logs(self):\n return self.logger.logs()", "def paths(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('paths')", "def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname", "def logs(self):\n if not self._logs:\n self.read_logs()\n return self._logs", "def GetPaths(self):\n return self.paths", "def get_paths(self):\n paths = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_path'):\n paths.append(getattr(o, '_path'))\n return paths", "def filepaths(self):\n pass", "def logdir(self) -> str:\n return self._logdir", "def poller_names(self):\n return [i for i in self._config.sections() if i not in ['Local', 'GitHub', 'Logging',\n 'DEFAULT']]", "def get_logs(self):\n return self.network.get_logs()", "def get_tracignore_patterns(env_parent_dir):\n path = os.path.join(env_parent_dir, '.tracignore')\n try:\n lines = [line.strip() for line in read_file(path).splitlines()]\n except IOError:\n return ['.*']\n return [line for line in lines if line and not line.startswith('#')]", "def _list_dir(self):\n return [os.path.join(self._path, fn) for fn in os.listdir(self._path)\n if not fn.endswith(self._fs_transaction_suffix)]", "def get_log_files_to_delete(self):\n dir_name, base_name = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n n, e = os.path.splitext(base_name)\n prefix = n + \".\"\n plen = len(prefix)\n for file_name in file_names:\n if self.namer is None:\n if not file_name.startswith(base_name):\n continue\n else:\n if (\n not file_name.startswith(base_name)\n and file_name.endswith(e)\n and len(file_name) > (plen + 1)\n and not file_name[plen + 1].isdigit()\n ):\n continue\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n parts = suffix.split(\".\")\n for part in parts:\n if self.extMatch.match(part):\n result.append(os.path.join(dir_name, file_name))\n break\n if len(result) < self.backupCount:\n result = []\n else:\n result.sort()\n result = result[: len(result) - self.backupCount]\n return result", "def missing_references(self):\n return [ ref for ref in self.reference_names() if not config.file_in_cache(self.name, self.observatory) ]", "def get_paths(self):\n return self.path.split(',')", "def test_logs(self):\n # Purge all logs\n log_dir = self.test_config['LOG_DIR']\n pattern = re.compile('^nginx-access-ui.log-(?P<day_of_log>\\d{8})(\\.gz)?$')\n logs = [f for f in os.listdir(log_dir) if re.search(pattern, f)]\n map(os.remove, logs)\n\n # Try to make report without logs\n self.generate_report()\n self.assertTrue(self.check_in_log(\"Not found logs in directory {}\".format(self.test_config['LOG_DIR'])))", "def get_files_paths(self):\n return self.__files_paths", "def get_access_paths(self):\n return list(filter(lambda path: not self.matrix_walls[self.y][self.x][path],\n (Maze.RIGHT, Maze.LEFT, Maze.TOP, Maze.BOTTOM)))", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def get_logging_dir(self):\n return self.logging_dir", "def delete_logs(self):\n if self.etw_log is not None:\n files = sorted(glob.glob(self.etw_log + '*'))\n for path in files:\n try:\n os.remove(path)\n except Exception:\n pass", "def _get_ignore_list(self) -> List[str]:\n if not 
self.exists():\n return []\n if self._file_exists():\n with open(self._path, \"r\", encoding=DefaultOpenEncoding.READ) as fh:\n return [line.rstrip() for line in fh if line]\n return []", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def empty_paths(self):\n self.paths[:]", "def hide_access_logs():\n access_log = cherrypy.log.access_log\n for handler in tuple(access_log.handlers):\n access_log.removeHandler(handler)", "def log_path(self):\n return os.path.join(self._sandbox, 'log')", "def unfinished_arrow_sh_files(self):\n return op.join(self.log_dir, 'unfinished_arrow_files.txt')", "def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename", "def path(self) -> List[Path]:\n return self._path", "def get_missing_output_path_definitions(\n self,\n ) -> List[StringInputDefinition]:\n\n return [\n input_definition\n for input_definition in self.input_definitions\n if self.input_definition_is_a_missing_output_path(input_definition)\n ]", "def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]", "def paths(self):\n return list(zip(*self.collected))[0]", "def log_diagnostics(self, paths):\n\t\tpass", "def get_replay_source_no_helper_paths(self):\n paths = []\n classes = self.get_replay_classes_no_helper()\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n\n \n for c in classes:\n full_path = base_path + str(c).zfill(2) + \".tfrecord\"\n paths.append(full_path)\n \n return paths", "def paths(self) -> Paths:\n return self._paths", "def pytest_logger_logsdir(self, config):", "def GetExpectationFilepaths(self) -> List[str]:\n raise NotImplementedError()", "def existing_logs(self):\n temp = list()\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"PRAGMA table_info('data')\")\n temp = cur.fetchall()\n # if len(temp) != 0:\n # empty sequence is false\n if temp:\n self._existing_logs = [item[1] for item in temp]\n return self._existing_logs", "def baseline_paths(self) -> Iterator[List[Path]]:\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n yield []\n else:\n with self._baseline_context():\n yield [\n relative_path\n for relative_path in self._target_paths\n if self._fname_to_path(repo, str(relative_path))\n not in self._status.added\n ]", "def logdir(self):\n return osp.join('runs/', self.net_name, '')", "def list_logging_conf():\n import pkg_resources\n\n configs = set()\n for plugin in plugin_manager.load_all(__name__):\n configs.update({\n cfg for cfg in pkg_resources.resource_listdir(__name__, '.')\n if cfg.endswith('.json')\n })\n\n return configs", "def missing_mappings(self):\n return [ mapping for mapping in self.mapping_names() if not config.file_in_cache(self.name, self.observatory) ]", "def stderr_path(self):\n return self.log_path\n # return self.path / 'stderr.txt'", "def excluded(cls):\n return []", "def get_config_files(self):\n flag, i = self.inotify\n\n if flag:\n kwargs = {}\n\n if PY3:\n kwargs['timeout_s'] = 0\n\n filenames = set()\n\n for event in i.event_gen(**kwargs):\n if event is None:\n break\n\n filenames.add(event[3])\n\n return list(filenames)\n\n else:\n return os.listdir(self.watch)", "def _local_dir(self):\n return []", "def find_logs(self, log_format):\n # print(self.path)\n r, d, files = 
next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def _get_filepaths(self):\n self._printer(str(self.__len__()) + \" file paths have been parsed in \" + str(self.timer.end))\n if self._hash_files:\n return pool_hash(self.filepaths)\n else:\n return self.filepaths", "def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)", "def _filter_return_url_from_list(self, paths, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n for path in paths:\r\n if path in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def output_ignored(self) -> List[str]:\n output = list()\n for ignored in sorted(self.ignored):\n if len(ignored) == 2:\n line = f\"{ignored[0]} - Ignored {ignored[1]}!\"\n else:\n line = f\"{ignored[0]} - {ignored[1]}: Ignored {ignored[2]}!\"\n logger.info(line)\n output.append(line)\n return output", "def loggers(self):\n ret = []\n if self.logger_name:\n if isinstance(self.logger_name, logging.Logger):\n ret.append((self.logger_name.name, self.logger_name))\n else:\n ret.append((self.logger_name, logging.getLogger(self.logger_name)))\n\n else:\n ret = list(logging.Logger.manager.loggerDict.items())\n ret.append((\"root\", logging.getLogger()))\n return ret", "def missing_in_gn_by_file(self):\n return self._missing_gn_files", "def _metric_source_urls(self) -> List[str]:\n # This should ideally link to an issue search that returns all violation suppressions, but that doesn't\n # seem to be possible in SonarQube\n return [self._metric_source.violations_url(self._sonar_id())] if self._metric_source else []", "def logunsentlbsys(self) :\n\t\ttry :\n\t\t\treturn self._logunsentlbsys\n\t\texcept Exception as e:\n\t\t\traise e", "def _exclude_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')\n if not paths:\n return []\n return paths.split(';')", "def get_result_path(self):\n return logPath", "def fetch_error_log(self):\n content = []\n\n def get_lines_at_tail(log_file, max_count):\n \"\"\"Fetch last n lines from a big file.\"\"\"\n if not os.path.exists(log_file):\n return []\n\n file_size = os.path.getsize(log_file)\n # Assume that in average a line has 512 characters at most\n block_size = max_count * 512 if max_count > 0 else file_size\n\n with open(log_file, \"r\") as file_handle:\n if file_size > block_size > 0:\n max_seek_point = file_size // block_size\n file_handle.seek((max_seek_point - 
1) * block_size)\n elif file_size:\n file_handle.seek(0, os.SEEK_SET)\n lines = file_handle.read().splitlines()\n while lines and not lines[-1]:\n lines.pop()\n return lines[-max_count:] if max_count > 0 else lines\n\n logging_paths = {self.errpath, self.outpath, self.logpath}\n if self.cfg.file_logger:\n file_log_path = os.path.join(self.runpath, self.cfg.file_logger)\n if file_log_path not in logging_paths:\n logging_paths.add(file_log_path)\n\n for path in logging_paths:\n lines = (\n get_lines_at_tail(path, self.cfg.error_logs_max_lines)\n if path\n else []\n )\n if lines:\n if content:\n content.append(\"\")\n content.append(\"Information from log file: {}\".format(path))\n content.extend([\" {}\".format(line) for line in lines])\n\n return content", "def init_error_files(self): \n \n dir_path = self.init_logs_directory()\n log_errors = self.join_path(dir_path, PATH_FOR_LOG_ERRORS)\n \n return log_errors", "def getLogPath(self, folder):\n path = join(self.folder,'experimentLog_0001.txt')\n for f_path in os.listdir(folder):\n if re.search('experimentLog_[0-9]*.txt', f_path):\n path = join(self.folder,f_path)\n break\n\n return path", "def get_string_of_files_paths(self):\n to_return = \"\"\n for paths in self.__files_paths:\n if len(paths) != 0:\n to_return += paths + \" \"\n return to_return", "def get_relevant_reports(self):\r\n split_reports = [x.split(' ') for x in self.get_directory_list()]\r\n formatted_reps = [list(filter(None, line)) for line in split_reports]\r\n recent_reports = [line for line in formatted_reps if self.is_report_recent(line)]\r\n return recent_reports", "def remote_paths(self) -> list:\r\n results: list = []\r\n\r\n if self.imports_node is not None:\r\n results.extend([node.text for node in filter(is_import_node, self.imports_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n if self.folders_node is not None:\r\n results.extend([node.text for node in filter(is_folder_node, self.folders_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n return results" ]
[ "0.8348011", "0.6821954", "0.6548825", "0.64387006", "0.6341025", "0.6129678", "0.6074887", "0.6059957", "0.605284", "0.605284", "0.60285383", "0.60223985", "0.5995747", "0.5980469", "0.59733", "0.5953644", "0.59462976", "0.5943091", "0.59387076", "0.5936064", "0.59292436", "0.59292436", "0.59043807", "0.59043807", "0.5897476", "0.58848697", "0.58304155", "0.58174986", "0.5802656", "0.5794456", "0.5760617", "0.57519513", "0.5748587", "0.5747799", "0.5737607", "0.5731873", "0.57224774", "0.57001895", "0.569999", "0.5689907", "0.5671817", "0.5669365", "0.56462294", "0.5643656", "0.5629224", "0.56202537", "0.5619088", "0.56190664", "0.5601295", "0.5571469", "0.55691946", "0.55609906", "0.55539984", "0.5544794", "0.55432016", "0.5503631", "0.5495344", "0.5494778", "0.5493221", "0.54808974", "0.54795504", "0.5476893", "0.54667956", "0.5459536", "0.54503256", "0.54499656", "0.54478866", "0.5443376", "0.5433613", "0.5419981", "0.54092354", "0.54092145", "0.54072505", "0.53994185", "0.5399044", "0.5398185", "0.53936857", "0.53929716", "0.5379344", "0.53778726", "0.53718793", "0.5370613", "0.5367351", "0.5367348", "0.53648055", "0.5362291", "0.535703", "0.5354591", "0.5344439", "0.53274", "0.53237534", "0.53183115", "0.53121156", "0.53079784", "0.52878803", "0.5284643", "0.5282051", "0.52813387", "0.5276893", "0.5275793" ]
0.60559905
8
Returns the remote logging configuration or None if the remote configuration does not exist.
def get_instance_log_conf(instance_id): # Retrieve current log config file log_conf_file = None filename = 'logentries_%s.conf'%instance_id rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename local_conf_name = '/tmp/%s'%filename # Clean file present try: local('rm %s'%local_conf_name) except: print 'Could not remove %s. It may not exist'%(local_conf_name) logger.warning('Could not remove %s. It may not exist'%(local_conf_name)) # Get remote conf file or return None if it cannot be retrieved try: get(rsyslog_conf_name,local_conf_name) except: print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id) logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id) return None # Open conf file or return None if it cannot be opened try: log_conf_file = open(local_conf_name,'r') except: print 'Cannot open %s from instance %s'%(local_conf_name,instance_id) logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id) return None return log_conf_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logging_config(self) -> 'outputs.LoggingConfigResponse':\n return pulumi.get(self, \"logging_config\")", "def logging_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfig']:\n return pulumi.get(self, \"logging_config\")", "def logging_config(self) -> Optional[pulumi.Input['NodePoolLoggingConfigArgs']]:\n return pulumi.get(self, \"logging_config\")", "def fusion_api_get_remote_syslog_configuration(self, api=None, headers=None, param=None):\n return self.remote_syslog.get(api=api, headers=headers, param=param)", "def log_config(self) -> 'outputs.ConnectorsLogConfigResponse':\n return pulumi.get(self, \"log_config\")", "def get_config() -> Optional[Config]:\n return CurrentConfig.get()", "def get_config(self):\n if self.faucet is not None:\n return self.faucet.get_config()\n return None", "def GetRemoteConfigSetInfo(url):\n # Get the build item\n build_channel, locator = _GetAuthorizedBuildChannel()\n if build_channel.auth_state == ndb_models.AuthorizationState.UNAUTHORIZED:\n return None\n\n try:\n channel_path = locator.path\n path = channel_path + url.split(channel_path)[1]\n build_item = build_channel.GetBuildItem(path)\n except errors.FilePermissionError as err:\n logging.info('No permission to access %s: %s', CONFIG_SET_URL, err)\n return None\n\n # Parse the build item\n remote_config = _ParseBuildItem(build_item)\n return remote_config.info if remote_config else None", "def _get_config_value_for_remote(ctx, remote, config, key):\n roles = ctx.cluster.remotes[remote] if ctx else None\n if 'all' in config:\n return config['all'].get(key)\n elif roles:\n for role in roles:\n if role in config and key in config[role]:\n return config[role].get(key)\n return config.get(key)", "def cloud_conf(self) -> Optional[str]:\n if self.is_ready and (data := self._data):\n return data.cloud_config\n return None", "def git_remote_settings(self) -> pulumi.Output[Optional['outputs.RepositoryGitRemoteSettings']]:\n return pulumi.get(self, \"git_remote_settings\")", "def get_remote(repo, name='origin'):\n config_name = 'remote.{}.url'.format(name)\n return subprocess.check_output(['git', 'config', '--get',\n config_name], cwd=repo).rstrip()", "def default_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig']:\n return pulumi.get(self, \"default_config\")", "def configuration_info(self) -> Optional['outputs.ConfigurationInfoResponse']:\n return pulumi.get(self, \"configuration_info\")", "def get_config():\n return _config", "def git_remote_settings(self) -> Optional[pulumi.Input['RepositoryGitRemoteSettingsArgs']]:\n return pulumi.get(self, \"git_remote_settings\")", "def git_remote_settings(self) -> Optional[pulumi.Input['RepositoryGitRemoteSettingsArgs']]:\n return pulumi.get(self, \"git_remote_settings\")", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def get_config():\n return CONFIG", "def configuration(self):\n if self.integration is None:\n return None\n return self.integration.configuration", "def get_config(self):\r\n if not os.path.exists(self.config_file):\r\n return None\r\n return json.loads(file(self.config_file).read())", "def config(self) -> Optional[pulumi.Input['NodeConfigArgs']]:\n return pulumi.get(self, \"config\")", "def config_server(self) -> Optional[pulumi.Input['ConfigServerSettingsArgs']]:\n return pulumi.get(self, \"config_server\")", "def third_party_configuration_url(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"third_party_configuration_url\")", "def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)", "def _load_config(configuration_file='source.yaml'):\n\n # Fetch Config from Endpoint\n _status, _config = _load_configuration_from_endpoint(EXTRACTOR_ID)\n\n if _status == 404:\n # 404: - Setup Config by source.yaml\n _file_config = _load_configuration_from_file(configuration_file)\n _config = _file_config\n _status, result = _set_remote_configuration(_file_config)\n\n if _status != 200:\n print(result)", "def get_config(location):\n import urlparse # urlparse\n if urlparse.urlparse(location).scheme:\n if location.endswith('.git'):\n return get_config_from_remote_git(location)\n elif location.endswith('.zip'):\n return get_config_from_remote_zip(location)\n else:\n raise ConfigError('%s is not a valid URL to a git repository or '\n 'zip file.' % (location))\n else:\n if not os.path.exists(location):\n raise ConfigError('%s does not exist locally.' % (location))\n elif os.path.isfile(location):\n return get_config_from_local_zip(location)\n elif os.path.isdir(location):\n return get_config_from_local_dir(location)\n else:\n raise ConfigError('%s is not a directory.' % (location))", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return 
self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def host_config(self) -> Optional['outputs.RuleRuleConditionHostConfig']:\n return pulumi.get(self, \"host_config\")", "def _remote_or_local(fn, branch='master', remote=False):\n if remote:\n url = (\n 'https://raw.githubusercontent.com/bioconda/bioconda-recipes/'\n '{branch}/{path}'.format(branch=branch, path=fn)\n )\n print('Using config file {}'.format(url))\n with conda.fetch.TmpDownload(url) as f:\n cfg = yaml.load(open(f))\n else:\n cfg = yaml.load(open(os.path.join(HERE, fn)))\n return cfg", "def __previous_config(self):\n if self.__during_ha_import:\n return self.__config_before_ha_import\n try:\n return self.core.config_manager.get(\n self.NAME,\n which_configuration='applied'\n )\n except (KeyError, ConfigError):\n return None", "def get_config() -> Config:\n return get_or_set('config', build_configuration)" ]
[ "0.68369645", "0.6830208", "0.67188877", "0.63710445", "0.62929714", "0.62837815", "0.62123984", "0.61937004", "0.59980434", "0.58134604", "0.5746437", "0.57139", "0.5709636", "0.56954515", "0.5683042", "0.5662481", "0.5662481", "0.564444", "0.5639923", "0.5633198", "0.56311613", "0.5628773", "0.5627505", "0.56134987", "0.5597827", "0.5570008", "0.5566288", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55542004", "0.55356044", "0.5510082", "0.5508598", "0.54904824" ]
0.5990378
9
Returns the updated log_conf, taking into account new log files present on the instance as well as modifications made to the corresponding logentries host.
def update_instance_conf(log_paths, log_conf):
    log_client = LogClient.Client(account_key)
    instance_id, config = get_ssh_config(env.host)
    if log_conf is None and len(log_paths)>0:
        print 'log_conf is None'
        log_conf = create_host_logs(log_client,instance_id,log_paths)
    elif log_conf is not None:
        print 'log_conf is not None'
        conf_host = log_conf.get_host()
        if conf_host is None:
            print 'Error. This instance configuration is missing the corresponding model!! instance_id=%s'%instance_id
            logger.error('Error. This instance configuration is missing the corresponding model!! instance_id=%s',instance_id)
            log_conf = create_host_logs(log_client,instance_id,log_paths)
            return log_conf
        if conf_host.get_key() is None:
            print 'Host %s has a logentries-rsyslog config file but no account key!!'%conf_host.get_name()
            logger.warning('Host %s has a logentries-rsyslog config file but no account key!!',conf_host.get_name())
            log_conf = create_host_logs(log_client,instance_id,log_paths)
            return log_conf
        account = log_client.get_account()
        matching_host = None
        for host in account.get_hosts():
            if host.get_key() == conf_host.get_key():
                matching_host = host
                break
        # If there is no matching host, then it is assumed that it was deleted from Logentries and that no configuration should be associated to this instance.
        if matching_host is None:
            log_conf = create_host_logs(log_client,instance_id,log_paths)
            return log_conf
        for new_log in get_new_logs(log_paths, log_conf):
            # Update matching host so that each new log becomes part of it.
            matching_host = log_client.create_log_token(host=matching_host,log_name=new_log)
        log_conf.set_host(matching_host)
    return log_conf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instance_log_conf(instance_id):\n # Retrieve current log config file\n log_conf_file = None\n\n filename = 'logentries_%s.conf'%instance_id\n rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename\n local_conf_name = '/tmp/%s'%filename\n \n # Clean file present\n try:\n local('rm %s'%local_conf_name)\n except:\n print 'Could not remove %s. It may not exist'%(local_conf_name)\n logger.warning('Could not remove %s. It may not exist'%(local_conf_name))\n # Get remote conf file or return None if it cannot be retrieved\n try:\n get(rsyslog_conf_name,local_conf_name)\n except:\n print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id)\n logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id)\n return None\n # Open conf file or return None if it cannot be opened\n try:\n log_conf_file = open(local_conf_name,'r')\n except:\n print 'Cannot open %s from instance %s'%(local_conf_name,instance_id)\n logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id)\n return None\n return log_conf_file", "def get_new_logs(log_paths,log_conf):\n if log_conf is None or log_conf.get_host() is None:\n return log_paths\n conf_logs = log_conf.get_host().get_logs()\n new_logs = [log_path for log_path in log_paths if log_path not in conf_logs]\n print 'New logs detected on %s: %s'(log_conf.get_host().get_name(), new_logs)\n logger.info('New logs detected on %s: %s',log_conf.get_host().get_name(), new_logs)\n return new_logs", "def update_rally_logs(res_dir, rally_conf='/etc/rally/rally.conf'):\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n rconfig = configparser.RawConfigParser()\n rconfig.read(rally_conf)\n rconfig.set('DEFAULT', 'debug', True)\n rconfig.set('DEFAULT', 'use_stderr', False)\n rconfig.set('DEFAULT', 'log-file', 'rally.log')\n rconfig.set('DEFAULT', 'log_dir', res_dir)\n with open(rally_conf, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config", "def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", 
self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))", "def update_log_config(self, monitor_name, log_config):\n pass", "def get_log_config(conf_file: str):\n with open(conf_file, 'r') as c:\n config = json.load(c)\n if not os.path.exists('log'):\n os.mkdir('log')\n logging.config.dictConfig(config)\n # disable urllib3 DEBUG messages\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)", "def merge_config(log_conf: LogConf, conf: Config) -> Config:\n #pylint: disable=too-many-locals\n\n name = conf.name # take individual conf value, ignore common log_conf value\n filename = _ITEM_OR_DEFAULT(log_conf.filename, conf.filename)\n logger_level = _ITEM_OR_DEFAULT(log_conf.logger_level, conf.logger_level)\n log_fmt = _ITEM_OR_DEFAULT(log_conf.log_fmt, conf.log_fmt)\n log_datefmt = _ITEM_OR_DEFAULT(log_conf.log_datefmt, conf.log_datefmt)\n log_level = _ITEM_OR_DEFAULT(log_conf.log_level, conf.log_level)\n log_enabled = _ITEM_OR_DEFAULT(log_conf.log_enabled, conf.log_enabled)\n cout_fmt = _ITEM_OR_DEFAULT(log_conf.cout_fmt, conf.cout_fmt)\n cout_datefmt = _ITEM_OR_DEFAULT(log_conf.cout_datefmt, conf.cout_datefmt)\n cout_level = _ITEM_OR_DEFAULT(log_conf.cout_level, conf.cout_level)\n cout_enabled = _ITEM_OR_DEFAULT(log_conf.cout_enabled, conf.cout_enabled)\n propagate = _ITEM_OR_DEFAULT(log_conf.propagate, conf.propagate)\n log_dir = _ITEM_OR_DEFAULT(log_conf.log_dir, conf.log_dir)\n sub_dir = _ITEM_OR_DEFAULT(log_conf.sub_dir, conf.sub_dir)\n override_allowed = conf.override_allowed # take individual conf value, ignore common log_conf value\n\n n_conf: Config = Config(name, filename, logger_level, log_fmt, log_datefmt, log_level, log_enabled, cout_fmt,\n cout_datefmt, cout_level, cout_enabled, propagate, log_dir, sub_dir, override_allowed)\n\n return n_conf", "def get_hash_log_curr(self):\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'U')\n #first line is header, skip\n log.readline()\n for line in log:\n try:\n line = line.replace('\\n','')\n # log maintenance. 
only keep number of days designated\n line = line.split('|')\n if len(line) != 6:\n raise Exception\n if line[4] > self.log_cut_off_date:\n self.hash_log_curr[line[2]] = line\n except:\n self.print_to_log('Bad log Line: ' + str(line))\n self.print_to_log('Hash Log read Successfully')\n except IOError:\n self.print_to_log('No log found')\n self.hash_log_curr = None\n except IndexError:\n self.print_to_log('Bad Log File')\n raise\n except:\n self.print_to_log('Unknown Error, Exiting ')\n raise", "def conf_update(self):\n pass", "def log_config(self) -> 'outputs.ConnectorsLogConfigResponse':\n return pulumi.get(self, \"log_config\")", "def set_rsyslog_old_configuration():\n add_udp = False\n add_tcp = False\n # Do the configuration lines exist\n is_exist_udp_conf = False\n is_exist_tcp_conf = False\n with open(rsyslog_conf_path, \"rt\") as fin:\n for line in fin:\n if \"imudp\" in line or \"UDPServerRun\" in line:\n is_exist_udp_conf = True\n add_udp = True if \"#\" in line else False\n elif \"imtcp\" in line or \"InputTCPServerRun\" in line:\n is_exist_tcp_conf = True\n add_tcp = True if \"#\" in line else False\n fin.close()\n if add_udp or not is_exist_udp_conf:\n append_content_to_file(rsyslog_old_config_udp_content, rsyslog_conf_path)\n if add_tcp or not is_exist_tcp_conf:\n append_content_to_file(rsyslog_old_config_tcp_content, rsyslog_conf_path)\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def _load_config_log(self):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n if not os.path.isfile(config_path):\n return {}\n with open(config_path, 'r') as f:\n data = yaml.load(f)\n return data", "def update_log_files(self, control_log, helper_log, server_log):\n if control_log is not None:\n self.control_log_file.update(\n control_log, \"server_config.control_log_file\")\n if helper_log is not None:\n self.helper_log_file.update(\n helper_log, \"server_config.helper_log_file\")\n if server_log is not None:\n for index, server_params in enumerate(self.server_params):\n log_name = list(os.path.splitext(server_log))\n if len(self.server_params) > 1:\n log_name.insert(1, \"_{}\".format(index))\n server_params.log_file.update(\n \"\".join(log_name),\n \"server_config.server[{}].log_file\".format(index))", "def get_logger_config(log_dir,\r\n logging_env=\"no_env\",\r\n tracking_filename=\"tracking.log\",\r\n edx_filename=\"edx.log\",\r\n dev_env=False,\r\n syslog_addr=None,\r\n debug=False,\r\n local_loglevel='INFO',\r\n console_loglevel=None,\r\n service_variant=None):\r\n\r\n # Revert to INFO if an invalid string is passed in\r\n if local_loglevel not in LOG_LEVELS:\r\n local_loglevel = 'INFO'\r\n\r\n if console_loglevel is None or console_loglevel not in LOG_LEVELS:\r\n console_loglevel = 'DEBUG' if debug else 'INFO'\r\n\r\n if service_variant is None:\r\n # default to a blank string so that if SERVICE_VARIANT is not\r\n # set we will not log to a sub directory\r\n service_variant = ''\r\n\r\n hostname = platform.node().split(\".\")[0]\r\n syslog_format = (\"[service_variant={service_variant}]\"\r\n \"[%(name)s][env:{logging_env}] %(levelname)s \"\r\n \"[{hostname} %(process)d] [%(filename)s:%(lineno)d] \"\r\n \"- %(message)s\").format(service_variant=service_variant,\r\n logging_env=logging_env,\r\n hostname=hostname)\r\n\r\n handlers = 
['console', 'local'] if debug else ['console',\r\n 'syslogger-remote', 'local']\r\n\r\n logger_config = {\r\n 'version': 1,\r\n 'disable_existing_loggers': False,\r\n 'formatters': {\r\n 'standard': {\r\n 'format': '%(asctime)s %(levelname)s %(process)d '\r\n '[%(name)s] %(filename)s:%(lineno)d - %(message)s',\r\n },\r\n 'syslog_format': {'format': syslog_format},\r\n 'raw': {'format': '%(message)s'},\r\n },\r\n 'handlers': {\r\n 'console': {\r\n 'level': console_loglevel,\r\n 'class': 'logging.StreamHandler',\r\n 'formatter': 'standard',\r\n 'stream': sys.stderr,\r\n },\r\n 'syslogger-remote': {\r\n 'level': 'INFO',\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': syslog_addr,\r\n 'formatter': 'syslog_format',\r\n },\r\n 'newrelic': {\r\n 'level': 'ERROR',\r\n 'class': 'lms.lib.newrelic_logging.NewRelicHandler',\r\n 'formatter': 'raw',\r\n }\r\n },\r\n 'loggers': {\r\n 'tracking': {\r\n 'handlers': ['tracking'],\r\n 'level': 'DEBUG',\r\n 'propagate': False,\r\n },\r\n '': {\r\n 'handlers': handlers,\r\n 'level': 'DEBUG',\r\n 'propagate': False\r\n },\r\n }\r\n }\r\n\r\n if dev_env:\r\n tracking_file_loc = os.path.join(log_dir, tracking_filename)\r\n edx_file_loc = os.path.join(log_dir, edx_filename)\r\n logger_config['handlers'].update({\r\n 'local': {\r\n 'class': 'logging.handlers.RotatingFileHandler',\r\n 'level': local_loglevel,\r\n 'formatter': 'standard',\r\n 'filename': edx_file_loc,\r\n 'maxBytes': 1024 * 1024 * 2,\r\n 'backupCount': 5,\r\n },\r\n 'tracking': {\r\n 'level': 'DEBUG',\r\n 'class': 'logging.handlers.RotatingFileHandler',\r\n 'filename': tracking_file_loc,\r\n 'formatter': 'raw',\r\n 'maxBytes': 1024 * 1024 * 2,\r\n 'backupCount': 5,\r\n },\r\n })\r\n else:\r\n # for production environments we will only\r\n # log INFO and up\r\n logger_config['loggers']['']['level'] = 'INFO'\r\n logger_config['handlers'].update({\r\n 'local': {\r\n 'level': local_loglevel,\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': '/dev/log',\r\n 'formatter': 'syslog_format',\r\n 'facility': SysLogHandler.LOG_LOCAL0,\r\n },\r\n 'tracking': {\r\n 'level': 'DEBUG',\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': '/dev/log',\r\n 'facility': SysLogHandler.LOG_LOCAL1,\r\n 'formatter': 'raw',\r\n },\r\n })\r\n\r\n return logger_config", "def sanitize_new_config(self):\n config_log = self._load_config_log()\n if 'new' in config_log:\n for cfg in config_log['new']:\n with open(cfg, 'r+') as f:\n data = yaml.load(f)\n f.seek(0)\n yaml.safe_dump(data, f, default_flow_style=False)\n f.truncate()\n del config_log['new']\n\n self._save_config_log(config_log)", "def getLogs():", "def getLogs():", "def _config_log(self):\n config_worker = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'handlers': {\n 'queue': {\n 'class': 'hqc_meas.utils.log.tools.QueueHandler',\n 'queue': self.log_queue,\n },\n },\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['queue']\n },\n }\n logging.config.dictConfig(config_worker)", "def logging_config(self) -> 'outputs.LoggingConfigResponse':\n return pulumi.get(self, \"logging_config\")", "def logging(self):\n conf = self.get(\"logging\")\n level = conf[\"level\"]\n if os.environ.get(\"DEBUG_FG21SIM\"):\n print(\"DEBUG: Force 'DEBUG' logging level\", file=sys.stderr)\n level = \"DEBUG\"\n # logging handlers\n handlers = []\n stream = conf[\"stream\"]\n if stream:\n handlers.append(StreamHandler(getattr(sys, stream)))\n logfile = conf[\"filename\"]\n filemode = conf[\"filemode\"]\n if logfile:\n handlers.append(FileHandler(logfile, 
mode=filemode))\n #\n logconf = {\n \"level\": getattr(logging, level),\n \"format\": conf[\"format\"],\n \"datefmt\": conf[\"datefmt\"],\n \"filemode\": filemode,\n \"handlers\": handlers,\n }\n return logconf", "def config_logger( self, ):\r\n logger = logging.getLogger( self.logger_id )\r\n\r\n logger.handlers = []\r\n logger.setLevel( self.parameters.logging_level ) # DEBUG , INFO WARNING ERROR CRITICAL\r\n\r\n # create the logging file handler.....\r\n fh = logging.FileHandler( self.parameters.pylogging_fn )\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fh.setFormatter( formatter )\r\n logger.addHandler( fh )\r\n\r\n msg = \"Done config_logger\"\r\n print( msg )\r\n logger.info( msg ) # .debug .info .warn .error\r\n AppGlobal.set_logger( logger )\r\n\r\n return logger", "def __init__(self):\n# ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG_FILENAME = os.path.join(ROOT_DIR,__config__)\n self.CONFIG.read(self.CONFIG_FILENAME)\n\n self.LISTEN_IP = self.CONFIG.get('server', 'ip') if self.CONFIG.has_option('server', 'ip') else '0.0.0.0'\n self.LISTEN_PORT = self.CONFIG.getint('server', 'port') if self.CONFIG.has_option('server', 'port') else 6210\n \n self.LOG_DIR = self.CONFIG.get('log','dir') if self.CONFIG.has_option('log', 'dir') else 'log'\n if self.LOG_DIR.find(':') == -1 and not self.LOG_DIR.startswith('/'):\n self.LOG_DIR = os.path.join(ROOT_DIR,self.LOG_DIR)\n \n self.LOG_NAME = self.CONFIG.get('log','name') if self.CONFIG.has_option('log', 'name') else 'test.log'\n self.MAXFILESIZE = self.CONFIG.get('log','maxfilesize') if self.CONFIG.has_option('log', 'maxfilesize') else '20*1024*1024'\n self.MAXFILESIZE = eval(self.MAXFILESIZE)\n self.MAXBACKUP = self.CONFIG.getint('log','maxbackup') if self.CONFIG.has_option('log', 'maxbackup') else 20\n self.SEPARATOR = self.CONFIG.get('log','separator') if self.CONFIG.has_option('log', 'separator') else '\\\\n'\n self.SEPARATOR = self.SEPARATOR.strip().replace('\\'','').replace(\"\\\"\",'')\n self.FORMAT = self.CONFIG.get('log','format') if self.CONFIG.has_option('log','format') else '%(message)s'\n self.BUFSIZE = self.CONFIG.get('log','bufsize') if self.CONFIG.has_option('log','bufsize') else '1024*4'\n self.BUFSIZE = eval(self.BUFSIZE) \n self.SEPARATOR = eval('\\''+self.SEPARATOR+'\\'')\n self.DELETELOG = self.CONFIG.getint('log','cleanlog') if self.CONFIG.has_option('log','cleanlog') else 0\n if self.DELETELOG and os.path.exists(self.LOG_DIR):\n try:\n shutil.rmtree(self.LOG_DIR)\n except:\n print('remove dir error')\n self.LOG_DIR = os.path.join(self.LOG_DIR,datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n print(self.LOG_DIR)\n self.STDSHOW = self.CONFIG.getint('std','show') if self.CONFIG.has_option('std', 'show') else 0", "def config_logger(log_cfg_file, experiment_name=None, output_dir='logs'):\n timestr = time.strftime(\"%Y.%m.%d-%H%M%S\")\n exp_full_name = timestr if experiment_name is None else experiment_name + '___' + timestr\n logdir = os.path.join(output_dir, exp_full_name)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n log_filename = os.path.join(logdir, exp_full_name + '.log')\n if os.path.isfile(log_cfg_file):\n logging.config.fileConfig(log_cfg_file, defaults={'logfilename': log_filename})\n msglogger = logging.getLogger()\n msglogger.logdir = logdir\n msglogger.log_filename = log_filename\n msglogger.info('Log file for this run: ' + 
os.path.realpath(log_filename))\n\n # Create a symbollic link to the last log file created (for easier access)\n try:\n os.unlink(\"latest_log_file\")\n except FileNotFoundError:\n pass\n try:\n os.unlink(\"latest_log_dir\")\n except FileNotFoundError:\n pass\n try:\n os.symlink(logdir, \"latest_log_dir\")\n os.symlink(log_filename, \"latest_log_file\")\n except OSError:\n msglogger.debug(\"Failed to create symlinks to latest logs\")\n return msglogger", "def _get_logs(self):\n logstart = self.LOGSTART%(self.session.uuid, self.session.run_counter)\n logend = self.LOGEND%(self.session.uuid, self.session.run_counter)\n log = self.container.logs().decode('UTF-8')\n while log.find(logstart) == -1 or log.find(logend) == -1:\n log = self.container.logs().decode('UTF-8')\n cleaned_log = self._get_cleaned_logs(log, logstart, logend)\n self.session.run_counter = self.session.run_counter + 1\n self.session.save()\n return cleaned_log", "def logging_conf_dict(mocker: MockerFixture) -> dict:\n return mocker.patch.dict(logging_conf_module.LOGGING_CONFIG)", "async def new_guild_log(guild_config_dict: dict) -> \"GuildEventLog\":\n try:\n guild_log = GuildEventLog(guild_config_dict)\n\n # Retrieve server channels\n guild_log.approval_channel = guild_config_dict[\"approval_channel\"]\n guild_log.calendar_channel = guild_config_dict[\"calendar_channel\"]\n\n # Get staff role\n guild_log.staff_role = guild_config_dict[\"staff_role\"]\n\n # Parse events and edits\n guild_log.approval_events = guild_config_dict[\"approval_events\"]\n guild_log.upcoming_events = guild_config_dict[\"upcoming_events\"]\n guild_log.ongoing_events = guild_config_dict[\"ongoing_events\"]\n guild_log.approval_edits = guild_config_dict[\"approval_edits\"]\n\n return guild_log\n\n except (\n GuildEventInvalidConfig, EventLoadError, KeyError, ValueError\n ) as e:\n raise GuildEventInvalidConfig from e", "def last_update(self):\n # get modification time of QWC2 themes config file\n config_updated_at = None\n if os.path.isfile(self.themes_config_path):\n config_updated_at = datetime.utcfromtimestamp(\n os.path.getmtime(self.themes_config_path)\n )\n\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query timestamp\n LastUpdate = self.config_models.model('last_update')\n query = session.query(LastUpdate.updated_at)\n last_update = query.first()\n if last_update is not None:\n if config_updated_at is not None:\n # use latest of both timestamps\n updated_at = max(last_update.updated_at, config_updated_at)\n else:\n # use timestamp from ConfigDB\n updated_at = last_update.updated_at\n else:\n # no entry in ConfigDB, use config timestamp or now\n updated_at = config_updated_at or datetime.utcnow()\n\n # close session\n session.close()\n\n return {\n 'permissions_updated_at': updated_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def changes(self) -> dict:\n return self.config['changes']", "def get_addons_changelogs(hass):\n return hass.data.get(DATA_ADDONS_CHANGELOGS)", "def config_update(self, update: io.BytesIO) -> None:\n self.__logger.debug('Eva.config_update called')\n return self.__http_client.config_update(update)", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif 
self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def get_log_info(self, hosts, dev, env_state, log_file):\n\t# anticipate log switch\n cmd = \"if [ -f {0}.old ]; then head -50 {0}.old; else head -50 {0};\" \\\n \"fi\".format(log_file)\n err = \"Error getting log data.\"\n pattern = r\"Using\\s+client\\s+provided\\s+OFI_INTERFACE:\\s+{}\".format(dev)\n\n detected = 0\n for output in get_host_data(hosts, cmd, log_file, err).values():\n detected = len(re.findall(pattern, output))\n self.log.info(\n \"Found %s instances of client setting up OFI_INTERFACE=%s\",\n detected, dev)\n\n # Verify\n status = True\n if env_state and detected != 1:\n status = False\n elif not env_state and detected == 1:\n status = False\n return status", "def audit_log_configs(self) -> Sequence['outputs.AuditLogConfigResponse']:\n return pulumi.get(self, \"audit_log_configs\")", "def get_added_logs(self):\n with open(self.path, \"r\") as log_file:\n log_file.seek(self.position)\n contents = log_file.read()\n self.position = log_file.tell()\n return contents", "def _get_config(self, updates=None):\n updated_cfg = copy.deepcopy(self.default_config.config)\n if updates is not None:\n DexNet._deep_update_config(updated_cfg, updates)\n return updated_cfg", "def get_log(self):\n return self._get_property(core.SVN_PROP_REVISION_LOG)", "def update_log(self, plog, clog):\n if plog:\n self.converter.update_log(plog)\n if clog:\n self.converter.update_log(clog)", "def set_rsyslog_new_configuration():\n with open(rsyslog_conf_path, \"rt\") as fin:\n with open(\"tmp.txt\", \"wt\") as fout:\n for line in fin:\n if \"imudp\" in line or \"imtcp\" in line:\n # Load configuration line requires 1 replacement\n if \"load\" in line:\n fout.write(line.replace(\"#\", \"\", 1))\n # Port configuration line requires 2 replacements\n elif \"port\" in line:\n fout.write(line.replace(\"#\", \"\", 2))\n else:\n fout.write(line)\n else:\n fout.write(line)\n command_tokens = [\"sudo\", \"mv\", \"tmp.txt\", rsyslog_conf_path]\n write_new_content = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)\n time.sleep(3)\n o, e = write_new_content.communicate()\n if e is not None:\n handle_error(e,\n error_response_str=\"Error: could not change Rsyslog.conf configuration in -\" + rsyslog_conf_path)\n return False\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True", "def fusion_api_get_remote_syslog_configuration(self, api=None, headers=None, param=None):\n return self.remote_syslog.get(api=api, headers=headers, param=param)", "def load_logging_conf(name):\n # Shortcut - check if logging already exists.\n logconf_path = os.path.join(\n os.environ.get('TREADMILL_APPROOT', ''),\n 'logging',\n name\n )\n\n if os.path.exists(logconf_path):\n with io.open(logconf_path) as f:\n return json.loads(f.read())\n\n # load core logging config first\n conf = _load_logging_file(__name__, name)\n # get 'treadmill' default log configure\n default_conf = conf['loggers'].get(_package_root(__name__), {})\n\n # then load plugin component config\n import pkg_resources\n\n for plugin in plugin_manager.load_all(__name__):\n\n contents = pkg_resources.resource_listdir(__name__, '.')\n try:\n plugin_conf = _load_logging_file(plugin.__name__, name)\n\n # TODO: deep merge conf\n conf['loggers'].update(plugin_conf.get('loggers', {}))\n except 
FileNotFoundError as _e:\n # it is possible for module to be lack of specific log file\n # e.g. some module does not have daemon logging configuration\n # we use default configure for it\n plugin_package_root_name = _package_root(plugin.__name__)\n conf['loggers'][plugin_package_root_name] = default_conf\n\n return conf", "async def parse_guild_config(\n guild: Guild,\n guild_config_dict: dict\n ) -> \"GuildEventLog\":\n try:\n guild_log = GuildEventLog(guild_config_dict)\n\n # Retrieve server channels\n guild_log.approval_channel = guild.get_channel(\n guild_config_dict[\"approval_channel\"]\n )\n guild_log.calendar_channel = guild.get_channel(\n guild_config_dict[\"calendar_channel\"]\n )\n\n # Get staff role\n guild_log.staff_role = guild.get_role(\n guild_config_dict[\"staff_role\"]\n )\n\n # Parse events and edits\n guild_log.approval_events = await guild_log.load_events(\n guild_config_dict[\"approval_events\"],\n guild\n )\n guild_log.upcoming_events = await guild_log.load_events(\n guild_config_dict[\"upcoming_events\"],\n guild\n )\n guild_log.ongoing_events = await guild_log.load_ongoing_events(\n guild_config_dict[\"ongoing_events\"]\n )\n guild_log.approval_edits = await guild_log.load_edits(\n guild_config_dict[\"approval_edits\"]\n )\n\n return guild_log\n\n except (\n GuildEventInvalidConfig, EventLoadError, KeyError, ValueError\n ) as e:\n raise GuildEventInvalidConfig from e", "def updater_log_file(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/logfile invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\t\tresult = None\n\t\tjob = ''\n\t\tif self._current_job and 'job' in self._current_job:\n\t\t\tjob = self._current_job['job']\n\t\telse:\n\t\t\tjob = request.options.get('job','')\n\n\t\tcount = request.options.get('count',0)\n\t\tif count < 0:\n\t\t\tresult = 0\n\t\telse:\n\t\t\tresult = []\n\t\tif not job in INSTALLERS:\n\t\t\t# job empty: this is the first call I can't avoid\n\t\t\tif job != '':\n\t\t\t\tMODULE.warn(\" ?? Don't know a '%s' job\" % job)\n\t\telse:\n\t\t\tif not 'logfile' in INSTALLERS[job]:\n\t\t\t\tMODULE.warn(\" ?? 
Job '%s' has no associated log file\" % job)\n\t\t\telse:\n\t\t\t\tfname = INSTALLERS[job]['logfile']\n\t\t\t\tif count < 0:\n\t\t\t\t\tresult = self._logstamp(fname)\n\t\t\t\telse:\n\t\t\t\t\t# don't read complete file if we have an 'ignore' count\n\t\t\t\t\tif ('lines' in self._current_job) and (self._current_job['lines']):\n\t\t\t\t\t\tcount += int(self._current_job['lines'])\n\t\t\t\t\tresult = self._logview(fname, -count)\n\n\t\t# again debug, shortened\n\t\tif isinstance(result,int):\n\t\t\tMODULE.info(\" >> %d\" % result)\n\t\telse:\n\t\t\tMODULE.info(\" >> %d lines\" % len(result))\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/logfile returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id, result)", "def reloadConfig(self):\r\n self._update('reloadConfig')\r\n try:\r\n self.supervisord.options.process_config(do_usage=False)\r\n except ValueError as msg:\r\n raise RPCError(Faults.CANT_REREAD, msg)\r\n\r\n added, changed, removed = self.supervisord.diff_to_active()\r\n\r\n added = [group.name for group in added]\r\n changed = [group.name for group in changed]\r\n removed = [group.name for group in removed]\r\n return [[added, changed, removed]] # cannot return len > 1, apparently\r", "def fusion_api_update_remote_syslog_configuration(self, body, api=None, headers=None, param=None):\n return self.remote_syslog.update(body, api, headers, param)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def audit_log_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]]:\n return pulumi.get(self, \"audit_log_configs\")", "def init_log(log_instance):\r\n base_dir = os.path.dirname(os.path.abspath(__file__))\r\n log_dir = os.path.join(base_dir, \"logs\")\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n log_file = log_instance + \"_\" + datetime.datetime.now().strftime(\"%Y-%m-%d\") + \".log\"\r\n logging_conf = {\r\n \"version\": 1,\r\n \"disable_existing_loggers\": False,\r\n \"formatters\": {\r\n \"simple\": {\r\n 'format': '%(asctime)s [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n 'standard': {\r\n 'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n },\r\n\r\n \"handlers\": {\r\n \"console\": {\r\n \"class\": \"logging.StreamHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"simple\",\r\n \"stream\": \"ext://sys.stdout\"\r\n },\r\n\r\n \"default\": {\r\n \"class\": \"logging.handlers.RotatingFileHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"standard\",\r\n \"filename\": os.path.join(log_dir, log_file),\r\n 'mode': 'w+',\r\n \"maxBytes\": 1024 * 1024 * 5, # 5 MB\r\n \"backupCount\": 20,\r\n \"encoding\": \"utf8\"\r\n },\r\n },\r\n\r\n \"root\": {\r\n 'handlers': ['default', 'console'],\r\n 'level': \"INFO\",\r\n 'propagate': False\r\n }\r\n }\r\n\r\n logging.config.dictConfig(logging_conf)\r\n\r\n # configure application log\r\n return logging.getLogger(log_instance)", "def configs(self):\n run_path_pairs = list(self.run_paths.items())\n # If there are no summary event files, the projector should still work,\n # treating the `logdir` as the model 
checkpoint directory.\n if not run_path_pairs:\n run_path_pairs.append(('.', self.logdir))\n if (self._run_paths_changed() or\n _latest_checkpoints_changed(self._configs, run_path_pairs)):\n self.readers = {}\n self._configs, self.config_fpaths = self._read_latest_config_files(\n run_path_pairs)\n self._augment_configs_with_checkpoint_info()\n return self._configs", "def _update_from_extra_log(env: Environment, history: History, log: str) -> Environment:\n index = history.logs.index(log)\n if log.startswith(\"conda\"):\n env = _handle_conda_extra_log(env=env, history=history, index=index)\n elif log.startswith(\"pip\"):\n env = _handle_pip_extra_log(env=env, history=history, index=index)\n elif log.startswith(R_COMMAND):\n _handle_r_extra_log(env=env, history=history, index=index)\n\n env = _update_history_packages_spec_from_log(env, history, log)\n return env", "def UpdateConfigs(): # pylint: disable=unused-variable\n try:\n configs = luci_config.FindAllSheriffConfigs(config_client)\n luci_config.StoreConfigs(datastore_client, configs.get('configs', []))\n return jsonify({})\n except (luci_config.InvalidConfigError,\n luci_config.InvalidContentError) as error:\n logging.warning('loading configs from luci-config failed: %s', error)\n return jsonify({}), 500", "def logfile(self):\n return self._get('logfile')", "def update_lxc_conf(name, lxc_conf, lxc_conf_unset, path=None):\n _ensure_exists(name, path=path)\n cpath = get_root_path(path)\n lxc_conf_p = os.path.join(cpath, name, \"config\")\n if not os.path.exists(lxc_conf_p):\n raise SaltInvocationError(f\"Configuration file {lxc_conf_p} does not exist\")\n\n changes = {\"edited\": [], \"added\": [], \"removed\": []}\n ret = {\"changes\": changes, \"result\": True, \"comment\": \"\"}\n\n # do not use salt.utils.files.fopen !\n with salt.utils.files.fopen(lxc_conf_p, \"r\") as fic:\n filtered_lxc_conf = []\n for row in lxc_conf:\n if not row:\n continue\n for conf in row:\n filtered_lxc_conf.append((conf.strip(), row[conf].strip()))\n ret[\"comment\"] = \"lxc.conf is up to date\"\n lines = []\n orig_config = salt.utils.stringutils.to_unicode(fic.read())\n for line in orig_config.splitlines():\n if line.startswith(\"#\") or not line.strip():\n lines.append([line, \"\"])\n else:\n line = line.split(\"=\")\n index = line.pop(0)\n val = (index.strip(), \"=\".join(line).strip())\n if val not in lines:\n lines.append(val)\n for key, item in filtered_lxc_conf:\n matched = False\n for idx, line in enumerate(lines[:]):\n if line[0] == key:\n matched = True\n lines[idx] = (key, item)\n if \"=\".join(line[1:]).strip() != item.strip():\n changes[\"edited\"].append(({line[0]: line[1:]}, {key: item}))\n break\n if not matched:\n if (key, item) not in lines:\n lines.append((key, item))\n changes[\"added\"].append({key: item})\n dest_lxc_conf = []\n # filter unset\n if lxc_conf_unset:\n for line in lines:\n for opt in lxc_conf_unset:\n if not line[0].startswith(opt) and line not in dest_lxc_conf:\n dest_lxc_conf.append(line)\n else:\n changes[\"removed\"].append(opt)\n else:\n dest_lxc_conf = lines\n conf = \"\"\n for key, val in dest_lxc_conf:\n if not val:\n conf += f\"{key}\\n\"\n else:\n conf += f\"{key.strip()} = {val.strip()}\\n\"\n conf_changed = conf != orig_config\n chrono = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n if conf_changed:\n # DO NOT USE salt.utils.files.fopen here, i got (kiorky)\n # problems with lxc configs which were wiped !\n with salt.utils.files.fopen(f\"{lxc_conf_p}.{chrono}\", \"w\") as wfic:\n 
wfic.write(salt.utils.stringutils.to_str(conf))\n with salt.utils.files.fopen(lxc_conf_p, \"w\") as wfic:\n wfic.write(salt.utils.stringutils.to_str(conf))\n ret[\"comment\"] = \"Updated\"\n ret[\"result\"] = True\n\n if not any(changes[x] for x in changes):\n # Ensure an empty changes dict if nothing was modified\n ret[\"changes\"] = {}\n return ret", "def logging_config(logfile_path, logfile_level='debug', console_level='debug'):\n cfg = dict(LOGGING_CONFIG)\n cfg['handlers']['logfile']['filename'] = logfile_path\n cfg['handlers']['logfile']['level'] = logfile_level.upper()\n cfg['handlers']['console']['level'] = console_level.upper()\n return cfg", "def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]", "def _set_logvars(config, logger=None):\n\n if logger is None:\n logger = config.log()\n\n log_timestamp_template = config.getstr('config', 'LOG_TIMESTAMP_TEMPLATE',\n '')\n if config.getbool('config', 'LOG_TIMESTAMP_USE_DATATIME', False):\n if util.is_loop_by_init(config):\n loop_by = 'INIT'\n else:\n loop_by = 'VALID'\n\n date_t = datetime.datetime.strptime(\n config.getstr('config', f'{loop_by}_BEG'),\n config.getstr('config', f'{loop_by}_TIME_FMT')\n )\n else:\n date_t = datetime.datetime.now()\n\n log_filenametimestamp = date_t.strftime(log_timestamp_template)\n\n log_dir = config.getdir('LOG_DIR')\n\n # NOTE: LOG_METPLUS or metpluslog is meant to include the absolute path\n # and the metpluslog_filename,\n # so metpluslog = /path/to/metpluslog_filename\n\n # if LOG_METPLUS = unset in the conf file, means NO logging.\n # Also, assUmes the user has included the intended path in LOG_METPLUS.\n user_defined_log_file = None\n if config.has_option('config', 'LOG_METPLUS'):\n user_defined_log_file = True\n # strinterp will set metpluslog to '' if LOG_METPLUS = is unset.\n metpluslog = config.strinterp(\n 'config',\n '{LOG_METPLUS}',\n LOG_TIMESTAMP_TEMPLATE=log_filenametimestamp\n )\n\n # test if there is any path information, if there is,\n # assume it is as intended, if there is not, than add log_dir.\n if metpluslog:\n if os.path.basename(metpluslog) == metpluslog:\n metpluslog = os.path.join(log_dir, metpluslog)\n else:\n # No LOG_METPLUS in conf file, so let the code try to set it,\n # if the user defined the variable LOG_FILENAME_TEMPLATE.\n # LOG_FILENAME_TEMPLATE is an 'unpublished' variable - no one knows\n # about it unless you are reading this. Why does this exist ?\n # It was from my first cycle implementation. 
I did not want to pull\n # it out, in case the group wanted a stand alone metplus log filename\n # template variable.\n\n # If metpluslog_filename includes a path, python joins it intelligently\n # Set the metplus log filename.\n # strinterp will set metpluslog_filename to '' if template is empty\n if config.has_option('config', 'LOG_FILENAME_TEMPLATE'):\n metpluslog_filename = config.strinterp(\n 'config',\n '{LOG_FILENAME_TEMPLATE}',\n LOG_TIMESTAMP_TEMPLATE=log_filenametimestamp\n )\n else:\n metpluslog_filename = ''\n if metpluslog_filename:\n metpluslog = os.path.join(log_dir, metpluslog_filename)\n else:\n metpluslog = ''\n\n # Adding LOG_TIMESTAMP to the final configuration file.\n logger.info('Adding LOG_TIMESTAMP=%s' % repr(log_filenametimestamp))\n config.set('config', 'LOG_TIMESTAMP', log_filenametimestamp)\n\n # Setting LOG_METPLUS in the configuration object\n # At this point LOG_METPLUS will have a value or '' the empty string.\n if user_defined_log_file:\n logger.info('Replace LOG_METPLUS with %s' % repr(metpluslog))\n else:\n logger.info('Adding LOG_METPLUS=%s' % repr(metpluslog))\n # expand LOG_METPLUS to ensure it is available\n config.set('config', 'LOG_METPLUS', metpluslog)", "def update(self, log_ids: list, dest='logs'):\n self.logs_updated = []\n for i in range(len(log_ids)):\n self.logs_updated.append(dict(\n filename=self.logs[i].get('filename'),\n data=log_ids[i],\n filesize=len(self.logs[i].get('data')) if self.logs[i].get('data') else 0,\n ))\n\n for item in self.items:\n item[dest] = self.logs_updated", "def config_logging():\n\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n \"[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s\")\n\n # Enable debug HTTP request/response\n if root_logger.getEffectiveLevel() == logging.DEBUG:\n http_client.HTTPConnection.debuglevel = 1\n else:\n http_client.HTTPConnection.debuglevel = 0\n\n zabbix_handler = logging.StreamHandler(sys.stdout)\n zabbix_handler.setFormatter(formatter)\n root_logger.addHandler(zabbix_handler)\n return root_logger", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def get_log()->dict:\n return execute_command(\"SELECT log FROM log\").fetchall()[0][0]", "def get_logs(build_dir, log_files, pod_name, filters, objref_dict):\n all_logs = {}\n results = {}\n old_dict_len = len(objref_dict)\n\n all_logs = get_all_logs(build_dir, True)\n apiserver_filename = find_log_files(all_logs, \"kube-apiserver.log\")\n kubelet_filenames = find_log_files(all_logs, \"kubelet.log\")\n if not pod_name and not objref_dict:\n return get_logs_no_pod(apiserver_filename, kubelet_filenames, filters,\n objref_dict, all_logs)\n for kubelet_log in kubelet_filenames:\n if pod_name:\n parsed_dict, pod_in_file = parse_log_file(kubelet_log, pod_name, make_dict=True,\n objref_dict=objref_dict)\n objref_dict.update(parsed_dict)\n if len(objref_dict) > old_dict_len or not pod_name or pod_in_file or not objref_dict:\n if log_files == []:\n log_files = [kubelet_log]\n if apiserver_filename:\n log_files.extend(apiserver_filename)\n for log_file in log_files:\n parsed_file = parse_log_file(log_file, pod_name, filters,\n objref_dict=objref_dict)\n if parsed_file:\n results[log_file] = parsed_file\n break\n\n return all_logs, results, objref_dict, log_files", "def getLogFormat(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, 
\n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FORMAT_KEY)", "def get_hedge_logfile(dir):\n logfiles = Path(dir).glob('*')\n max_m_time = -1\n current_log = None\n for lf in logfiles:\n mtime = lf.stat().st_mtime\n if mtime > max_m_time:\n max_m_time = mtime\n current_log = lf\n if current_log:\n logging.debug(f\"Reading hedge logfile {current_log}.\")\n else:\n logging.warning(f\"No hedge log found. Ensure hedge logfile is being written and correct address is entered in settings.\")\n return current_log", "def _config_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n lines = []\n for config in res['configs']:\n line = '* ' if config['current'] else ' '\n\n if ctx.verbose:\n line += config['mtime'] + ' '\n\n line += config['name']\n lines.append(line)\n\n return \"\\n\".join(lines)", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def refresh_configuration(self):\n pass", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs", "def _set_instance_config(self):\n\t\t\n\t\tif \"PARAMETERS_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PARAMETERS_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PARAMETERS_NAME\"] = self._get_params_filepath()\n\t\t\n\t\tif \"FILTER_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own FILTER_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"FILTER_NAME\"] = self._get_conv_filepath()\n\t\t\n\t\t\n\t\tif \"CATALOG_NAME\" in self.config.keys():\n\t\t\tlogger.warning(\"You specified your own CATALOG_NAME, but I will *NOT* use it !\")\n\t\t\tdel self.config[\"CATALOG_NAME\"]\n\n\t\tif \"PSF_NAME\" in self.config.keys():\n\t\t\tlogger.info(\"You specified your own PSF_NAME, I will use it.\")\n\t\telse:\n\t\t\tself.config[\"PSF_NAME\"] = self._get_psf_filepath()", "def get_config(nagios_conf, events=False, service_perf=False, host_perf=False):\n nagios_conf = ensure_string(nagios_conf)\n\n nagios_cfg_file = tempfile.NamedTemporaryFile(mode=\"a+b\", delete=False)\n nagios_cfg_file.write(nagios_conf)\n nagios_cfg_file.flush()\n\n config = {\n 'instances': [\n {\n 'nagios_conf': nagios_cfg_file.name,\n 'collect_events': events,\n 'collect_service_performance_data': service_perf,\n 'collect_host_performance_data': host_perf\n }\n ]\n }\n\n return config, nagios_cfg_file", "def init_configs():\n conf = ExtendedConfigParser()\n try:\n conf.read([vsa_conf_file])\n except ConfigParser.ParsingError, e:\n # non of the services has started, so just exit and print message directly to log file (and to stdout).\n #TODO: loggit\n #fd = open(log_file, 'w+')\n message = \"%s - Configuration file was corrupted.\" %\\\n strftime(\"%Y-%m-%d %X\", localtime())\n #fd.write(message)\n print message\n sys.exit(0)\n else:\n return conf", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure 
instances", "def update_logfile(mem_id):\n with open(\"logfile.txt\",\"r+\") as log:\n logs_list=log.readlines()\n today=date.today()\n given_date=today.strftime(\"%d/%m/%Y\")\n if book_loan_status.status == \"On Loan\":\n log_list_pos=0\n count=len(logs_list)\n #For loop check to see if a books return status is not returned\n for x in logs_list:\n log_str=x.strip()\n log_selection=log_str.split(\":\")\n if str(log_selection[0]) == str(book_id)\\\n and str(log_selection[-2])==\"Not Returned\":\n del log_selection[-2]\n #Then inserts given date instead\n log_selection.insert(2,given_date)\n updated=':'.join(log_selection)\n del logs_list[log_list_pos]\n logs_list.insert(log_list_pos,updated)\n for index in range(0,count):\n strip_list=logs_list[index]\n stripped_str=strip_list.strip()\n del logs_list[index]\n logs_list.insert(index,stripped_str)\n log.seek(0)\n log.truncate(0)\n for index in range(0,count):\n log.write(str(logs_list[index]+'\\n'))\n log.close()\n log_list_pos+=1\n elif book_loan_status.status == \"Available\":\n new_entry=str(\"%d:%s:Not Returned:%d\"%(book_id,given_date,mem_id))\n logs_list.append(new_entry)\n count=len(logs_list)\n for x in range(0,count):\n strip_list=logs_list[x]\n stripped_str=strip_list.strip()\n del logs_list[x]\n logs_list.insert(x,stripped_str)\n log.seek(0)\n log.truncate(0)\n for x in range(0,count):\n log.write(str(logs_list[x]+'\\n'))\n log.close()\n else:\n print(\"Error \")\n log.close()", "def logs(self):\n if not self._logs:\n self.read_logs()\n return self._logs", "def write_configs(logconf_dir):\n for name in list_logging_conf():\n conf = load_logging_conf(name)\n with io.open(os.path.join(logconf_dir, name), 'w') as f:\n f.write(json.dumps(conf))", "def __update(self):\n if self.__file:\n target_file = open(self.__file)\n for attr in dir(self):\n if not attr.startswith(\"_\") and \\\n (self.__overwrite or (attr not in self.__exclude)) \\\n and not self.__is_attr_callable(attr):\n try:\n delattr(self, attr)\n except AttributeError:\n pass\n pool = yaml.load(target_file)\n target_file.close()\n if pool: # could be None\n for key, val in pool.iteritems():\n if not key.startswith(\"_\") and \\\n (self.__overwrite or (key not in self.__exclude)) \\\n and not self.__is_attr_callable(key):\n setattr(self, key, val)\n if hasattr(self, 'log_config_file_changes')\\\n and self.log_config_file_changes:\n logging.getLogger(__name__).info(\"Config file has updated.\")", "def _get_logs(self):\n contents = dict()\n contents[\"Scheduler\"] = self._parse_log_content(\n self.scheduler.client.get_scheduler_logs()\n )\n log_workers = self.scheduler.client.get_worker_logs()\n for i, (_, worker_content) in enumerate(log_workers.items()):\n contents[f\"Worker-{i}\"] = self._parse_log_content(worker_content)\n return contents", "def get_changelog(self, when=0, db=None):\r\n if not db:\r\n db = self.env.get_db_cnx()\r\n cursor = db.cursor()\r\n if when:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"ORDER BY time\",\r\n (self.id, when, str(self.id), when, self.id, when))\r\n else:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s \"\r\n \"UNION \"\r\n \"SELECT 
time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"ORDER BY time\", (self.id, str(self.id), self.id))\r\n log = []\r\n for t, author, field, oldvalue, newvalue in cursor:\r\n log.append((int(t), author, field, oldvalue or '', newvalue or ''))\r\n return log", "def get_effective_configuration(analysis_run_id):\n session = connection.Session()\n try: \n run_config = aliased(Configuration)\n analysis_config = aliased(Configuration)\n workflow_config = aliased(Configuration)\n \n my_configs = session.query(AnalysisRun.analysis_run_id, run_config.config.label(\"run_config\"),\n analysis_config.config.label(\"analysis_config\"), \n workflow_config.config.label(\"workflow_config\")).\\\n join(Analysis, AnalysisRun.analysis_id == Analysis.analysis_id).\\\n join(Workflow, AnalysisRun.workflow_id == Workflow.workflow_id).\\\n outerjoin(run_config, AnalysisRun.config_id == run_config.config_id).\\\n outerjoin(analysis_config, Analysis.config_id == analysis_config.config_id).\\\n outerjoin(workflow_config, Workflow.config_id == workflow_config.config_id).\\\n filter(AnalysisRun.analysis_run_id == analysis_run_id).first()\n config_list = [my_configs.workflow_config,\n my_configs.analysis_config, my_configs.run_config]\n except:\n session.rollback()\n raise\n finally:\n session.close()\n connection.engine.dispose()\n\n return merge_configurations(config_list)", "def logs_directory(self):", "def mutate_config_files(self):\n self.__cache.clear()\n\n old_mutate_ns = self._mutable_ns or self._namespace\n self._mutable_ns = self._reload_config_files()\n self._warn_immutability()\n fresh = self._diff_ns(old_mutate_ns, self._mutable_ns)\n\n def key_fn(item):\n # Py3 won't sort heterogeneous types. 
Sort None as TAB which has a\n # very low ASCII value.\n (groupname, optname) = item[0]\n return item[0] if groupname else ('\\t', optname)\n sorted_fresh = sorted(fresh.items(), key=key_fn)\n for (groupname, optname), (old, new) in sorted_fresh:\n groupname = groupname if groupname else 'DEFAULT'\n LOG.info(\"Option %(group)s.%(option)s changed from \"\n \"[%(old_val)s] to [%(new_val)s]\",\n {'group': groupname,\n 'option': optname,\n 'old_val': old,\n 'new_val': new})\n for hook in self._mutate_hooks:\n hook(self, fresh)\n return fresh", "def load_config(self):\n if (self.last_updated != os.stat(self.config_file).st_mtime):\n with open(self.config_file, 'r') as stream:\n self.config = yaml.load(stream, Loader=yaml.FullLoader)\n self.last_updated = os.stat(self.config_file).st_mtime\n return self.config", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def get_hosts(self):\n self.logger.debug(colorama.Fore.BLUE +\n \"jsnapy.cfg file location used : %s\" %\n get_config_location(), extra=self.log_detail)\n self.logger.debug(colorama.Fore.BLUE +\n \"Configuration file location used : %s\" %\n get_path('DEFAULT', 'config_file_path'), extra=self.log_detail)\n \n if self.args.pre_snapfile is not None:\n output_file = self.args.pre_snapfile\n elif self.args.snapcheck is True and self.args.pre_snapfile is None:\n output_file = \"snap_temp\"\n self.snap_del = True\n else:\n output_file = \"\"\n conf_file = self.args.file\n check = self.args.check\n snap = self.args.snap\n if conf_file is not None:\n if os.path.isfile(conf_file):\n config_file = open(conf_file, 'r')\n self.main_file = yaml.load(config_file)\n elif os.path.isfile(os.path.join(get_path('DEFAULT', 'config_file_path'), conf_file)):\n fpath = get_path('DEFAULT', 'config_file_path')\n config_file = open(os.path.join(fpath, conf_file), 'r')\n self.main_file = yaml.load(config_file)\n else:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! 
Config file '%s' is not present \" %\n conf_file, extra=self.log_detail)\n sys.exit(1)\n else:\n if self.args.hostname and self.args.testfiles:\n temp_dict = {'hosts':[{'device':'', 'username':'', 'passwd':''}], 'tests':[]}\n temp_dict['hosts'][0]['device'] = self.args.hostname\n temp_dict['hosts'][0]['username'] = self.args.login\n temp_dict['hosts'][0]['passwd'] = self.args.passwd\n for tfile in self.args.testfiles:\n temp_dict['tests'].append(tfile)\n self.main_file = temp_dict\n\n\n #### if --check option is given for sqlite, then snap file name is not compulsory ####\n #### else exit the function saying arguments not correct ####\n if self.main_file.__contains__(\n 'sqlite') and self.main_file['sqlite'] and self.main_file['sqlite'][0]:\n self.chk_database(\n self.main_file,\n self.args.pre_snapfile,\n self.args.post_snapfile,\n check,\n snap)\n else:\n if (self.args.check is True and (\n self.args.file is None or self.args.pre_snapfile is None or self.args.post_snapfile is None)):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n self.login(output_file)", "def get_dict(self):\n\tself.log.debug('Getting dictionary from config files: %s', str(self.file_list))\n\tfor cfg_file in self.file_list:\n\t \"\"\"\n\t We want to append dictionaries from all the config files.\n\t \"\"\"\n\t if self.cfg_type == None: self.cfg_type = self._get_cfg_type(cfg_file)\n\t self.log.debug('Updating dictionary from config file in the order provided: %s',str(cfg_file) )\n\t if self.cfg_type.lower() in ['yaml', \"yml\"]: self._get_dict_yaml(cfg_file)\n\t elif self.cfg_type.lower() == 'xml': self._get_dict_xml(cfg_file)\n\t elif self.cfg_type.lower() == 'json': self._get_dict_json(cfg_file)\n\t elif self.cfg_type.lower() == 'ini': self._get_dict_ini(cfg_file)\n\t \n\treturn self.cfg_dict", "def log(self):\r\n return self._log", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def _add_change_log(self):\n wiz = self.machine_email_id\n change_log = self.env['machine.instance.change_log'].create({\n 'name': wiz.sub_subject,\n 'date': wiz.date,\n 'duration': wiz.duration,\n 'user_id': wiz.user_id.id,\n 'priority': wiz.priority,\n 'machine_instance_id': self.machine_instance_id.id,\n\n })\n self.change_log_id = change_log.id", "def config_logger(logger: logging.Logger, conf: Config) -> None:\n logger.setLevel(conf.logger_level)\n logger.propagate = conf.propagate\n\n # remove existing handlers and filters\n for handler in logger.handlers[:]:\n if isinstance(handler, (logging.FileHandler, logging.StreamHandler)):\n handler.close()\n logger.removeHandler(handler)\n\n for fltr in logger.filters[:]:\n logger.removeFilter(fltr)\n\n if conf.log_enabled and conf.filename is not None:\n l_formatter = logging.Formatter(conf.log_fmt, conf.log_datefmt)\n filename = _create_log_filename(conf.log_dir, conf.sub_dir, conf.filename)\n l_handler = TimedRotatingFileHandler(filename=filename, when='midnight')\n l_handler.setLevel(conf.log_level)\n l_handler.setFormatter(l_formatter)\n logger.addHandler(l_handler)\n\n if conf.cout_enabled:\n c_formatter = logging.Formatter(conf.cout_fmt, conf.cout_datefmt)\n c_handler = logging.StreamHandler()\n c_handler.setLevel(conf.cout_level)\n c_handler.setFormatter(c_formatter)\n logger.addHandler(c_handler)\n\n if len(logger.handlers) == 0:\n logger.addHandler(logging.NullHandler())", "def 
getConfiguration(self):\n # TODO: Split metadata (e.g. name and version) from configuration data.\n # Currently, we do this by selectively copying from __dict__. A\n # cleaner separation would require refactoring all the way through how\n # we create update objects.\n config = {}\n for key in self.__dict__:\n if key in self.CONFIG_FIELDS:\n config[key] = self.__dict__[key]\n return config", "def logging_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfig']:\n return pulumi.get(self, \"logging_config\")", "def view_config_changes(config):\n rev = reverter.Reverter(config)\n rev.recovery_routine()\n rev.view_config_changes()", "def configure_logger(self, detached):\n\n log_level = self.log_conf['level'].upper()\n if not hasattr(logging, log_level):\n raise mcadminpanel.agent.errors.ConfigurationError(\n 'Improperly configured log level: {}'.format(log_level),\n )\n log_level = getattr(logging, log_level)\n\n handlers = []\n\n file_handler = logging.handlers.TimedRotatingFileHandler(\n self.log_conf['file'],\n when='midnight',\n )\n file_handler.setLevel(log_level)\n handlers.append(file_handler)\n\n if not detached:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(log_level)\n handlers.append(stream_handler)\n\n logging.basicConfig(\n level=log_level,\n datefmt=self.log_conf['date_format'],\n format=self.log_conf['format'],\n handlers=handlers,\n )", "def get_old_log(self):\n return Gumtree.gumtree.getOldLog() + ';'", "def diff_configuration(self, ignore_warning=False):\n try:\n diff = self.config.diff(rb_id=0, ignore_warning=ignore_warning)\n self.queue_message(\"log\", \"Configuration diff completed.\")\n return diff\n except (self.pyez_exception.RpcError,\n self.pyez_exception.ConnectError) as ex:\n raise AnsibleError('Failure diffing the configuraton: %s' %\n (str(ex)))", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log" ]
[ "0.7078796", "0.6454262", "0.6153468", "0.609547", "0.6065723", "0.6051836", "0.60207915", "0.6019159", "0.59815073", "0.59241617", "0.5814336", "0.58045876", "0.5802499", "0.5684129", "0.56608063", "0.5597701", "0.5580219", "0.55438983", "0.55438983", "0.54896456", "0.5426725", "0.5403126", "0.53940386", "0.5368825", "0.5354504", "0.5310601", "0.5284269", "0.52640504", "0.5241901", "0.5240994", "0.523443", "0.52321434", "0.5211462", "0.51818484", "0.5172571", "0.5165425", "0.5164364", "0.516226", "0.5127854", "0.51204795", "0.51204014", "0.5118479", "0.51163447", "0.51106936", "0.5106495", "0.5105921", "0.5102235", "0.50983244", "0.5070865", "0.50452095", "0.5038051", "0.50188816", "0.50137264", "0.5004678", "0.5002013", "0.49995095", "0.4982921", "0.49789265", "0.49781975", "0.4965833", "0.49596116", "0.4947799", "0.4937836", "0.49366102", "0.49273258", "0.4920504", "0.49200946", "0.49141085", "0.48991168", "0.48989698", "0.48989698", "0.4896921", "0.4890568", "0.4868451", "0.48645222", "0.48563677", "0.48539194", "0.48472747", "0.4847123", "0.48382515", "0.48335353", "0.48297283", "0.4829447", "0.48261172", "0.48246372", "0.48227662", "0.4821163", "0.4816054", "0.48142394", "0.48127368", "0.48106426", "0.48087507", "0.480709", "0.48056895", "0.4804547", "0.48031566", "0.479973", "0.47994116", "0.47916722", "0.47916722" ]
0.77249867
0
Do lots of magic to make alembic work programmatically from the CLI.
def get_alembic_config(database_url: str) -> Config:
    migrations_dir = os.path.dirname(os.path.abspath(__file__))
    directory = os.path.join(migrations_dir, "migrations")
    config = Config(os.path.join(migrations_dir, "alembic.ini"))
    config.set_main_option("script_location", directory.replace("%", "%%"))
    config.set_main_option("sqlalchemy.url", database_url)
    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(arguments):\n migration = Migration(arguments)\n return migration.run()", "def current():\n alembic_command.current(alembic_config, verbose=True)", "def _init_db():\n import alembic.config\n import alembic.command\n alembic_cfg = alembic.config.Config('alembic.ini')\n alembic_cfg.attributes['configure_logger'] = False\n alembic.command.upgrade(alembic_cfg, 'head')\n _reset_db(get_test_db_session())", "def cli() -> None:\n @app.shell_context_processor\n def make_shell_context() -> Dict[str, Any]:\n return dict(app=app, db=db)", "def cli(self, env):\n raise NotImplementedError", "def configure(self) -> \"Alembic\":\n\n if self.alembic_config is None:\n self.alembic_config = alembic.config.Config()\n\n self.alembic_config.set_main_option(\"script_location\", self.migration_directory)\n self.alembic_config.set_main_option(\n \"sqlalchemy.url\",\n PyFunceble.cli.facility.CredentialLoader.get_uri(),\n )\n\n return self", "def main():\n ensure_not_root()\n config.setup()\n model.init_db()\n manager.run()", "def run_database_migration(db_session=None):\n # type: (Optional[Session]) -> None\n ini_file = get_constant(\"MAGPIE_ALEMBIC_INI_FILE_PATH\")\n LOGGER.info(\"Using file '{}' for migration.\".format(ini_file))\n alembic_args = [\"-c\", ini_file, \"upgrade\", \"heads\"]\n if not isinstance(db_session, Session):\n alembic.config.main(argv=alembic_args)\n else:\n engine = db_session.bind\n with engine.begin() as connection:\n alembic_cfg = alembic.config.Config(file_=ini_file)\n alembic_cfg.attributes['connection'] = connection\n alembic.command.upgrade(alembic_cfg, \"head\")", "def cli():\n parser = get_argparser()\n\n args = parser.parse_args()\n check_args(args)\n if args.v:\n print('ERAlchemy version {}.'.format(__version__))\n exit(0)\n render_er(\n args.i,\n args.o,\n include_tables=args.include_tables,\n include_columns=args.include_columns,\n exclude_tables=args.exclude_tables,\n exclude_columns=args.exclude_columns,\n schema=args.s\n )", "def main():\n db = _db.Database(experiment.ORACLE_PATH)\n db.populate_kernel_names_table()\n db.commit()", "def do_command(self, args):\n chk_arg_count(args, 0)\n dbops.init_database()", "def init_db_command():\n init_db()\n # click.command() defines a command line command called init-db that calls the init_db function and shows a success message to the user. 
\n click.echo('Initialized the database.')", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def initialize(directory=None, multidb=False):\n config = Config()\n config.set_main_option(\"script_location\", directory)\n config.config_file_name = os.path.join(directory, \"alembic.ini\")\n command.init(config, directory, \"singledb\")", "def cli(ctx):", "def cli(ctx):", "def command_dbtool(self):\n dbtool.main(*self.args())", "def do_command(self, args):\n testops = dbops.Tests()\n testops.add(args)", "def pytest_configure():\n exec(open(\"script/generate_sql\").read())", "def cli(ctx):\n #TODO", "def init_db_command():\n # db.create_all()\n Role.insert_roles()\n admin = User(email=current_app.config[\"FLASK_ADMIN\"],\n password=\"secure\", username=\"zhangzhi_up\", confirmed=True)\n db.session.add(admin)\n db.session.commit()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo(\"Initialized the database.\")", "def init_db_command():\n init_db()\n click.echo(\"Initialized the database.\")", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def main(ctx: click.core.Context, host: str, user: str, password: typing.Optional[str], schema: str) -> None:\n ctx.obj = {}\n creds = tribble.database.Creds(host, user, password, schema)\n engine = tribble.database.connect_db(creds)\n contract.Session.configure(bind=engine)\n ctx.obj['creds'] = creds\n ctx.obj['engine'] = engine", "def init_db_command():\n db_init()\n click.echo('Initialized the database.')", "def main(argv: t.List[str] = sys.argv):\n if len(argv) < 2:\n usage_message(argv)\n\n config_uri = get_config_uri(argv)\n request = init_websauna(config_uri)\n\n with transaction.manager:\n engine = request.dbsession.get_bind()\n # Always enable UUID extension for PSQL\n # TODO: Convenience for now, because we assume UUIDs, but make this somehow configurable\n engine.execute('CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"')\n\n Base.metadata.create_all(engine)", "def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n model.metadata.create_all()\n\n # Initialisation here ... this sort of stuff:\n\n # some_entity = model.Session.query(model.<modelfile>.<Some_Entity>).get(1)\n # e.g. 
foo = model.Session.query(model.identity.User).get(1)\n # from datetime import datetime\n # some_entity.poked_on = datetime.now()\n # model.Session.add(some_entity)\n u = User(name=\"cemeyer2\", superuser=True, enabled=True)\n populate_user_from_active_directory(u)\n s = SolutionSemester(year=-1, season=u\"Fall\", isSolution=True)\n s2 = BaseSemester(year=-2, season=u'Fall', isSolution=True)\n Session.commit()", "def cli():\n pass", "def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n model._Base.metadata.create_all(bind=meta.engine)", "def cli(args): # noqa; pylint: disable=unused-argument", "def _migrate(self):\n with self.engine.begin() as conn:\n context = alembic.migration.MigrationContext.configure(conn)\n current_rev = context.get_current_revision()\n self.log.debug('Current migration revision: %s' % current_rev)\n\n config = alembic.config.Config()\n config.set_main_option(\"script_location\",\n \"zuul:driver/sql/alembic\")\n config.set_main_option(\"sqlalchemy.url\",\n self.connection_config.get('dburi'))\n\n # Alembic lets us add arbitrary data in the tag argument. We can\n # leverage that to tell the upgrade scripts about the table prefix.\n tag = {'table_prefix': self.table_prefix}\n alembic.command.upgrade(config, 'head', tag=tag)", "def init_db_command():\n click.echo(\"Initializing the database.\")\n init_db()\n click.echo(\"Initialized the database.\")", "def upgradedb(ctx):\n path = Path(__file__).resolve().parent.parent\n conf = Config(str(path / \"migrations\" / \"alembic.ini\"))\n conf.set_main_option(\"script_location\", str(path / \"migrations\"))\n command.upgrade(conf, \"heads\")", "def test_app() -> Generator[FastAPI, Any, None]:\n Base.metadata.create_all(engine) # Create tables\n yield app\n Base.metadata.drop_all(engine) # Drop tables", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def model_orchestrator():\n\n from offline import orchestrator\n orchestrator.orchestrator()", "def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")", "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n # meta.metadata.drop_all(bind=meta.engine, checkfirst=True)\n meta.metadata.create_all(bind=meta.engine)", "def init_db_command():\r\n init_db()\r\n click.echo('Initialized the database.')", "def run_migrations_online():\n # Overload db config on top of alembic config\n alembic_config = config.get_section(config.config_ini_section)\n db_config = config.get_section(db_name)\n for key in db_config:\n alembic_config[key] = db_config[key]\n\n if os.environ[\"DEPLOYMENT_STAGE\"] != db_name:\n raise Exception(\"Deployment stage os environ var and target db arg are different\")\n\n alembic_config[\"sqlalchemy.url\"] = UploadDbConfig().database_uri\n\n engine = engine_from_config(\n alembic_config,\n prefix='sqlalchemy.',\n poolclass=pool.NullPool)\n\n with engine.connect() as connection:\n context.configure(\n 
connection=connection,\n target_metadata=target_metadata\n )\n\n with context.begin_transaction():\n context.run_migrations()", "def cli_init(dbfile, demo):\n with atomic(dbfile) as cursor:\n create_tables(cursor)\n if demo:\n create_user(cursor, 'tester', 'pw', '[email protected]')\n create_user(cursor, 'special_tester', 'pw',\n '[email protected]',\n groups=['special'])", "def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n from practice.model.meta import Base, Session\n\n log.info(\"Creating tables\")\n\n Base.metadata.drop_all(checkfirst=True, bind=Session.bind)\n Base.metadata.create_all(bind=Session.bind)\n\n log.info(\"Successfully setup\")", "def init_db_command() -> None:\n db.create_all()\n click.echo(\"Initialized database.\")", "def cli() -> None:\r\n config_argparse = _configfile_parser()\r\n config_args, _ = config_argparse.parse_known_args()\r\n\r\n defaults = {}\r\n\r\n if config_args.config: \r\n defaults = _load_config(config_args)\r\n\r\n parser = _cli(config_argparse, defaults)\r\n _add_standard_args(parser) \r\n \r\n subparser = parser.add_subparsers()\r\n _add_create_command(subparser)\r\n _add_update_command(subparser) \r\n\r\n args = parser.parse_args()\r\n command = args.cmd\r\n command.execute(args)", "def migration():", "def initdb_command():\n init_db()", "def initdb_command():\n init_db()", "def main(db_path, schema_json):\n create_db(db_path, schema_json)", "def cli():\r\n pass", "def cli():\n\n pass", "def cli():\n ...", "def init_new_db(args):\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n session = Session()\n session.add(Environment(name='normal', slickurl='http://slicker.homestead-corp.com/slickij', buildurl='?', filename='hs-tcrunij.tar.gz', tcrunijsubdir='hs-tcrunij/tcrunij'))\n session.add(Environment(name='dev', slickurl='http://octomom.homestead-corp.com/slickij', buildurl='?', filename='tcrunij.tar.gz', tcrunijsubdir='tcrunij/tcrunij'))\n session.commit()", "def main():\r\n db = connect_database()\r\n with db:\r\n if sys.argv[1] == \"-s\":\r\n select_all(db, sys.argv[2])\r\n elif sys.argv[1] == \"-i\":\r\n cus_data = []\r\n for i in range(2, len(sys.argv)):\r\n cus_data.append(sys.argv[i])\r\n insert_customer(db, cus_data)\r\n elif sys.argv[1] == \"-c\":\r\n create_tables()\r\n elif sys.argv[1] == \"-pw\":\r\n pop_waiting(db, sys.argv[2])\r\n elif sys.argv[1] == \"-ph\":\r\n pop_help(db, sys.argv[2])\r\n elif sys.argv[1] == \"-r\":\r\n refresh_tables(db)\r\n elif sys.argv[1] == \"-e\":\r\n export_helped_table(db)\r\n else:\r\n print errorArgument\r\n db.close()", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def initdb_command():\n init_db()\n print('Initialized the database.')", "def setup_app(command, conf, vars):\n\n load_environment(conf.global_conf, conf.local_conf)\n\n if asbool(conf.get('reset_database', 'false')):\n reset_db(meta.engine)\n\n initialize_dictionaries(meta.engine)\n\n initialize_db_defaults(meta.engine)", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass" ]
[ "0.64835244", "0.6423896", "0.63449913", "0.6249926", "0.61865556", "0.618295", "0.6082049", "0.5999133", "0.59890723", "0.5959365", "0.5921134", "0.58983696", "0.5896638", "0.5896638", "0.5881768", "0.58784425", "0.58784425", "0.5840882", "0.58390534", "0.5833645", "0.5830525", "0.5797615", "0.5782999", "0.57668823", "0.57668823", "0.5762105", "0.5762105", "0.5762105", "0.5762105", "0.5762105", "0.5762105", "0.5762105", "0.5762105", "0.5713805", "0.57136905", "0.5711573", "0.5710952", "0.56944203", "0.56865895", "0.56766534", "0.5676165", "0.5674308", "0.56720245", "0.56604505", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5657305", "0.5656742", "0.5647469", "0.56371653", "0.5636755", "0.5636584", "0.563336", "0.5614824", "0.5608179", "0.5600321", "0.5580306", "0.5558251", "0.5551738", "0.5551738", "0.5534978", "0.55332714", "0.55207574", "0.5514212", "0.5505607", "0.54934406", "0.54797107", "0.54797107", "0.54797107", "0.54797107", "0.54797107", "0.5477696", "0.5472448", "0.5472448", "0.5472448", "0.5472448" ]
0.0
-1
Runs the migrations and creates all of the database objects.
def init_db(database_url: str, fidesctl_config: FidesctlConfig) -> None:
    alembic_config = get_alembic_config(database_url)
    upgrade_db(alembic_config)
    load_default_taxonomy(fidesctl_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_db():\n database.db.create_all()\n get_ulm()\n for fixture_file in glob.glob(config.DevelopmentConfig.FIXTURES_DIRS + '/*.json'):\n fixtures = JSONLoader().load(fixture_file)\n load_fixtures(database.db, fixtures)\n MigrationManager().stamp_db()", "def db_create_all(self):\n with self.app_context():\n self.db.create_all()", "def initialise_database():\n with cd(code_dir):\n run(python_add_str + \"python manage.py syncdb --all\")\n run(python_add_str + \"python manage.py migrate --fake\")", "def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()", "def migrate_database(self):\n\n self.db.migrate_database()", "def bootstrap():\n Base.metadata.create_all(engine)", "def init_db():\n import cerbereapp.models\n Base.metadata.create_all(bind=engine)", "def setUp(self):\n db.create_all()", "def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)", "def init_db() -> None: \n \n Base.metadata.create_all(bind=engine)", "def create(self):\n db.create_all()", "def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)", "def post_migrations(self):", "def create_all():\n db.create_all()", "def migrate_db():\n Base.metadata.create_all(ENGINE)", "def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()", "def db_initialise():\n generate_migration_file()\n if not MySQLScheme.fetch_one(IS_MIGRATION_TABLE,\n **{\"args\": {'schema': SCHEMA}}):\n with open(MIGRATION_FILE, 'r') as init_sql:\n data = init_sql.read()\n\n if f\"CREATE TABLE IF NOT EXISTS {MIGRATION_TABLE}\" not in data:\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_MIGRATION_UP.format(f\"upgrade-{when}\", when,\n MIGRATION_TABLE)\n down = MYSQL_MIGRATION_DOWN.format(f\"downgrade-{when}\",\n MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: \"\n f\"{os.path.join('migrations', sql_file)}\")\n else:\n when = re.findall('[0-9]+', data)[0]\n\n generate_migration_file()\n dbi_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n MySQLScheme.commit(getattr(dbi_query, f\"upgrade_{when}\").sql)\n LOGGER.info(f\"initial successful migration: {when}\")", "def setup_database(self):\n self.db.setup_database()", "def db_createall():\n db.create_all()", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def createdb():\n db.create_all()", "def 
_initial_setup(self):\n logger.info(\"Performing initial database setup...\")\n\n # Set up the migration_version table\n self._execute(\n \"\"\"\n CREATE TABLE migration_version (\n version INTEGER PRIMARY KEY\n )\n \"\"\"\n )\n\n # Initially set the migration version to 0\n self._execute(\n \"\"\"\n INSERT INTO migration_version (\n version\n ) VALUES (?)\n \"\"\",\n (0,),\n )\n\n # Set up any other necessary database tables here\n\n logger.info(\"Database setup complete\")", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables", "def make_db():\n\n db.create_all()", "def setup(self):\n #print \"Creating test database...\"\n files = glob.glob(os.path.join(self.home_dir, 'sqlFiles', '*.sql'))\n for fls in files:\n loc = fls.rfind('/')\n #print(\" \" + fls.replace('.sql', '')[loc + 1:])\n flh = open(fls, 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()\n for fls in ['INSERTS', 'TRIGGERS']:\n #print(fls)\n flh = open(os.path.join(self.home_dir, 'sqlFiles', fls), 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()", "def migrate(self):\n\tpass", "def create_database_structure(self):\n Base.metadata.create_all(self.engine)", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def initdb():\n db.create_all()", "def initdb():\n db.create_all()", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def run_migrations(self, migrations):\n for migration in migrations:\n name = migration[\"name\"]\n migration[\"script\"] = self.get_sql_script(name)\n\n if self.dry_run:\n for migration in migrations:\n print(f'---------------- {migration[\"name\"]} ----------------')\n print(migration[\"script\"])\n return\n\n if not self.accept_all and not self.prompt_for_migrations(migrations):\n return\n\n applied_migrations = []\n with self.target_db.begin() as conn:\n for migration in migrations:\n name = migration[\"name\"]\n script = migration[\"script\"]\n if self.apply_migrations:\n print(f\"Applying {name}\")\n conn.execute(script)\n applied_migrations.append(name)\n if self.register:\n self.register_migrations(applied_migrations)", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def create_all() -> None:\n Base.metadata.create_all(db.engine)", "def init_db():\n current_app.logger.info('Creating database...')\n db.drop_all()\n db.create_all()\n db.session.commit()", "def db_create_models():\n # db_createall doesn't work if the models aren't imported\n import_string('models', silent=True)\n for blueprint_name, blueprint in app.blueprints.items():\n import_string('%s.models' % blueprint.import_name, silent=True)\n db.create_all()", "def create_db(self):", "def execute_migrations(self):\n response = self._connection.session.post(\n 
self._connection.url + \"object_manager_attributes_execute_migrations\"\n )\n return self._raise_or_return_json(response)", "def create_tables() -> None:\n print(\"Creating database tables using SQLAlchemy ORM\")\n Base.metadata.create_all(engine)\n print(\"Done creating tables\")", "def setup_db():\n logger.info('Setting up db')\n setup_all_db()\n setup_emails()", "def create_db():\n _init_db()\n db.create_all()", "def create_db():\n db.create_all()\n print ('Intialized....!')", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def create_db():\n db.create_all()\n print(\"DB Created\")", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def create_db():\n db.create_all()\n print('Database structure created successfully')", "async def migrate(self):\n # Controlla se ci sono tabelle nel db\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(\"\"\"SELECT COUNT(DISTINCT table_name) as c\n FROM information_schema.columns\n WHERE table_schema = %s\"\"\", (conn.db,))\n db_empty = (await cur.fetchone())[\"c\"] <= 0\n\n # Se ci sono tabelle, prova a leggere `db_version`\n if not db_empty:\n await cur.execute(\"SELECT db_version FROM db_version LIMIT 1\")\n db_version_in_db = await cur.fetchone()\n db_version = 0 if db_version_in_db is None else db_version_in_db[\"db_version\"]\n else:\n db_version = 0\n\n # Prendi la lista di file sql e py da eseguire\n new_migrations = [x for x in self.migrations if x.id > db_version]\n\n # Controlla se ci sono migration da eseguire\n if not new_migrations:\n self.logger.info(\"No new migrations. 
The database is already up to date!\")\n return\n\n # Esegui migrations\n self.logger.info(\"Current db version: @{}\".format(db_version))\n db_version += 1\n current_migration = self.get_migration(db_version)\n while current_migration is not None:\n self.logger.info(\"Executing {}\".format(current_migration.file_name))\n\n if current_migration.type == \"sql\":\n # Leggi ed esegui file sql\n with open(\n os.path.join(os.path.dirname(__file__), \"migrations/{}\".format(current_migration.file_name)), \"r\"\n ) as f:\n data = f.read()\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(data)\n await conn.commit()\n elif current_migration.type == \"py\":\n # Importa modulo py\n module = importlib.import_module(\"migrator.migrations.{}\".format(current_migration.file_name[:-3]))\n migr = getattr(module, \"do\")\n await migr()\n\n # Migration eseguita, aggiorna `db_version`\n self.logger.info(\"Migration {} executed with no errors\".format(current_migration.file_name))\n await self.save_db_version(db_version)\n\n # Vai alla prossima migration\n db_version += 1\n current_migration = self.get_migration(db_version)\n self.logger.info(\"All migrations executed correctly\")", "def createDb():\n db.drop_all()\n db.create_all()", "def initialize_empty_database(self):\r\n Base.metadata.create_all(self.engine)", "def run_migrations_online():\n configuration = config.get_section(config.config_ini_section)\n configuration[\"sqlalchemy.url\"] = get_url()\n connectable = engine_from_config(\n configuration, prefix=\"sqlalchemy.\", poolclass=pool.NullPool\n )\n\n with connectable.connect() as connection:\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n compare_type=True,\n include_schemas=True, # schemas,\n version_table_schema=POSTGRES_SCHEMA,\n include_object=include_schemas([None, POSTGRES_SCHEMA])\n )\n with context.begin_transaction():\n\n context.execute(f\"CREATE SCHEMA IF NOT EXISTS {POSTGRES_SCHEMA};\")\n context.execute(f\"SET search_path TO {POSTGRES_SCHEMA}\")\n context.run_migrations()", "def migration():", "def db_setup(self):\n revision: Table = Table(self.revision_table,\n self.sql_metadata,\n Column(self._MigrationTableColumns.revisions.value, Text, primary_key=True),\n schema=self.revision_table_schema)\n revision.create(self.psql_engine)", "def create_db_structure(self):\n logger.info(\"Creating CRH database structure.\")\n CrhDbModel.metadata.create_all(bind=self.engine)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def migrate():\n run('cd /home/indabom/web && source ./bin/activate && cd ./site && python manage.py migrate')", "def init_db(base):\n base.metadata.create_all(engine, checkfirst=True)", "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def run_migrations_online():\n engine = create_engine(AppConfig.SQLALCHEMY_DATABASE_URI)\n\n with engine.connect() as connection:\n context.configure(\n connection=connection,\n target_metadata=target_metadata\n )\n\n with context.begin_transaction():\n context.run_migrations()", "def create_db_tables(self):\n self.releases = Table(\n RELEASES,\n self.metadata,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"repo\", String),\n Column(\"tag_name\", String),\n Column(\"release_name\", String),\n Column(\"created\", DateTime, default=datetime.datetime.now()),\n extend_existing=True,\n )\n self.commits = Table(\n COMMITS,\n self.metadata,\n 
Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"repo\", String),\n Column(\"branch\", String),\n Column(\"sha\", String),\n Column(\"created\", DateTime, default=datetime.datetime.now()),\n extend_existing=True,\n )\n self.tags = Table(\n TAGS,\n self.metadata,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"repo\", String),\n Column(\"tag_name\", String),\n Column(\"sha\", String),\n Column(\"created\", DateTime, default=datetime.datetime.now()),\n extend_existing=True,\n )\n try:\n self.metadata.create_all()\n log.info(\"Tables created\")\n except Exception as e:\n log.error(\"Error occurred during Table creation!\")\n log.exception(e)", "def run_migration_checks():\n check_model_state()\n check_migration_state()", "def deploy():\n from flask_migrate import upgrade\n\n upgrade() # upgrade to the latest db schema\n\n # setup necessary data to initialize database\n if Conference.query.filter_by(short_name='main').first():\n print('database already initialized')\n else:\n # add registration form questions\n FormConfiguration.insert_formConfiguration()\n Role.insert_roles() # create user roles\n generate_main_conf() # generate default main conference\n generate_admin() # generate the site admin", "def createdb():\n print \"here\"\n db.create_all()", "def db_create():\n db.drop_all()\n db.create_all()\n db.session.commit()", "def run_migrations_online():\n db_host = context.get_x_argument(as_dictionary=True).get('DB_HOST')\n db_port = context.get_x_argument(as_dictionary=True).get('DB_PORT')\n db_user = context.get_x_argument(as_dictionary=True).get('DB_USER')\n db_password = context.get_x_argument(as_dictionary=True).get('DB_PASSWORD')\n db_name = context.get_x_argument(as_dictionary=True).get('DB_NAME')\n\n try_to_create_database(db_host, db_port, db_user, db_password, db_name)\n\n connectable = get_connectable(db_host, db_port, db_user, db_password, db_name)\n with connectable.connect() as connection:\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n compare_type=True,\n render_item=render_item\n )\n with context.begin_transaction():\n context.run_migrations()", "def init_db_command() -> None:\n db.create_all()\n click.echo(\"Initialized database.\")", "def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")", "def init_db():\n engine = create_engine(SQLALCHEMY_ENGINE_STR)\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)", "def setUp(self):\n super(MigrationTestCase, self).setUp()\n\n self.executor = MigrationExecutor(connection)\n self.executor.migrate(self.migrate_from)", "def create_tables():\n db.create_all()", "def create_database():\n Base.metadata.create_all(bind=engine)", "def setup(self):\n return self.setupDatabases()", "def _db_setup(self):\n self.get_connection()\n sql_file = open(db_config.DATABASE_TABLES_SETUP_FILE, 'r')\n with self.conn.cursor() as cur:\n cur.execute(sql_file.read())\n self.conn.commit()\n logger.info(f'The script {db_config.DATABASE_TABLES_SETUP_FILE} has run.')", "def deploy():\n db.drop_all()\n create_DB()\n app.run()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n setup_db(self.app, TEST_DB_PATH)\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.create_all()\n self._populate_db()", "def clean_up():\n drop_all_tables()\n create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n 
self.database_name = \"Capstone\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app , self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n\n self.db.create_all()", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def create_db_execute(self):\n self.execute(query=self.db_create_schema.format(self.db_name))", "def setup_db(app):\n db.app = app\n Migrate(app, db)\n db.init_app(app)", "def initialise(self):\n\n if self.db_type == 'sqlite':\n try:\n # Attempt to create schema if not present, to cope with fresh DB file\n BaseSQLite.metadata.create_all(self.engine)\n except OperationalError:\n print(\"Error creating database schema, possible invalid path? ('\" + self.db_name + \"'). Quitting\")\n exit()\n elif self.db_type == 'postgres':\n try:\n # ensure that create schema scripts created before create table scripts\n event.listen(BasePostgres.metadata, 'before_create', CreateSchema('datastore_schema'))\n BasePostgres.metadata.create_all(self.engine)\n except OperationalError:\n print(f\"Error creating database({self.db_name})! Quitting\")\n exit()", "def setUp(self):\n model.db.create_all()\n (self.conversation,\n self.users,\n self.msgs) = model.seed(server.app)", "def create_all():\n Base.metadata.create_all(engine)", "def create_all():\n Base.metadata.create_all(engine)", "def set_up(self, drop=False):\n\n # todo extract database name from engine url and report for brevity\n engine = self.__orm.engine\n if database_exists(engine.url):\n print(\"Database {} already exists.\".format(engine.url))\n if drop:\n print(\"Dropping old database {}\".format(engine.url))\n drop_database(engine.url)\n with possibly_talking_action(\"Re-creating database...\"):\n create_database(engine.url)\n else:\n with possibly_talking_action(\"Creating database...\"):\n create_database(engine.url)\n\n with possibly_talking_action(\"Creating tables...\"):\n Base.metadata.create_all(engine)\n\n print(\"Database {} created successfully\".format(engine.url))", "def recreate_db():\n drop_db()\n create_db()\n populate_db()" ]
[ "0.74945307", "0.74615896", "0.7393201", "0.7345915", "0.7331787", "0.73113805", "0.72740376", "0.7251851", "0.71819717", "0.7157426", "0.71482134", "0.7122612", "0.70904714", "0.7079735", "0.7049788", "0.70419806", "0.7040114", "0.70348716", "0.7018809", "0.70154107", "0.6996983", "0.6996938", "0.6979835", "0.69749767", "0.6962391", "0.695958", "0.6939626", "0.6914694", "0.68688786", "0.6868683", "0.6868683", "0.6832654", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.68224496", "0.6818681", "0.68119663", "0.6799067", "0.6784581", "0.678241", "0.67680603", "0.6765261", "0.67577696", "0.67503726", "0.67285067", "0.67252964", "0.6709694", "0.66823083", "0.6681633", "0.66810656", "0.66810656", "0.6676774", "0.66508085", "0.66457415", "0.66442", "0.66407204", "0.6624077", "0.6620791", "0.6618848", "0.6611118", "0.66062623", "0.66037035", "0.6601475", "0.6599281", "0.6589557", "0.65834206", "0.65810114", "0.65802544", "0.65798104", "0.6576942", "0.65688205", "0.6565873", "0.6561398", "0.6559788", "0.6545553", "0.6543264", "0.6537243", "0.6535039", "0.65284693", "0.65225106", "0.6515079", "0.6508229", "0.650756", "0.6506393", "0.6502", "0.6499964", "0.64928544", "0.6491729", "0.6481652", "0.647587", "0.647587", "0.6472736", "0.6465183" ]
0.0
-1
Drops all tables/metadata from the database.
def reset_db(database_url: str) -> None:
    engine = get_db_engine(database_url)
    connection = engine.connect()
    SqlAlchemyBase.metadata.drop_all(connection)
    migration_context = MigrationContext.configure(connection)
    version = migration_context._version  # pylint: disable=protected-access
    if version.exists(connection):
        version.drop(connection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def drop_all() -> None:\n Base.metadata.drop_all(db.engine)", "def db_cleanup(self):\n with self.context():\n meido.db.session.remove()\n meido.db.drop_all()", "def delete_all_tables(self):\n\t\tif self.__dbfile is not None:\n\t\t\tfor table_name in list(LocalData.table_info.keys()):\n\t\t\t\tif self.table_exists(table_name):\n\t\t\t\t\tself._conn.execute(\"DROP TABLE %s\" % table_name)\n\t\t\tself._conn.commit()", "def clear_db():\n from flask_monitoringdashboard.database import get_tables, engine\n\n for table in get_tables():\n table.__table__.drop(engine)\n table.__table__.create(engine)", "def drop_all_tables():\n\tcommon_db.drop_all_tables()", "def clearDatabase():\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)", "def drop_all_tables(args):\n engine = sqlalchemy.create_engine(CONFIG.db_uri)\n print(\"Dropping all tables on {}...\".format(CONFIG.db_uri), end=\" \")\n Base.metadata.drop_all(bind=engine)\n print(\"finished.\")", "def drop_db(self) -> None:\n try:\n if not self._check_delete_okay():\n return\n except DatabaseWriteException as e:\n raise e\n\n existing_tables = self.list_tables()\n for table_name in existing_tables:\n self.dynamodb.Table(table_name).delete()", "def drop_all():\n db.drop_all()", "def drop(drop_all=False):\n\n engine = current_app.extensions['meowth_dbutils'].db.engine\n if current_app.extensions['meowth_dbutils'].metadata.bind is None:\n current_app.extensions['meowth_dbutils'].metadata.bind = engine\n with perform(\n name='dbutils drop',\n before='Dropping all project tables',\n fail='Error occured while droping project tables',\n ):\n current_app.extensions['meowth_dbutils'].metadata.drop_all()\n with perform(\n name='dbutils drop',\n before='Dropping alembic versioning table',\n fail='Error occured while dropping alembic table',\n ):\n engine.execute('drop table if exists alembic_version')\n if drop_all:\n with perform(\n name='dbutils drop',\n before='Dropping all other tables in database',\n fail='Error occured while dropping other tables',\n ):\n current_app.extensions['meowth_dbutils'].db.reflect()\n current_app.extensions['meowth_dbutils'].db.drop_all()", "def reset_db():\n\n metadata = sa.MetaData()\n metadata.reflect(engine)\n for tbl in reversed(metadata.sorted_tables):\n tbl.drop(engine)\n create_tables()", "def clean_up():\n drop_all_tables()\n create_all()", "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def 
drop_everything():\n from sqlalchemy.engine.reflection import Inspector\n from sqlalchemy.schema import DropConstraint, DropTable, MetaData, Table\n\n con = db.engine.connect()\n trans = con.begin()\n inspector = Inspector.from_engine(db.engine)\n\n # We need to re-create a minimal metadata with only the required things to\n # successfully emit drop constraints and tables commands for postgres (based\n # on the actual schema of the running instance)\n meta = MetaData()\n tables = []\n all_fkeys = []\n\n for table_name in inspector.get_table_names():\n fkeys = []\n\n for fkey in inspector.get_foreign_keys(table_name):\n if not fkey[\"name\"]:\n continue\n\n fkeys.append(db.ForeignKeyConstraint((), (), name=fkey[\"name\"]))\n\n tables.append(Table(table_name, meta, *fkeys))\n all_fkeys.extend(fkeys)\n\n for fkey in all_fkeys:\n con.execute(DropConstraint(fkey))\n\n for table in tables:\n con.execute(DropTable(table))\n\n trans.commit()", "def drop_everything():\n from sqlalchemy.engine.reflection import Inspector\n from sqlalchemy.schema import DropConstraint, DropTable, MetaData, Table\n\n con = db.engine.connect()\n trans = con.begin()\n inspector = Inspector.from_engine(db.engine)\n\n # We need to re-create a minimal metadata with only the required things to\n # successfully emit drop constraints and tables commands\n # for postgres (based on the actual schema of the running instance)\n meta = MetaData()\n tables = []\n all_fkeys = []\n\n for table_name in inspector.get_table_names():\n fkeys = []\n\n for fkey in inspector.get_foreign_keys(table_name):\n if not fkey[\"name\"]:\n continue\n\n fkeys.append(db.ForeignKeyConstraint((), (), name=fkey[\"name\"]))\n\n tables.append(Table(table_name, meta, *fkeys))\n all_fkeys.extend(fkeys)\n\n for fkey in all_fkeys:\n con.execute(DropConstraint(fkey))\n\n for table in tables:\n con.execute(DropTable(table))\n\n trans.commit()", "def drop_all(self):\n self._engine.execute(\n DDL(f\"drop schema if exists {_schema.CUBEDASH_SCHEMA} cascade\")\n )", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def dropdb():\n db.drop_all()", "def drop():\n if prompt_bool(\"Are you sure you want to lose all your data\"):\n db.drop_all()\n db.engine.execute(\"drop table if exists alembic_version\")", "def clear_db():\n for name in TABLES:\n result = execute_query('truncate table {};'.format(name)), ())", "def _drop_db(keep_tables=None):\n server.db.session.remove()\n if keep_tables is None:\n keep_tables = []\n meta = server.db.metadata\n for table in reversed(meta.sorted_tables):\n if table.name in keep_tables:\n continue\n server.db.session.execute(table.delete())\n server.db.session.commit()", "def drop(self):\n c = self.cursor()\n for table in ['experiment','fact']:\n c.execute(\"drop table if exists {}\".format(table))\n self.commit()", "def delete_db():\n db.drop_all()", "def erase_database():\n metadata = MetaData(engine)\n metadata.reflect()\n metadata.drop_all()\n Base.metadata.create_all(engine)\n return None", "def drop(self):\n cursor = self.connect.create_cursor()\n queries = (\n (\"USE dbPurBeurre\"),\n (\"SET foreign_key_checks = 0\"),\n (\"DROP TABLE IF EXISTS Asso_Prod_Cat\"),\n (\"DROP TABLE IF EXISTS Categories\"),\n (\"DROP TABLE IF EXISTS Produits\")\n )\n\n for query in queries:\n 
cursor.execute(query)", "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def teardown_schema(self):\n models.Base.metadata.drop_all(self.session.bind)", "def drop_all(self, *args, **kwargs):\n _metadata.drop_all(self._engine, *args, **kwargs)\n _metadata.clear()", "def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)", "def drop_db():\n database.db.reflect()\n database.db.drop_all()\n print('Dropped the database')", "def song_clear():\r\n try:\r\n # Drop all tables then recreate them.\r\n Base.metadata.drop_all(bind=engine)\r\n print colored.red(\"Database cleared successfully.\", bold=12)\r\n Base.metadata.create_all(bind=engine)\r\n except:\r\n session.rollback()", "def db_dropall():\n # db_dropall doesn't work if the models aren't imported\n import_string('models', silent=True)\n for blueprint_name, blueprint in app.blueprints.items():\n import_string('%s.models' % blueprint.import_name, silent=True)\n db.drop_all()", "def drop_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"DROP TABLE tweets\")\n conn.execute(\"DROP TABLE tweet_peaks\")", "def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)", "def cleanup(self):\n for table in filter(lambda x: self.cmd.exists(x, silent=(log.level < DEBUG)), self.tables):\n log.info(\"MLoad\", \"Dropping table '{}'...\".format(table))\n self.cmd.drop_table(table, silent=True)", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def _drop_tables(self):\n logging.info(\"Dropping all tables from the database!\")\n db_conn = self.engine.connect()\n query_result = list()\n query_result.append(db_conn.execute(\n \"DROP SCHEMA public CASCADE;CREATE SCHEMA public;\"))\n\n if self.database_choice == 'remote_database' or self.database_choice \\\n == 'remote_database_master':\n query_result.append(db_conn.execute('''\n GRANT ALL PRIVILEGES ON SCHEMA public TO housingcrud;\n GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO housingcrud;\n GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO housingcrud;\n GRANT ALL ON SCHEMA public TO public;\n '''))\n return query_result", "def teardown_db():\n engine = config['tg.app_globals'].sa_engine\n connection = engine.connect()\n\n # INFO - D.A. - 2014-12-04\n # Recipe taken from bitbucket:\n # https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/DropEverything\n\n inspector = reflection.Inspector.from_engine(engine)\n metadata = MetaData()\n\n tbs = []\n all_fks = []\n views = []\n\n # INFO - D.A. 
- 2014-12-04\n # Sequences are hard defined here because SQLA does not allow to reflect them from existing schema\n seqs = [\n Sequence('seq__groups__group_id'),\n Sequence('seq__contents__content_id'),\n Sequence('seq__content_revisions__revision_id'),\n Sequence('seq__permissions__permission_id'),\n Sequence('seq__users__user_id'),\n Sequence('seq__workspaces__workspace_id')\n ]\n\n for view_name in inspector.get_view_names():\n v = Table(view_name,metadata)\n views.append(v)\n\n for table_name in inspector.get_table_names():\n\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk['name']:\n continue\n fks.append(\n ForeignKeyConstraint((),(),name=fk['name'])\n )\n t = Table(table_name,metadata,*fks)\n tbs.append(t)\n all_fks.extend(fks)\n\n if not config['sqlalchemy.url'].startswith('sqlite'):\n for fkc in all_fks:\n connection.execute(DropConstraint(fkc))\n\n for view in views:\n drop_statement = 'DROP VIEW {}'.format(view.name)\n # engine.execute(drop_statement)\n connection.execute(drop_statement)\n\n for table in tbs:\n connection.execute(DropTable(table))\n\n\n for sequence in seqs:\n try:\n connection.execute(DropSequence(sequence))\n except Exception as e:\n logger.debug(teardown_db, 'Exception while trying to remove sequence {}'.format(sequence.name))\n\n transaction.commit()\n connection.close()\n engine.dispose()", "def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def db_drop_and_create_all():\n db.drop_all()\n db.create_all()", "def tearDown(self):\n self.database.truncate_all_tables()", "def drop_all_tables():\n # Taken from http://www.sqlalchemy.org/trac/wiki/UsageRecipes/DropEverything\n conn = engine.connect()\n trans = conn.begin()\n inspector = reflection.Inspector.from_engine(engine)\n metadata = MetaData()\n\n tbs = []\n all_fks = []\n for table_name in inspector.get_table_names():\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk['name']:\n continue\n fks.append(ForeignKeyConstraint((), (), name=fk['name']))\n t = Table(table_name, metadata, *fks)\n tbs.append(t)\n all_fks.extend(fks)\n\n for fkc in all_fks:\n conn.execute(DropConstraint(fkc))\n\n for table in tbs:\n conn.execute(DropTable(table))\n\n trans.commit()", "def reset_db():\n db.drop_all()\n _init_db()", "def drop_schema(engine):\n Base.metadata.drop_all(bind=engine)", "def cleanup_database():\n with sqlite3.connect(DB_STRING) as con:\n con.execute(\"DROP TABLE data\")", "def tearDown(self):\n db.session.commit()\n db.drop_all()", "def tearDown(self):\n self.db.drop_all()\n pass", "def tearDown(self):\n db.drop_all()", "def tearDown(self):\n db.drop_all()", "def clear_db(self):\n self.cursor.execute(\"DELETE FROM TrackPoint\")\n self.cursor.execute(\"DELETE FROM Activity\")\n self.cursor.execute(\"DELETE FROM User\")\n self.db_connection.commit()", "def dropall():\n if hasattr(running_app, 'db') and hasattr(running_app.db, 'drop_all'):\n if prompt_bool(\"Are you sure ? 
You will lose all your data !\"):\n running_app.db.drop_all()", "def clean_all_db():\n for model in [\n Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,\n Document, Project, Framework]:\n model.objects.all().delete()", "def tearDown(self):\n\n db.session.rollback()\n db.session.remove()\n db.drop_all()", "def reset_db():\n\n webapp.dbsql.drop_all()\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()", "def tearDown(self):\n\n InitializeDb('TEST_DATABASE_URI').drop_tables()", "def tearDown(self):\r\n\r\n db.session.rollback()\r\n db.drop_all()", "def empty_db(self):\n try:\n self.cur.execute(\"DELETE FROM Crashes;\")\n self.con.commit()\n print 'Deleted all records'\n\n except sqlite.Error, e:\n print 'Unable to delete all records.'\n print 'Exception follows:'\n print e", "def dropall_cmd():\n drop_all()\n print(\"all tables dropped\")", "def drop_db() -> None:\n \n if os.environ.get('DATABASE_URL').startswith('sqlite:///'):\n sqlite_s, sqlite_f = os.environ.get('DATABASE_URL').split(\"sqlite:///\") \n os.unlink(sqlite_f)\n else: \n Base.metadata.drop_all(bind=engine)", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def drop_all_tables(self):\n\n # Retrieve database name from application config\n app = self.db.app\n mongo_settings = app.config['MONGODB_SETTINGS']\n database_name = mongo_settings['db']\n\n # Flask-MongoEngine is built on MongoEngine, which is built on PyMongo.\n # To drop database collections, we need to access the PyMongo Database object,\n # which is stored in the PyMongo MongoClient object,\n # which is stored in app.extensions['mongoengine'][self]['conn']\n py_mongo_mongo_client = app.extensions['mongoengine'][self.db]['conn']\n py_mongo_database = py_mongo_mongo_client[database_name]\n\n # Use the PyMongo Database object\n for collection_name in py_mongo_database.collection_names():\n py_mongo_database.drop_collection(collection_name)", "def tearDown(self) -> None:\n things.db.session.remove()\n things.db.drop_all()", "def _cleanup_object_tables(self, engine, metadata):\n tables = metadata.tables\n tables_to_drop = [\n table\n for tablename, table in tables.items()\n if not tablename.endswith(\"sf_ids\")\n ]\n if tables_to_drop:\n metadata.drop_all(tables=tables_to_drop)", "def delete_all_records(db):\n with tables(db.engine) as (connection,):\n metadata = sqlalchemy.MetaData(bind=connection)\n metadata.reflect()\n # We delete the tables in order of dependency, so that foreign-key\n # relationships don't prevent a table from being deleted.\n for tbl in reversed(metadata.sorted_tables):\n tbl.delete().execute()", "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def drop_db():\n if prompt_bool(\"Are you sure you want to lose all your data?\"):\n app = create_app(dotenv.get('FLASK_CONFIG'))\n with app.app_context():\n db.drop_all()", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def tearDown(self):\n db.session.close()\n db.drop_all()", "def drop_data():\n DATABASE['product'].drop()\n DATABASE['customer'].drop()\n DATABASE['rental'].drop()", "def tearDown(self):\n\n # Remove all tables from test db\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n 
db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self) -> None:\n self.things.db.session.remove() # type: ignore\n self.things.db.drop_all() # type: ignore", "def tearDown(self) -> None:\n self.things.db.session.remove() # type: ignore\n self.things.db.drop_all() # type: ignore" ]
[ "0.84615856", "0.8291717", "0.81522286", "0.81405354", "0.8026864", "0.80001897", "0.79949796", "0.79896367", "0.79855984", "0.7966979", "0.7918987", "0.7878273", "0.78614014", "0.7860039", "0.7847825", "0.7836062", "0.78076893", "0.77844214", "0.7772575", "0.77703017", "0.77703017", "0.77703017", "0.77703017", "0.77703017", "0.77703017", "0.77703017", "0.77703017", "0.77703017", "0.7732046", "0.769641", "0.76794064", "0.76726747", "0.7669715", "0.7664657", "0.7662664", "0.7615924", "0.7611969", "0.758113", "0.7577802", "0.7553343", "0.75402284", "0.7473311", "0.74727845", "0.7465661", "0.74419534", "0.74401456", "0.7431881", "0.74181235", "0.7396163", "0.73864305", "0.7339361", "0.72733253", "0.72274244", "0.7226345", "0.721155", "0.7199036", "0.7189522", "0.7189368", "0.71843934", "0.71843934", "0.7160484", "0.71508944", "0.71466136", "0.7134219", "0.71279615", "0.7113617", "0.711306", "0.71080655", "0.7096613", "0.7096296", "0.7093358", "0.7091751", "0.7082326", "0.70767236", "0.7070737", "0.70689857", "0.7066264", "0.706142", "0.70477045", "0.70449746", "0.7041496", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.70100343", "0.7009746", "0.6996512" ]
0.0
-1
Epsilon is used here to avoid conditional code for checking that neither P nor Q is equal to 0.
def KL(P,Q):
    epsilon = 0.00001
    # You may want to instead make copies to avoid changing the np arrays.
    P = P+epsilon
    Q = Q+epsilon
    divergence = np.sum(P*np.log(P/Q))
    return divergence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon():\n return _EPSILON", "def get_initial_epsilon(self):\n return self.epsilon_percentile, True, self.max_rounds == 0", "def epsilon(self):\n return self.__epsilon", "def epsilon(self):\n return self._epsilon", "def MccEpsilon(self):\n if getattr(self, '_MccEpsilon', None) is None:\n self._MccEpsilon = Utils.sdiag(self.epsilon)\n return self._MccEpsilon", "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def greedy_eps(self, Q):\r\n s = self.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n if np.random.rand() > self.EPS:\r\n print(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n if (np.max(Q[s_x, s_y, s_vx, s_vy, :, :]) ==\r\n np.min(Q[s_x, s_y, s_vx, s_vy, :, :])):\r\n a = (0, 0)\r\n else:\r\n a = np.argmax(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n a = np.unravel_index(a, (3, 3)) - np.array([1, 1])\r\n a = (a[0], a[1])\r\n else:\r\n a = self.action_to_tuple(random.randrange(9))\r\n\r\n return a", "def epsilon_delta(self):", "def __init__(self, epsilon=1e-14):\n self.epsilon = epsilon", "def get_epsilon(self):\n step_size = float(self._eps_begin - self._eps_end) / self._total_steps\n self._epsilon = max(self._eps_end, self._epsilon - step_size)\n return self._epsilon", "def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def get_epsilon(step: int):\n return (epsilon_0 - epsilon) * math.exp(-step) + epsilon", "def epsilon(dimension):\n return DriverUtils.EPSILON[dimension] if dimension in DriverUtils.EPSILON \\\n else DriverUtils.EPSILON['default']", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def find_reasonable_epsilon(theta0, grad0, logp0, f):\n epsilon = 1.\n r0 = np.random.normal(0., 1., len(theta0))\n\n # Figure out what direction we should be moving epsilon.\n _, rprime, gradprime, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)\n # brutal! This trick make sure the step is not huge leading to infinite\n # values of the likelihood. This could also help to make sure theta stays\n # within the prior domain (if any)\n k = 1.\n while np.isinf(logpprime) or np.isinf(gradprime).any():\n k *= 0.5\n _, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon * k, f)\n\n epsilon = 0.5 * k * epsilon\n\n # acceptprob = np.exp(logpprime - logp0 - 0.5 * (np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))\n # a = 2. * float((acceptprob > 0.5)) - 1.\n logacceptprob = logpprime-logp0-0.5*(np.dot(rprime, rprime)-np.dot(r0,r0))\n a = 1. if logacceptprob > np.log(0.5) else -1.\n # Keep moving epsilon in that direction until acceptprob crosses 0.5.\n # while ( (acceptprob ** a) > (2. ** (-a))):\n while a * logacceptprob > -a * np.log(2):\n epsilon = epsilon * (2. 
** a)\n _, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)\n # acceptprob = np.exp(logpprime - logp0 - 0.5 * ( np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))\n logacceptprob = logpprime-logp0-0.5*(np.dot(rprime, rprime)-np.dot(r0,r0))\n\n print(\"find_reasonable_epsilon=\", epsilon)\n\n return epsilon", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def set_epsilon(value):\n global _EPSILON\n _EPSILON = value", "def multiplicative_epsilon(front, **kargs):\n wobj = numpy.array([ind.fitness.wvalues for ind in front]) * -1\n\n def contribution(i):\n mwobj = numpy.ma.array(wobj)\n mwobj[i] = numpy.ma.masked\n return numpy.min(numpy.max(wobj[i] / mwobj, axis=1))\n\n contrib_values = list(map(contribution, list(range(len(front)))))\n\n # Select the minimum contribution value\n return numpy.argmin(contrib_values)", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def current_epsilon(self):\n t = self.action_requests\n T = self.exploration_period\n if(t >= T):\n return self.epsilon_final\n\n epsilon0 = self.epsilon_initial\n epsilonT = self.epsilon_final\n\n return epsilon0 - (t * (epsilon0 - epsilonT)) / T", "def get_epsilongreedy_policy(epsilon):\n \n def epsilongreedy_policy(Qvalues_oa):\n \"\"\"Returns softmax action probabilites from Qvalues\"\"\"\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X\n \n return epsilongreedy_policy", "def KB_Dist(P,Q):\r\n \r\n \r\n K=0\r\n Epsilon=0.000001\r\n Q+=Epsilon\r\n P+=Epsilon\r\n for x in range(len(Q)):\r\n K-=P[x]*np.log(Q[x]/P[x])\r\n return K", "def _get_epsilon(self, is_evaluation, power=1.0):\n if is_evaluation:\n return 0.0\n decay_steps = min(self._step_counter, self._epsilon_decay_duration)\n decayed_epsilon = (\n self._epsilon_end + (self._epsilon_start - self._epsilon_end) *\n (1 - decay_steps / self._epsilon_decay_duration) ** power)\n return decayed_epsilon", "def __init__(self, epsilon=1e-7):\n super().__init__()\n self.epsilon = epsilon", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def KeqPrime(self):\n dg0_prime = self.DeltaG0Prime()\n if dg0_prime is None:\n return None\n \n rt = constants.R * constants.DEFAULT_TEMP\n keq = numpy.exp(-dg0_prime / rt)\n return keq", "def epsilon_greedy(Q, epsilon, state):\n random_number = random.random()\n if (random_number < epsilon) and (state not in critical_states):\n return env.action_space.sample()\n\n else:\n return np.argmax(Q[state])", "def epsilon(self, length: int, time: int) -> float:\n return (self.beta ** (1.0 / (length / 2))) ** time", "def _epsilon(vds) -> np.ndarray:\n 
return vds[\"rhod_tot\"] / vds[\"rho\"]", "def epsilon_greedy_probs(self, nA, Q_s, i_count, eps=None):\r\n epsilon = 1.0 / i_count\r\n if eps is not None:\r\n epsilon = eps\r\n \r\n policy_s = np.ones(nA) * epsilon / nA\r\n policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)\r\n return policy_s", "def squareRootExhaustive(x, epsilon):\n step = espilon**2\n ans = 0.0\n while abs(ans**2 - x) >= epsilon and ans*ans <= x: \n # The ans*ans <= is there because of floating point arithmetic I think.\n ans += step \n if ans*ans > x:\n raise ValueError\n return ans", "def __init__(self, nA=6, gamma=.9, alpha=.9,\n epsilon_start=1, epsilon_decay=.999, epsilon_min=0.25):\n self.nA = nA\n self.Q = defaultdict(lambda: np.zeros(self.nA))\n self.epsilon = epsilon_start\n self.epsilon_decay = epsilon_decay\n self.epsilon_min = epsilon_min\n self.gamma = gamma\n self.alpha = alpha\n print(\"Epsilon: {}, E Decay: {}, E Min: {}, Gamma: {}, Alpha: {}\".format(self.epsilon, self.epsilon_decay, self.epsilon_min, self.gamma, self.alpha))", "def epsilongreedy_policy(Qvalues_oa):\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X", "def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon", "def testEpsK1Changes(self):\n with self.test_context() as session:\n initial_eps = 1e-3\n num_classes = 5\n rm = gpflow.likelihoods.RobustMax(num_classes, initial_eps)\n\n expected_eps_k1 = initial_eps / (num_classes - 1.)\n actual_eps_k1 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k1, actual_eps_k1)\n\n new_eps = 0.412\n rm.epsilon.assign(new_eps, session=session)\n expected_eps_k2 = new_eps / (num_classes - 1.)\n actual_eps_k2 = session.run(rm._eps_K1)\n self.assertAlmostEqual(expected_eps_k2, actual_eps_k2)", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def test_q(self):\n assert np.allclose(self.stepper.q, self.ODE.exact(self.stepper.t), rtol=1e-3, atol=1e-5)", "def GetEpsilonBeer(Abs, conc, pathLength):\n return Abs / (conc * pathLength)", "def update_q(self):\n beta = self.EC_beta\n self.gamma_q = (self.gamma_s - self.gamma_r) * beta + (1 - beta) * self.gamma_q\n self.Sigma_q = (self.Sigma_s - self.Sigma_r) * beta + (1 - beta) * self.Sigma_q\n try:\n assert np.all(np.logical_not(np.isnan(self.gamma_q)))\n except:\n print(\"Invalid update encountered...\")", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n if random.random() < (1 - epsilon):\n return np.argmax(Q[observation])\n else:\n return random.choice(np.arange(nA))\n\n return policy_fn", "def em_epsilon_cdp(epsilon, delta, k):\n if delta <= 0:\n return epsilon / k\n else:\n log_delta = np.log(1 / delta)\n return max(\n epsilon / k,\n np.sqrt((8 * log_delta + 8 * epsilon) / k) -\n np.sqrt(8 * log_delta / k))", "def decay_epsilon(self, epsilon, MIN_EPSILON,\r\n EPSILON_DECAY: float) -> float:\r\n if epsilon > MIN_EPSILON:\r\n epsilon *= EPSILON_DECAY\r\n epsilon = 
max(MIN_EPSILON, epsilon)\r\n return epsilon", "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def AreItemsEqualToWithinEpsilon(Item0, Item1, epsilon=1e-06):\n \n AbsDiff = abs(Item0 - Item1)\n \n if AbsDiff < epsilon:\n IsEqual = True\n else:\n IsEqual = False\n \n return IsEqual", "def __init__(self, eps: float=1e-5):\n self.eps = eps", "def get_epsilon(self, round, abc_history):\n if round > len(abc_history):\n t = np.percentile(abc_history[-1]['distances'], self.epsilon_percentile)\n else:\n t = np.percentile(abc_history[round - 1]['distances'], self.epsilon_percentile)\n return t, False, self.max_rounds and round + 1 == self.max_rounds", "def test_keras_unsafe_min_epsilon():\n model, X, y, Xval, yval = make_small_model()\n\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n\n isDP, msg = safekeras.check_optimizer_is_DP(model.optimizer)\n assert isDP, \"failed check that optimizer is dP\"\n\n model.min_epsilon = 4\n\n model.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n\n DPused, msg = safekeras.check_DP_used(model.optimizer)\n assert (\n DPused\n ), \"Failed check that DP version of optimiser was actually used in training\"\n\n loss, acc = model.evaluate(X, y)\n expected_accuracy = UNSAFE_ACC\n assert round(acc, 6) == round(\n expected_accuracy, 6\n ), \"failed check that accuracy is as expected\"\n\n msg, disclosive = model.preliminary_check()\n correct_msg = (\n \"WARNING: model parameters may present a disclosure risk:\"\n \"\\n- parameter min_epsilon = 4 identified as less than the recommended min value of 5.\"\n )\n\n assert msg == correct_msg, \"failed check correct warning message\"\n assert disclosive is True, \"failed check disclosive is True\"", "def assertEqualEpsilon(self, first, second, msg=None):\n\n def epsilonCompare(value):\n return abs(value) <= epsilon\n\n comparison = map(epsilonCompare, (first - second))\n return self.assertTrue(all(comparison), msg)", "def additive_epsilon(front, **kargs):\n wobj = numpy.array([ind.fitness.wvalues for ind in front]) * -1\n\n def contribution(i):\n mwobj = numpy.ma.array(wobj)\n mwobj[i] = numpy.ma.masked\n return numpy.min(numpy.max(wobj[i] - mwobj, axis=1))\n\n contrib_values = list(map(contribution, list(range(len(front)))))\n\n # Select the minimum contribution value\n return numpy.argmin(contrib_values)", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n\n def policy_fn(observation):\n\n # get random number\n random_number = random.uniform(0, 1)\n\n # get actions with maximum value\n greedy_actions = np.argwhere(Q[observation] == np.amax(Q[observation])).squeeze()\n if not len(greedy_actions.shape):\n greedy_actions = [greedy_actions]\n action = random.choice(greedy_actions)\n\n # if number less than epsilon, get random other actions\n if random_number <= epsilon:\n all_actions = list(range(0, nA))\n if not len(greedy_actions) == nA:\n action = random.choice(all_actions)\n\n return int(action)\n\n return policy_fn", "def epsilon_greedy(q, s, eps = 0.5):\n if random.random()<eps:\n return uniform_dist(q.actions).draw()\n else:\n return greedy(q,s)", "def calc_epsilon(self, state_number, evaluation=False):\n if evaluation:\n return self.eps_evaluation\n elif state_number < self.replay_buffer_start_size:\n return self.eps_initial\n 
elif self.replay_buffer_start_size <= state_number < self.replay_buffer_start_size + self.eps_annealing_states:\n return self.slope * state_number + self.intercept\n elif state_number >= self.replay_buffer_start_size + self.eps_annealing_states:\n return self.slope_2 * state_number + self.intercept_2", "def _sigma_ee_nonrel(self,gam,eps):\n s0 = 4 * r0**2 * alpha / (15 * eps)\n x = 4 * eps / (gam**2 - 1)\n sigma_nonrel = s0 * self._F(x,gam)\n sigma_nonrel[np.where(eps >= 0.25*(gam**2 - 1.))] = 0.0\n sigma_nonrel[np.where(gam*np.ones_like(eps) < 1.0)] = 0.0\n return sigma_nonrel / mec2_unit", "def make_epsilon_greedy_policy(self, Q, epsilon, nA):\n\n def policy_fn(observation,p):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q=Q(observation,p)\n\n best_action = np.argmax(q)\n print(\"action called:\",self.env.action_labels[best_action])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn", "def __init__(self, p=1.5, eps=1e-8):\n assert 1 < p < 2, \"make sure 1 < p < 2\" \n self.p, self.eps = p, eps", "def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def __init__(self, alpha=0.5, epsilon=0.1):\n self.q = dict()\n self.alpha = alpha\n self.epsilon = epsilon", "def find_epsilon(self, ltarget):\n\n dnu = self.find_large_separation()\n one = n = nu = 0.0\n for i in range(len(self.modes)):\n if (self.modes['l'][i] != ltarget): continue\n one += 1.0\n n += self.modes['n'][i]\n nu += self.modes['freq'][i]\n if (one == 0.0):\n return 0.0\n else:\n return (nu/dnu-n)/one", "def update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. 
Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def test_validate_epsilon():\n with pytest.raises(ValueError):\n validate_epsilon([0.1], 2)\n with pytest.raises(ValueError):\n validate_epsilon([-0.1, 1], 2)\n\n assert (validate_epsilon(0.1, 2) == np.array([0.1, 0.1])).all()\n assert (validate_epsilon([0.1, 0.1], 2) == np.array([0.1, 0.1])).all()\n assert (validate_epsilon(np.array([0.1, 0.1]), 2) == np.array([0.1, 0.1])).all()", "def metropolis ( delta ):\n\n import numpy as np\n \n exponent_guard = 75.0\n\n if delta > exponent_guard: # Too high, reject without evaluating\n return False\n elif delta < 0.0: # Downhill, accept without evaluating\n return True\n else:\n zeta = np.random.rand() # Uniform random number in range (0,1)\n return np.exp(-delta) > zeta # Metropolis test", "def get_probs(Q_s, epsilon, nA):\n policy_s = np.ones(nA) * epsilon / nA\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nA)\n return policy_s", "def epsilon_greedily_update_policy(self, current_Q, iterations_completed):\n iteration = iterations_completed+1\n # epsilon = min(1/np.log(iterations_completed+.0001),1)\n # epsilon = 1/iteration\n epsilon = 0.1\n def new_policy(state):\n heads = True if random.random() < epsilon else False # Flip our epsilon greedy coin\n if heads: # If heads comes up, choose random action\n return random.randint(0, self.nA-1)\n else: # If tails comes up, choose greedy option\n return np.argmax(current_Q[state]['Q(s,a)'])\n return new_policy", "def call_epsilon_GaN(l_onde):\n v=1e4/l_onde\n epsinfE = 5.04\n epsinfA = 5.01\n wLE = 742.1\n wLA = 732.5\n wTE = 560.1\n wTA = 537\n gLE = 3.8\n gLA = 4 \n \n epsilonE = epsinfE*(1+(wLE**2-wTE**2)/(wTE**2-v**2-1j*gLE*v))\n epsilonA = epsinfA*(1+(wLA**2-wTA**2)/(wTA**2-v**2-1j*gLA*v))\n \n return epsilonE,epsilonA", "def isSmall(number):\n return abs(number) < epsilon", "def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold found! 
{} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1", "def find_epsilon(self, inverse=False):\n e_s = self.e_s if not inverse else self.e_s_inv\n\n max_score = -10000000\n\n for z in np.arange(2.5, self.sd_lim, 0.5):\n epsilon = self.mean_e_s + (self.sd_e_s * z)\n\n pruned_e_s = e_s[e_s < epsilon]\n\n i_anom = np.argwhere(e_s >= epsilon).reshape(-1,)\n buffer = np.arange(1, self._error_buffer)\n i_anom = np.sort(np.concatenate((i_anom,\n np.array([i+buffer for i in i_anom])\n .flatten(),\n np.array([i-buffer for i in i_anom])\n .flatten())))\n i_anom = i_anom[(i_anom < len(e_s)) & (i_anom >= 0)]\n i_anom = np.sort(np.unique(i_anom))\n\n if len(i_anom) > 0:\n # group anomalous indices into continuous sequences\n groups = [list(group) for group\n in mit.consecutive_groups(i_anom)]\n E_seq = [(g[0], g[-1]) for g in groups if not g[0] == g[-1]]\n\n mean_perc_decrease = (self.mean_e_s - np.mean(pruned_e_s)) \\\n / self.mean_e_s\n sd_perc_decrease = (self.sd_e_s - np.std(pruned_e_s)) \\\n / self.sd_e_s\n score = (mean_perc_decrease + sd_perc_decrease) \\\n / (len(E_seq) ** 2 + len(i_anom))\n\n # sanity checks / guardrails\n if score >= max_score and len(E_seq) <= 5 and \\\n len(i_anom) < (len(e_s) * 0.5):\n max_score = score\n if not inverse:\n self.sd_threshold = z\n self.epsilon = self.mean_e_s + z * self.sd_e_s\n else:\n self.sd_threshold_inv = z\n self.epsilon_inv = self.mean_e_s + z * self.sd_e_s", "def _compute_rdp(q: float, sigma: float, alpha: float) -> float:\n if q == 0:\n return 0\n\n # no privacy\n if sigma == 0:\n return np.inf\n\n if q == 1.0:\n return alpha / (2 * sigma ** 2)\n\n if np.isinf(alpha):\n return np.inf\n\n return _compute_log_a(q, sigma, alpha) / (alpha - 1)", "def epsilonEffective(epsilon1=0.9, epsilon2=0.9):\n result=1/(1/epsilon1+1/epsilon2-1)\n return result", "def __init__(self, eps: float = 1e-5):\n if eps <= 0.0:\n raise ValueError(\"The epsilon value must be positive\")\n super().__init__()\n self.register_buffer('eps', torch.tensor(eps))", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def calc_epsilon(y_true, y_pred, weights):\n return float(np.dot(weights, y_pred == y_true))", "def test_grade_infinity(self):\r\n\r\n sample_dict = {'x': (1, 2)}\r\n\r\n # Test problem\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\")\r\n # Expect such a large answer to be marked incorrect\r\n input_formula = \"x*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")\r\n # Expect such a large negative answer to be marked incorrect\r\n input_formula = \"-x*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def makeEpsilonTransition(self, currentStates):\n nextStates = self.makeTransition(currentStates, '$', True)\n #if epsilon transition did not occur or it started an infitine loop\n if not nextStates or nextStates == currentStates:\n return currentStates #end recursion\n else:\n return nextStates.union(self.makeEpsilonTransition(nextStates))", "def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n 
n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def checkpsq(i):\n min=int(math.floor(math.sqrt(i)))\n max=int(math.ceil(math.sqrt(i)))\n if(i==min*min or i==max*max):\n return True", "def update_epsilon(self):\n self.epsilon = self.epsilon * self.decay", "def perplexity(p: np.ndarray, q: np.ndarray, eps: float = 1e-10) -> List[float]:\n kl_div_pq = kl_div(p, q, eps)[0]\n perplexity_pq = np.exp(-kl_div_pq)\n return [perplexity_pq]", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def __init__(self, nA=6):\n self.nA = nA\n self.Q = defaultdict(lambda: np.zeros(self.nA))\n self.gamma=1.0\n self.alpha=0.2 #this can be potentially changed\n self.epsilon=1.0\n self.eps_start=1.0\n self.eps_decay=.9\n self.eps_min=0.0005", "def _check_epsilon_delta(epsilon_delta):\n if len(epsilon_delta) != 2:\n raise ValueError(\"epsilon_delta parameter should be a tuple with \"\n \"two elements, but {} were given.\"\n .format(len(epsilon_delta)))\n if epsilon_delta[0] < 0:\n raise ValueError(\"Epsilon has to be greater than zero.\")\n if epsilon_delta[1] < 0:\n raise ValueError(\"Delta has to be greater than 0 and less than 1.\")", "def get_probs(self,Q_s, epsilon, nA):\n policy_s = np.ones(nA) * epsilon / nA\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nA)\n return policy_s", "def eps_neighborhood(M, p, q, eps):\n if distance(M, p, q) == 0: # zwraca false przy dystansie miedzy jednakowymi punktami np 1 i 1\n return False\n return distance(M, p, q) < eps # jesli dystans miedzy punktami jest mniejszy niz eps to zwraca true", "def _epsilon_eval(z, A, ord=2):\n z=np.array(z)\n A=np.array(A)\n zc = complex(z[0], z[1])\n try :\n ep = 1/spl.norm(spl.inv(zc*np.eye(*A.shape)-A),ord=ord)\n # ep = spl.norm(zc*np.eye(*A.shape)-A,ord=ord)\n except TypeError:\n if ord==\"svd\":\n ep = np.min(spl.svdvals(zc*np.eye(*A.shape)-A))\n else: raise Exception(\"invalid method\")\n return ep", "def test_is_prime_invalid(self):\n sol = solution.Solution();\n self.assertFalse(sol.isPrime(1))\n self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n #self.assertFalse(sol.isPrime(864))", "def test_quality_lt_zero(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(-1.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))", "def call_epsilon_AlN(l_onde):\n v=1e4/l_onde\n epsinfE = 4.160\n epsinfA = 4.350\n wLE = 909.6\n wLA = 888.9\n wTE = 667.2\n wTA = 608.5\n g = 2.2\n \n epsilonE = epsinfE*(1+(wLE**2-wTE**2)/(wTE**2-v**2-1j*g*v))\n epsilonA = 
epsinfA*(1+(wLA**2-wTA**2)/(wTA**2-v**2-1j*g*v))\n \n return epsilonE,epsilonA", "def eps(self):\n return self.eps_mask*self.eps_scheduler.value", "def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)", "def confirm_Alex_fast(p, q, r):\n if p*q*r < 0:\n return False\n if q*r + p*r + p*q -1 == 0:\n return True\n else:\n return False", "def test_ge_1():\n a = FixedPoint(1, 'Q2.8')\n assert a > 0.9", "def test_qing(self):\n fun = get_problem('qing', self.dimension, -500, 500)\n self.assertAlmostEqual(fun(self.array10), 584.0, delta=1e-4)", "def epsilon_decreasing(options, iteration, rate=1., reverse=False):\n e = np.exp(-steps * rate)\n return epsilon(options, e=e, reverse=reverse)", "def pvalue_test(self, alpha=0.01):\n CL = int((1-alpha)*100) # confidence level\n \n if self.p_value < alpha:\n print(\"Null hypothesis rejected at {:d}%CL => distributions are different\".format(CL))\n else:\n print(\"Null hypothesis NOT rejected => distributions are the same\")", "def eps(self):\n return self._eps", "def fabs(x):\n return 0.0", "def littlewood_paley_condition(lp_sum):\n all_elems_lq_one = np.all(lp_sum <= 1)\n epsilons = 1 - lp_sum\n epsilon = np.max(epsilons)\n average_epsilon = np.average(epsilons)\n\n return all_elems_lq_one, epsilon, average_epsilon", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn" ]
[ "0.71985364", "0.6888928", "0.6771035", "0.6715826", "0.6521258", "0.64338565", "0.63468957", "0.63350797", "0.6333442", "0.6308612", "0.62801576", "0.6250783", "0.62459266", "0.6223243", "0.6196995", "0.6183421", "0.61769325", "0.6173163", "0.61617094", "0.61539805", "0.61403835", "0.61224276", "0.6121558", "0.60432017", "0.60354793", "0.60113376", "0.60065085", "0.5995425", "0.59922904", "0.5991275", "0.5982108", "0.5958562", "0.5947242", "0.5934432", "0.5926942", "0.5894289", "0.5880271", "0.5821699", "0.5816076", "0.5794661", "0.57822156", "0.57763726", "0.57749", "0.57721287", "0.57631505", "0.57173705", "0.57135534", "0.5699425", "0.5697889", "0.5683326", "0.5683176", "0.56745195", "0.5669321", "0.5666892", "0.5659923", "0.5632832", "0.562825", "0.5626064", "0.5624641", "0.5612427", "0.56060886", "0.55829346", "0.5582602", "0.55825466", "0.5577389", "0.55735564", "0.557264", "0.55713415", "0.5563734", "0.5539521", "0.5535305", "0.55349374", "0.5534629", "0.55212605", "0.550851", "0.55043036", "0.55038786", "0.5502868", "0.5498631", "0.5497381", "0.5495726", "0.5495015", "0.54833007", "0.5466929", "0.54535997", "0.544007", "0.54365253", "0.543613", "0.5425343", "0.5417582", "0.5416348", "0.5413093", "0.5395564", "0.53921837", "0.53916454", "0.5389541", "0.5388106", "0.5384719", "0.5384275", "0.5379222", "0.53723127" ]
0.0
-1
Returns the item with the highest valued key
def pop(self): if len(self.heap)==0: raise ValueError("Tried popping empty heap") return heapq.heappop(self.heap)[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_key(self):\n return self._price_list[-1]", "def key_of_max(d):\n keys = list(d.keys())\n keys.sort()\n return max(keys, key=lambda x: d[x])", "def keywithmaxval(d):\n\treturn max(d, key=lambda k: d[k])", "def max_key (dict):\n output = -1\n for key, value in dict.items():\n output = max(output, key)\n return output", "def max(self, key=lambda _: _):\n return max(self, key=key)", "def MostPopularKey(d, default):\n x = [(v, k) for (k, v) in d.iteritems()]\n if not x: return default\n x.sort()\n return x[-1][1]", "def get_max_key(dico):\n our_max = 0\n argmax = None\n for key, val in dico.items():\n if val > our_max:\n argmax = key\n our_max = val\n return argmax", "def find_max_key_val(value):\n most_sold = max(value, key=value.get)\n keys_val = value.get(most_sold)\n print(f\"We can see the highest count is for {most_sold},\")\n print(f\"with a total of {keys_val} sale(s).\")\n return keys_val", "def keywithmaxval(dictionary): # from https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary/12343826#12343826 \n\tv=list(dictionary.values())\n\tk=list(dictionary.keys())\n\treturn k[v.index(max(v))]", "def most_recent_use_key(self):\n l = [x for x in self.key_tracker.keys()]\n self.most_use_key = l[0]\n most_use = self.key_tracker.get(self.most_use_key)\n for x in l:\n recent_use = self.key_tracker.get(x)\n if recent_use > most_use:\n most_use = recent_use\n self.most_use_key = x\n return self.most_use_key", "def getMaxKey(self):\n print self.freq\n if self.freq:\n max_freq = max(self.freq.keys())\n return list(self.freq[max_freq])[0]\n\n return ''", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def getMaxKey(self) -> str:\n if self.buckets.empty():\n return \"\"\n return iter(self.buckets.back().keys).__next__()", "def getMaxKey(self):\n if self.last is None:\n return \"\"\n return self.last.first.key", "def keywithmaxval(d): \r\n v=list(d.values())\r\n k=list(d.keys())\r\n return k[v.index(max(v))]", "def largest_item(list):\n pass", "def keymaxval (dictionary):\n values = list (dictionary.values())\n return list(dictionary.keys())[values.index(max(values))]", "def argmax(d):\n return max(d.iteritems(), key=operator.itemgetter(1))", "def find_max(self):\r\n maxVal = self.items[1]\r\n if maxVal is None:\r\n return None\r\n \r\n for i in range(1,len(self.items)):\r\n if self.items[i] is not None:\r\n if self.items[i] > maxVal:\r\n maxVal = self.items[i]\r\n return maxVal", "def get_key_with_max_value(dictionary):\n values = list(dictionary.values())\n keys = list(dictionary.keys())\n return keys[values.index(max(values))]", "def keywithmaxval(kwmv_dict):\n values = list(kwmv_dict.values())\n keys = list(kwmv_dict.keys())\n return keys[values.index(max(values))]", "def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]", "def findHighestPkPair(x, pkp): #{\n vrbMsg(5, 'findHighestPkPair() x = [...], pkp = ' + str(pkp))\n mi = [0,0]\n mv = 
x[pkp[0]]\n for i in range(1, len(pkp)): #{\n nv = x[pkp[i]]\n if(nv > mv): #{\n mi[0] = i\n mv = nv\n #}\n #}\n mv = None\n for i in range(0, len(pkp)): #{\n if(i != mi[0]): #{\n nv = x[pkp[i]]\n if((mv is None) or (nv > mv)): #{\n mi[1] = i\n mv = nv\n #}\n #}\n #}\n if(mi[0] == mi[1]): #{\n mi = [mi[0]]\n elif(mi[0] > mi[1]): #{\n mi = [mi[1], mi[0]]\n #}\n vrbMsg(5, 'findHighestPkPair() mi = ' + str(mi))\n return mi", "def max(self):\n if not self.root:\n return None\n\n node, parent = Treap._traverse(self.root, 'right')\n return node.key", "def max(self):\n return self.get_first()", "def greatest_product_one(self, key):\n return self.greatest_product(key)[0]", "def max_pk(self):\n end = self.execute(self.commands.max_pk(self.name, self.primary_key_column))\n return end[0][0]", "def find_max_key_val_in_dict(in_dict):\n\tmax_key = None\n\tmax_val = -np.inf\n\tfor key,val in in_dict.iteritems():\n\t\tif val >= max_val:\n\t\t\tmax_val = val\n\t\t\tmax_key = key\n\treturn (max_key,max_val)", "def getMaxKey(self) -> str:\n if self.tail.prev.val == 0:\n return \"\"\n return next(iter(self.tail.prev.keys))", "def get_highest_priority(self):\n for i in self.query.index.values.tolist():\n if not int(self.query.loc[i,'in_%s'%self.program]):\n pick = self.query.loc[i]\n break\n return pick", "def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i", "def get_max(self):\n if self.root is None: # BC1\n return float('-inf')\n\n current = self.root\n while current.right is not None: # Traverse like a linked-list\n current = current.right\n\n return current.key", "def getHighest(key, values, num):\n assert isinstance(key, list)\n assert isinstance(values, list)\n assert isinstance(num, int)\n key, values = getSorted(key, values)\n newKey = key[:num]\n newValue = values[:num]\n return newKey, newValue", "def max(self):\n if self.right is None:\n return self.item\n else:\n return self.right.max()", "def max(self, include_zero=False):\n for key, value in reversed(self.items()):\n if value > 0 or include_zero:\n return key", "def getMaxKey(self):\n if self.head.next_cl and self.head.next_cl != self.tail:\n return self.head.next_cl._next.key\n else:\n return \"\"", "def getMaxKey(self):\n return self.tail.prev.key if self.tail.prev != self.head else \"\"", "def getMaxKey(self):\n if self.tail.pre == self.head:\n return \"\" \n else:\n key = self.tail.pre.keySet.pop()\n self.tail.pre.keySet.add(key)\n return key", "def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None", "def test_get_max_key_entry(self):\n order_dict = OrderDict()\n\n order_2 = StockOrderWrapper(self.order_2)\n order_3 = StockOrderWrapper(self.order_3)\n order_5 = StockOrderWrapper(self.order_5)\n order_7 = StockOrderWrapper(self.order_7)\n\n order_2.stock_order.order_status = DEFINITIVE\n order_3.stock_order.order_status = DEFINITIVE\n order_5.stock_order.order_status = DEFINITIVE\n order_7.stock_order.order_status = DEFINITIVE\n\n order_dict.add_order(1.125, order_2)\n order_dict.add_order(10.321, order_3)\n order_dict.add_order(1.4, order_5)\n order_dict.add_order(9.321, order_7)\n\n # =================================================================\n # test: max_key is created\n # 
=================================================================\n\n max_key_entry = order_dict.get_max_key_entry()\n exp_entry = [order_5]\n self.assertEqual(max_key_entry, exp_entry)\n\n # =================================================================\n # test: max_key is updated after remove order\n # =================================================================\n\n # order_dict.remove_order(key=.4, order=order_5)\n order_dict.remove_max_key()\n max_key_entry = order_dict.get_max_key_entry()\n exp_entry = [order_3, order_7]\n self.assertEqual(max_key_entry, exp_entry)\n\n # =================================================================\n # test: max_key is updated after remove entry\n # =================================================================\n\n # order_dict.remove_entry(key=.321)\n order_dict.remove_max_key()\n max_key_entry = order_dict.get_max_key_entry()\n exp_entry = [order_2]\n self.assertEqual(max_key_entry, exp_entry)", "def get_max_key(data):\n return max(map(len, data))", "def mle(self):\n\n\t\tmax_key, max_value = None, 0\n\t\tfor key, value in self.items():\n\t\t\tif value > max_value:\n\t\t\t\tmax_key, max_value = key, value\n\n\t\treturn max_key", "def produce(self, key=lambda x: 1.0):\n return max(self.data[0], key=key)", "def get_largest_id(self):\n try:\n cur = self.conn.execute(\"\"\"SELECT MAX(id) FROM todo;\"\"\")\n row = cur.fetchone()\n if row[0] == None:\n return 0\n else:\n return row[0]\n except Exception as e:\n print(e)", "def get_highest(self, test):\n return", "def __getitem__(self, key):\n if key not in self._data:\n raise KeyError(key)\n versions = self._data[key]\n return versions[max(versions)], max(versions)", "def last_key(self):\n return self._last_key", "def getMaxKey(self) -> str:\n return \"\" if self.tail.prev == self.head else next(iter(self.tail.prev.keySet))", "def maxKey(analyzer):\n return om.maxKey(analyzer['dateIndex'])", "def maxKey(analyzer):\n return om.maxKey(analyzer['dateIndex'])", "def find_largest_id():\n max_id_val= 0\n for event in Event.query.all():\n if event.id > max_id_val:\n max_id_val = event.id\n return max_id_val", "def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data", "def take_max(self):\n return self.delete_first()", "def longest_key(self):\n longest = None\n for key in self:\n if not longest or len(key) > len(longest):\n longest = key\n return longest", "def most_stable():\n \n \n \n \n return Z", "def latest(scores: list) -> int:\n return scores[-1]", "def get_latest(self, key):\n # Check connection\n self._checkInit()\n \n # Construct the query\n query = \"\"\"SELECT \n date_format(max(str_to_date(concat(year,',',month,',',day,',',hour),'%Y,%c,%e,%k')),'%Y-%m-%d-%H')\n FROM {} WHERE `key`='{}'\"\"\".format(\n self.table,\n key)\n\n #logging.debug(\"query: \\\"{}\\\"\".format(query))\n\n # Get Connection\n cnx = self.getConnection()\n cur = cnx.cursor()\n cur.execute(query)\n retval = None\n for fields in cur:\n retval = fields[0]\n break\n cur.close()\n cnx.close()\n return retval", "def get_highest_id(self):\n\n return self.mint.get_highest_id()", "def get_max_item(self):\n return self._get_page('maxitem').json()", "def datastore_most_space(cls, container, cluster):\n obj = Query.get_obj(container, cluster)\n datastores = {}\n\n if hasattr(obj, 'datastore'):\n for datastore in obj.datastore:\n # if datastore is a VMware File System\n if datastore.summary.type == 'VMFS':\n free = int(datastore.summary.freeSpace)\n datastores.update({datastore.name:free})\n\n\n 
most = max(datastores.values())\n for key, value in datastores.items():\n if value == most:\n return key\n return None", "def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data", "def _update_max(self):\n tmp = self\n while tmp.right is not None:\n tmp = tmp.right\n return tmp.parent.key", "def max(self, fn=lambda x: x):\n return _(max(*self._, key=fn))", "def get_most_valuable(self):\n return self.most_valuable", "def getlast(self, key, default=None):\n \n values = self.getlist(key)\n return values[-1] if values else default", "def test_perf_max():\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import OrderedTreeDict;\"\n \"keys_tree = OrderedTreeDict((key, key) for key in range(1000, -1000, -1))\",\n number=1000\n )\n assert dict_time > tree_time, \"Max method is slow.\"\n assert dict_sort_time > tree_time, \"Max method is slow.\"", "def deep_max(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[-1]\r\n return node.keys[-1] if node.keys else None", "def test_perf_max(self):\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import SplayDict;\"\n \"from random import sample;\"\n \"keys_tree = SplayDict((key, key) for key in sample(range(-1000, 1000), 2000))\",\n number=1000\n )\n self.assertGreater(dict_time, tree_time, \"Max method is slow.\")\n self.assertGreater(dict_sort_time, tree_time, \"Max method is slow.\")", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def maxLike(self):\n return max(self.d.values())", "def v10_multimax(iterable, key=lambda x: x):\n max_key = None\n maximums = []\n for item in iterable:\n k = key(item)\n if k == max_key:\n maximums.append(item)\n elif not maximums or k > max_key:\n maximums = [item]\n max_key = k\n return maximums", "def best_score(my_dict):\n # Make sorted list\n if my_dict:\n return(sorted(my_dict)[-1])", "def get_max(self):\n return self.max[-1]", "def v9_multimax(iterable, key=None):\n if key is None:\n def key(item): return item\n max_key = None\n maximums = []\n for item in iterable:\n k = key(item)\n if k == max_key:\n maximums.append(item)\n elif not maximums or k > max_key:\n maximums = [item]\n max_key = k\n return maximums", "def best_score(a_dictionary):\n for key in a_dictionary:\n if key is None:\n return None\n else:\n max_val = max(a_dictionary)\n return max_val", "def get_largest_batch(self):\n try:\n with 
Transaction().start(DBNAME, 1):\n purchase_list = self.Purchase.search([], order=(('batch_number', 'DESC'),))\n batch = tuple(i.batch_number for i in purchase_list if i.batch_number)\n if batch:\n return batch[0]\n else:\n return None\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return None", "def best(self):\n if len(self) == 0:\n return None\n return max_elems(self, key=attr.getter(\"value\"), gt=self.solver.sense.is_better)[0]", "def get_longest_item(self,items):\n # Assume longest is initially zero\n longest = 0\n for item in items:\n # get length of item name\n length = len(item[ITEM])\n if length > longest:\n longest = length\n return longest", "def test_find_max_seat_id():\n data = [\n {\"seat_id\": 100},\n {\"seat_id\": 101},\n {\"seat_id\": 99},\n ]\n assert find_max_seat_id(data) == 101", "def get_stock_symbol_with_highest_cap():\n #highest_cap = 0\n highest_cap_stock = max(data, key=lambda counter: _cap_str_to_mln_float(counter['cap']))\n #for counter in data:\n # if _cap_str_to_mln_float(counter['cap']) > highest_cap:\n # highest_cap_stock = counter['symbol']\n # highest_cap = _cap_str_to_mln_float(counter['cap'])\n return highest_cap_stock['symbol']", "def get_max_pk_values(cursor, catalog_entry):\n database_name = common.get_database_name(catalog_entry)\n escaped_db = common.escape(database_name)\n escaped_table = common.escape(catalog_entry.table)\n\n key_properties = common.get_key_properties(catalog_entry)\n escaped_columns = [common.escape(c) for c in key_properties]\n\n sql = \"\"\"SELECT {}\n FROM {}.{}\n ORDER BY {}\n LIMIT 1\n \"\"\"\n\n select_column_clause = ', '.join(escaped_columns)\n order_column_clause = ', '.join([pk + ' DESC' for pk in escaped_columns])\n\n cursor.execute(sql.format(select_column_clause,\n escaped_db,\n escaped_table,\n order_column_clause))\n result = cursor.fetchone()\n\n if result:\n max_pk_values = dict(zip(key_properties, result))\n else:\n max_pk_values = {}\n\n return max_pk_values", "def find_max(list):\n return find_value_at(list, 0)", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def max_move(d):\n v = list(d.values())\n k = list(d.keys())\n return k[v.index(max(v))]", "def longest_value_key(incoming_dict):\n #return_value = max(incoming_dict, key=len)\n #return return_value\n if not incoming_dict:\n return None\n\n all_keys = incoming_dict.keys()\n if not all_keys:\n return None\n\n Key_with_longest_value = None\n for key in all_keys:\n if not Key_with_longest_value:\n Key_with_longest_value = key\n\n if len(incoming_dict[key]) > len(incoming_dict[Key_with_longest_value]):\n Key_with_longest_value = key\n return Key_with_longest_value", "def get(self, key):\n if key < self.length:\n return self.buckets[key]\n return -1", "def get_stock_symbol_with_highest_cap():\n #data2 = _cap_str_to_mln_float('cap')\n symbol_max = dict()\n for items in data:\n if items['symbol'] in symbol_max.keys():\n symbol_max[items['symbol']] = max(symbol_max[items['symbol']], _cap_str_to_mln_float(items['cap']))\n else:\n symbol_max[items['symbol']] = _cap_str_to_mln_float(items['cap'])\n\n value = sorted(symbol_max.items(), key = lambda x:x[1], reverse=True)[0][0]\n #sorted(symbol_max.items(), key = lambda x:x[1])\n return value", "def latest(scores):\n return scores[-1]", "def latest(scores):\n return scores[-1]", "def find_max(self):\n return max(self.nodes, key=int)", "def max(self):\n most = 
self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] > least:\n most = self.data[i]\n return most", "def get_highest_block(self):\n if self._highest_block is not None:\n return self._highest_block\n\n highest_in_cache = None\n if self.block_cache:\n main_chain = [block for block in self.block_cache.values() if block.chain == MAIN_CHAIN]\n if main_chain:\n highest_in_cache = max(main_chain, key=lambda b: b.height)\n\n highest_in_db = self.blocks.find_one({\"chain\": MAIN_CHAIN}, sort=[(\"height\", -1)])\n if highest_in_db:\n mongo_block_transactions = self.transactions.find({\"blockhash\": highest_in_db['hash']})\n highest_in_db = MongoBlockFactory.from_mongo(highest_in_db, mongo_block_transactions)\n\n highest_block = max([highest_in_cache, highest_in_db])\n self.set_highest_block(highest_block)\n return self._highest_block", "def max(*args, **kwargs):\n key = kwargs.get(\"key\", lambda x: x)\n args = args[0] if len(args) == 1 else args[:]\n max_value = \"\"\n for arg in args:\n if max_value == \"\":\n max_value = arg\n max_value = arg if key(arg) > key(max_value) else max_value\n return max_value", "def maxKeyTime(analyzer):\n return om.maxKey(analyzer['timeIndex'])", "def get_maximum ( self, object ):\n return self.maximum", "def get_higest_bid(self):\n orders = self.returnOrderBook(1)\n return orders[\"bids\"][0]" ]
[ "0.77251846", "0.74479496", "0.7443115", "0.728091", "0.7273358", "0.72690016", "0.7162577", "0.7084611", "0.706453", "0.702705", "0.69870156", "0.69063205", "0.69063205", "0.69063205", "0.6894217", "0.68838924", "0.687145", "0.68680716", "0.6850627", "0.67813027", "0.67538136", "0.6707102", "0.66967374", "0.66883576", "0.66206056", "0.6568412", "0.6545297", "0.6534002", "0.6512825", "0.6504137", "0.648744", "0.6464861", "0.64411604", "0.6426223", "0.6408127", "0.6388863", "0.63784057", "0.63779575", "0.63580364", "0.6357284", "0.6346443", "0.6332737", "0.632488", "0.6307064", "0.63024664", "0.6271978", "0.6266706", "0.6231872", "0.62292594", "0.62231153", "0.6221421", "0.621204", "0.62089556", "0.62089556", "0.619763", "0.6194331", "0.61721736", "0.6168468", "0.6166777", "0.6162401", "0.61507666", "0.6150651", "0.6138208", "0.61290425", "0.6127144", "0.61257535", "0.6125046", "0.61185986", "0.6110955", "0.60861313", "0.60760206", "0.605459", "0.6039318", "0.6039318", "0.60392493", "0.60360086", "0.5995617", "0.5994576", "0.5989972", "0.5989193", "0.5974294", "0.5972244", "0.5958845", "0.5957184", "0.59401035", "0.59306705", "0.59268916", "0.5925773", "0.5923351", "0.59229714", "0.5921016", "0.59097195", "0.590209", "0.590209", "0.5901644", "0.5901037", "0.5890651", "0.58894265", "0.5883293", "0.5873372", "0.5867369" ]
0.0
-1
Given a normal feature matrix, creates a transpose feature matrix, a list of discrete features, and a list of nonempty features for each id. If the dataset is dense, this uses a Numpy matrix to save on space. Otherwise, it uses a listofdicts structure.
def __init__(self,db): self._numFeatures = len(db.keys) self._numEntries = len(db.entries) numMissing = 0 if isinstance(db.entries[0],dict): #already sparse database given as input self.featureMatrix = None self.featureDicts = [{} for i in range(self._numFeatures)] self.discreteFeature = [True]*self._numFeatures for i in xrange(self._numFeatures): for j in xrange(self._numEntries): if i in db.entries[j]: v = db.entries[j][i] if v != int(v): self.discreteFeature[i] = False break self.entryLists = [[] for i in range(self._numFeatures)] self.featureSets = [] for i in xrange(self._numEntries): flist = [] for j in xrange(self._numFeatures): if j in db.entries[i]: flist.append(j) self.entryLists[j].append(i) self.featureDicts[j][i] = db.entries[i][j] else: numMissing += 1 self.featureSets.append(set(flist)) else: featureMatrix = np.array(db.entries,dtype=np.float_) self.featureMatrix = np.asfortranarray(featureMatrix).T self.featureDicts = [{} for i in range(self._numFeatures)] self.discreteFeature = [] for i in xrange(self.featureMatrix.shape[0]): self.discreteFeature.append(not any(v != int(v) for v in self.featureMatrix[i,:] if not np.isnan(v))) self.entryLists = [[] for i in range(self._numFeatures)] self.featureSets = [] for i in xrange(self._numEntries): flist = [] for j in xrange(self._numFeatures): if not np.isnan(featureMatrix[i,j]): flist.append(j) self.entryLists[j].append(i) self.featureDicts[j][i] = featureMatrix[i,j] else: numMissing += 1 self.featureSets.append(set(flist)) if numMissing == 0: self.featureSets = None self.featureDicts = None else: self.featureMatrix = None self.sparsity = float(numMissing) / (self._numFeatures*self._numEntries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(X):\n\t# Initialize new output DataFrame\n\toutput = pd.DataFrame(index = X.index)\n\n\t# Investigate new output DataFrame\n\tfor col, col_data in X.iteritems():\n\t\t# If data type is categorical, convert to dummy variables\n\t\tif col_data.dtype == object:\n\t\t\tcol_data = pd.get_dummies(col_data, prefix = col)\n\n\t\t\t# Collect the revised columns\n\t\t\toutput - output.join(col_data)\n\treturn output", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features).tocoo()\n return sparse_to_tensor(features)", "def construct_feature_columns(input_features):\n return set([tf.feature_column.numeric_column(my_feature)\n for my_feature in input_features])", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.toarray() # densify -- these are tiny and we don't care", "def transpose(matrix: List[List[float]]) -> List[List[float]]:\n return [[row[i] for row in matrix] for i in range(len(matrix[0]))]", "def features_to_unscaled_matrix(features_table):\n\n # Check arguments\n if features_table is None:\n raise ValueError('Cannot convert features table: None')\n\n if isinstance(features_table, str):\n features_table = pd.read_csv(features_table, sep='\\t', header=0)\n\n if not isinstance(features_table, pd.DataFrame):\n raise ValueError(\n 'Argument \"features_table\" must be a Pandas DataFrame or a string path to a features file that can be '\n 'loaded into a DataFrame: Found type \"{}\"'.format(type(features_table)))\n\n # Load\n X = features_table[list(GT_FEATURES)].copy()\n\n # Cast all features to float64\n X['SVTYPE'] = X['SVTYPE'].apply(lambda label: GT_SVTYPE_TO_NUMERIC[label]) # SVTYPE label numeric representation\n X = 
X.astype(np.float64)\n\n # Return feature matrix\n return X", "def matrix_transpose(matrix):\n new_matrix = [[matrix[j][i] for j in range(len(matrix))] for i in range(len(matrix[0]))]\n\n return new_matrix", "def matrix_transpose(matrix):\n res = [[matrix[j][i] for j in range(len(matrix))] for i in\n range(len(matrix[0]))]\n return (res)", "def dummify_features(df):\n colnames = df.columns\n le_dict = {}\n for col in colnames:\n le_dict[col] = preprocessing.LabelEncoder()\n le_dict[col].fit(df[col])\n df.loc[:, col] = le_dict[col].transform(df[col])\n\n enc = preprocessing.OneHotEncoder()\n enc.fit(df)\n X = enc.transform(df)\n\n dummy_colnames = [cv + '_' + str(modality) for cv in colnames for modality in le_dict[cv].classes_]\n # for cv in colnames:\n # for modality in le_dict[cv].classes_:\n # dummy_colnames.append(cv + '_' + modality)\n\n return X, dummy_colnames, enc", "def create_Tf_matrix(\n corpus,\n filename_npz=\"../data/tfidf/data_tf.npz\",\n filename_features=\"../data/tfidf/data_feature_names.pkl\",\n):\n\n vectorizer = CountVectorizer(max_features=len(corpus))\n X = vectorizer.fit_transform(corpus)\n print(\"-Vectorized matrix, \", X.toarray().shape)\n print(\" first line:\")\n print(X.toarray()[0])\n print(\"- Nombre de features :\" + str(len(vectorizer.get_feature_names())))\n print(vectorizer.get_feature_names()[0:10], \" ...\")\n\n data = pd.DataFrame(vectorizer.get_feature_names())\n data.to_pickle(filename_features)\n print(\"tf feature names - saved\")\n sparse.save_npz(filename_npz, X)\n print(\"tf matrix:\", filename_npz, \" - saved\")", "def nontuple_preprocess_features(features):\n rowsum = np.array(features.sum(1))\n ep = 1e-10\n r_inv = np.power(rowsum + ep, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def get_movie_tag_matrix(self):\n data_frame = genre_tag.get_genre_data()\n tag_df = data_frame.reset_index()\n unique_tags = tag_df.tag.unique()\n idf_data = tag_df.groupby(['movieid'])['tag'].apply(set)\n tf_df = tag_df.groupby(['movieid'])['tag'].apply(lambda x: ','.join(x)).reset_index()\n movie_tag_dict = dict(zip(tf_df.movieid, tf_df.tag))\n tf_weight_dict = {movie: genre_tag.assign_tf_weight(tags.split(',')) for movie, tags in\n list(movie_tag_dict.items())}\n idf_weight_dict = {}\n idf_weight_dict = genre_tag.assign_idf_weight(idf_data, unique_tags)\n tag_df = genre_tag.get_model_weight(tf_weight_dict, idf_weight_dict, tag_df, 'tfidf')\n tag_df[\"total\"] = tag_df.groupby(['movieid','tag'])['value'].transform('sum')\n temp_df = tag_df[[\"moviename\", \"tag\", \"total\"]].drop_duplicates().reset_index()\n\n\n\n genre_tag_tfidf_df = temp_df.pivot_table('total', 'moviename', 'tag')\n genre_tag_tfidf_df = genre_tag_tfidf_df.fillna(0)\n genre_tag_tfidf_df.to_csv('movie_tag_matrix1d.csv', index=True, encoding='utf-8')\n return genre_tag_tfidf_df", "def matrix_transpose(matrix):\n transpose = [[] for i in range(len(matrix[0]))]\n\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n 
transpose[j].append(matrix[i][j])\n\n return transpose", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def build_feature_matrix(self, dataset):\n # Create the dictionary of feature functions if it is not created\n if len(features.features_fun_dict) == 0:\n i = 0\n for o in getmembers(features):\n if isfunction(o[1]):\n features.features_fun_dict[i] = o[1]\n i += 1\n features.num_features = len(features.features_fun_dict)\n\n matrix = np.zeros([dataset.shape[0], features.num_features])\n\n # For each sample in dataset, call every feature function and store its value\n for i in range(dataset.shape[0]):\n for j in range(features.num_features):\n args = getargspec(features.features_fun_dict[j]).args\n if len(args) == 2:\n matrix[i, j] = features.features_fun_dict[j](dataset[i], self.inv_vocab)\n else:\n matrix[i, j] = features.features_fun_dict[j](dataset[i])\n\n # Return sparse matrix with the features (needed by the classifier)\n return csr_matrix(matrix)", "def _make_random_matrix(self, n_components, n_features):", "def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix", "def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix", "def to_context_mat( iterable, context=FloatContext ):\n to_float = context.from_int\n return [[to_float(x) for x in row] for row in iterable]", "def matrix_transpose(mat: List[List]) -> List[List]:\n if len(mat) == 0:\n raise ValueError(\"Matrix is empty\")\n return [[mat[j][i] for j in range(len(mat))] for i in range(len(mat[0]))]", "def transpose(matrix):\n\n res = [[0] * len(matrix) for i in range(len(matrix[0]))]\n\n for i in range(len(matrix[0])):\n for j in range(len(matrix)):\n res[i][j] = matrix[j][i]\n\n return res", "def matT(mat):\n shape=matShape(mat)\n return [[matGet(mat,y,x) for y in range(shape[0])] \\\n for x in range(shape[1])]", "def factor_matrix_to_tidy(factor_matrix, var_name=\"Component\", value_name=\"Signal\", id_vars=None):\n factor_matrix = factor_matrix.reset_index()\n if id_vars is None:\n id_vars = set()\n for column in factor_matrix.columns:\n if type(column) != int:\n id_vars.add(column)\n id_vars = sorted(id_vars)\n\n 
return factor_matrix.melt(var_name=var_name, value_name=value_name, id_vars=id_vars)", "def create_list_dataset(feature_matrix):\n\n list_x, list_y = [], []\n # Iterate through pred_time\n for pred_time in feature_matrix.pred_time.unique():\n # Slice DataFrame\n feature_matrix_slice = feature_matrix[feature_matrix.pred_time == pred_time]\n # Split feat_mat into features x and labels y\n list_x.append(feature_matrix_slice.drop(columns=[\"escalation_flag\", \"pred_time\", \"customer\"]))\n list_y.append(feature_matrix_slice[\"escalation_flag\"])\n return list_x, list_y", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def features_to_array(features_table, scaler):\n\n # Check arguments\n X = features_to_unscaled_matrix(features_table)\n return scaler.transform(X)", "def to_numeric_features(features, feature_cols=None):\n\n for col in CATEGORICAL_COLS:\n features = pd.concat(\n [features, pd.get_dummies(features[col[0]], drop_first=True)], axis=1)\n features.drop(col[0], axis=1, inplace=True)\n\n # Remove the unused columns from the dataframe.\n for col in UNUSED_COLUMNS:\n features.pop(col)\n\n # Re-index dataframe (if categories list changed from the previous dataset)\n if feature_cols is not None:\n features = features.T.reindex(feature_cols).T.fillna(0)\n return features", "def get_feature_vectors(self):\n\t\tresult = self.session.query(Image.id, Image.feature_vector).all()\n\n\t\ttransformed_result = list()\n\t\t\n\t\tfor (id, serialized_feature_vector) in result:\n\t\t\tdeserialized_tensor = tf.deserialize_feature_vector(serialized_feature_vector)\n\t\t\ttransformed_result.append((id, deserialized_tensor))\n\n\t\treturn transformed_result", "def create_design_matrix(features):\r\n\r\n onehotencoder = OneHotEncoder(categories='auto')\r\n\r\n design_matrix = ColumnTransformer(\r\n [(\"\", onehotencoder, [2, 3]),],\r\n remainder='passthrough'\r\n ).fit_transform(features)\r\n\r\n scaler = StandardScaler(with_mean=False)\r\n design_matrix = scaler.fit_transform(design_matrix)\r\n\r\n return design_matrix", "def np_transpose(matrix):\n\n return matrix.transpose()", "def transformer(ndarry):\r\n # Considering a list for collecting the mean and Standard deviation each feature\r\n y_mean = []\r\n y_std = []\r\n\r\n # Iterating over ndarray features which is first element of the ndarray shape\r\n for i in range(ndarry.shape[0]):\r\n mean_rowi = ndarry[i].mean()\r\n y_mean.append(mean_rowi)\r\n std_rowi = ndarry[i].std()\r\n y_std.append(std_rowi)\r\n\r\n matrix_b = np.array([y_mean, y_std])\r\n\r\n y_flatten = matrix_b.flatten()\r\n return y_flatten", "def reverse_transform(self, data):\n table = []\n\n for i in range(self.metadata['num_features']):\n column_data = data['f%02d' % i]\n column_metadata = self.metadata['details'][i]\n\n if column_metadata['type'] == 'value':\n column = self.continous_transformer.inverse_transform(column_data, column_metadata)\n\n if column_metadata['type'] == 'category':\n self.categorical_transformer.classes_ = column_metadata['mapping']\n column = self.categorical_transformer.inverse_transform(\n column_data.ravel().astype(np.int32))\n\n table.append(column)\n\n result = pd.DataFrame(dict(enumerate(table)))\n result.columns = self.columns\n return result", "def transpose(matrix):\n\n nb_rows = len(matrix)\n nb_cols = len(matrix[0])\n result = [ [None]*nb_rows for k in range(nb_cols)]\n\n for row in range(nb_rows):\n for col in range(nb_cols):\n result[col][row] = matrix[row][col]\n \n return result", "def preprocess_feature(df):", "def 
to_matrix(lines, token_to_id, max_len=None, pad=0, dtype=\"int32\", time_major=False):\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix", "def build_feature_matrix(node2feature, length, features):\n num_nodes = len(node2feature)\n num_features = len(features)\n X = np.zeros((length, num_nodes, num_features))\n \n for key, val in node2feature.items():\n for i, f in enumerate(features):\n X[:,key,i] = val[f]\n \n return X", "def feature_matrix(df, user_id=None, item_id=None):\n print(\"get feature matrix\")\n df1 = df.drop_duplicates(subset=['user_id'], keep='first', inplace=False)\n user_x = None\n if user_id is not None:\n user_x = int(np.argwhere(df1['user_id'].values == user_id))\n user_features = df1[['average_stars']].values\n csr_user_features = sparse.csr_matrix(user_features)\n\n df2 = df.drop_duplicates(\n subset=['business_id'],\n keep='first',\n inplace=False)\n item_x = None\n if item_id is not None:\n item_x = int(np.argwhere(df2['business_id'].values == item_id))\n item_features = df2.iloc[:, 10:].values\n\n csr_item_features = sparse.csr_matrix(item_features)\n return csr_user_features, csr_item_features, user_x, item_x", "def convert_full_features_to_input_features(raw_features):\n data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))\n data_features = data_features.transform(lambda *example: (\n example[0], # example_id\n example[7], # inputs_id\n example[9], # segment_ids\n example[2], # valid_length,\n example[8], # p_mask\n example[10], # start_position,\n example[11], # end_position\n example[14])) # is_impossible\n return data_features", "def transpose(self,mat):\n result = [[mat[j][i] for j in range(len(mat))] for i in range(len(mat[0]))]\n self.out = result\n return self.out", "def gram_matrix(features, normalize=True):\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "def transpose(matrix):\n return list(zip(*matrix))", "def transpose(self):\n data = [list(col) for col in zip(*self.data)]\n return self.__class__(self.n, self.m, data)", "def transpose(m):\n\n pass", "def _transpose_vectorized(M):\n ndim = M.ndim\n assert ndim == 3\n return np.transpose(M, [0, ndim-1, ndim-2])", "def preprocess(self, matrix: np.array) -> typing.Tuple[np.array, np.array]:\n # Impute values for some preprocessors\n matrix = self._impute_values(matrix)\n\n # Apply the preprocessors manually\n processed_features = []\n for index, preprocessor in enumerate(self._preprocessors):\n features = [line[index] for line in matrix]\n if self._is_loaded:\n try:\n current_preprocessed = preprocessor.transform(features)\n\n except ValueError:\n # If there is a difference between features count, pad the\n # vectors\n features = self._impute_values(features,\n preprocessor.n_features_in_)\n current_preprocessed = preprocessor.transform(features)\n else:\n current_preprocessed = preprocessor.fit_transform(features)\n\n processed_features.append(current_preprocessed)\n\n # Transpose the matrix of features to let each line represent a sample\n processed_features = list(map(list, zip(*processed_features)))\n\n # Drop the array and sparse matrix representations\n converted_features = []\n length_already_stored = 
bool(self._preprocessors_output_lengths)\n for sample_id, _ in enumerate(processed_features):\n current_features = []\n for feature_id in range(len(processed_features[sample_id])):\n feature = processed_features[sample_id][feature_id]\n if isinstance(feature, scipy.sparse.csr.csr_matrix):\n current_features.extend(feature.toarray()[0])\n elif isinstance(feature, list):\n current_features.extend(feature)\n else:\n current_features.append(feature)\n\n # Save the lengths if they are not already set\n if not length_already_stored:\n if isinstance(feature, scipy.sparse.csr.csr_matrix):\n length = feature.shape[1]\n elif isinstance(feature, list):\n length = len(feature)\n else:\n length = 1\n\n self._preprocessors_output_lengths.append(length)\n\n converted_features.append(current_features)\n\n # Apply a scalar\n if self._is_loaded:\n converted_features = self._last_scalar_model.transform(\n converted_features)\n else:\n # If the core is not loaded from dumped models, then create a new\n # scalar, fit it and transform the data\n self._last_scalar_model = MinMaxScaler()\n converted_features = self._last_scalar_model.fit_transform(\n converted_features)\n\n # Create a model if one is not loaded\n if not self._is_loaded:\n if self._reduction_algorithm == ReductionAlgorithm.PCA:\n self._reduction_model = PCA(\n n_components=self._reduction_components_count)\n elif self._reduction_algorithm == ReductionAlgorithm.FAST_ICA:\n self._reduction_model = FastICA(\n n_components=self._reduction_components_count)\n elif self._reduction_algorithm == ReductionAlgorithm.NMF:\n self._reduction_model = NMF(\n n_components=self._reduction_components_count)\n\n reduced_features = self._reduction_model.fit_transform(\n converted_features)\n else:\n reduced_features = self._reduction_model.transform(\n converted_features)\n\n return (converted_features, reduced_features)", "def generate_features(self, df):\n df = df.reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return extract_features(df, column_id=\"id\", impute_function=impute,\n default_fc_parameters=self.extraction_settings)", "def get_tfidf_matrices(documents):\n tfidf_vec_model = TfidfVectorizer()\n tfidf_matrix = tfidf_vec_model.fit_transform(documents)\n tfidf_matrix_transpose = tfidf_matrix.transpose()\n\n return tfidf_matrix, tfidf_matrix_transpose", "def _get_sparse_categorical_feature_columns(\n self, include_integer_columns: bool = True) -> List[FeatureColumn]:\n\n feature_columns = []\n for feature in self._dataset_schema.feature:\n\n feature_name = feature.name\n if feature_name == self.raw_label_key:\n continue\n\n feature_storage_type = _get_feature_storage_type(self._dataset_schema,\n feature_name)\n\n if feature_storage_type == tf.float32:\n continue\n\n if feature_storage_type == tf.int64:\n if not include_integer_columns:\n continue\n\n # Categorical or categorical-set feature stored as an integer(s).\n num_buckets = (\n self._tf_transform_output.num_buckets_for_transformed_feature(\n feature_name))\n new_feature_column = tf.feature_column.categorical_column_with_identity(\n feature_name, num_buckets=num_buckets)\n elif feature_storage_type == tf.string:\n # Note TFT automatically converts string columns to int columns.\n raise ValueError(\n 'String dtypes should be converted to int columns by Transform')\n else:\n raise ValueError(f'Unsupported dtype: {feature_storage_type}')\n feature_columns.append(new_feature_column)\n return feature_columns", "def createFeatureMatrix(self,batch):\n \n feature_dim = 
self.__flags.no_inner_unit * self.__flags.no_outer_unit\n data = np.zeros((len(batch), self.__flags.embedding_dim, 2 * feature_dim), dtype=np.float32)\n\n count = 0\n for obj in batch:\n m1 = self.__object2Matrix(obj)\n m2 = self.__object2Matrix(obj)\n data[count, :self.__flags.embedding_dim, :feature_dim] = m1\n data[count, :self.__flags.embedding_dim, feature_dim:2 * feature_dim] = m2\n count += 1\n scores = np.zeros(len(batch), dtype=np.float32)\n\n return (data,scores)", "def get_feature_oriented_matrix(self):\n nbr_features = self.hyperparameters.time_series_depth\n matrix = np.ones(shape=(nbr_features, nbr_features), dtype=np.float)\n np.fill_diagonal(matrix, val=0)\n return matrix", "def compute_feature_matrix(sequences, split, dinuc=False, model='cterm'):\n if model == 'cterm':\n X = compute_cterm_feature_matrix(sequences, split, dinuc=dinuc)\n else:\n X = compute_nterm_feature_matrix(sequences, split, dinuc=dinuc)\n return X", "def build_ternary_feature_matrix(prop, candidate_models, desired_data):\n transformed_features = sympy.Matrix([feature_transforms[prop](i) for i in candidate_models])\n all_samples = get_samples(desired_data)\n feature_matrix = np.empty((len(all_samples), len(transformed_features)), dtype=np.float)\n feature_matrix[:, :] = [transformed_features.subs({v.T: temp, 'YS': ys, 'V_I': v_i, 'V_J': v_j, 'V_K': v_k}).evalf() for temp, (ys, (v_i, v_j, v_k)) in all_samples]\n return feature_matrix", "def extract_svd_vectors_with_id_col(row):\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.svdFeatures.values)", "def transpose(M):\n # Section 1: if a 1D array, convert to a 2D array = matrix\n if not isinstance(M[0],list):\n M = [M]\n\n # Section 2: Get dimensions\n rows = len(M); cols = len(M[0])\n\n # Section 3: MT is zeros matrix with transposed dimensions\n MT = zeros_matrix(cols, rows)\n\n # Section 4: Copy values from M to it's transpose MT\n for i in range(rows):\n for j in range(cols):\n MT[j][i] = M[i][j]\n\n return MT", "def extract_vectors_with_id_col(row):\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.pcaFeatures.values)", "def reshape_typerec_dataset(self, data_pre, features):\n # TODO merge with reshape_dataset() (only differences is the list of keys/features)\n\n if not isinstance(data_pre, list):\n raise ValueError(f'reshape_dataset() requires a list as input')\n\n self._logger.debug(f'Reshaping dataset ({len(data_pre)} samples)...')\n data = {}\n\n for feature in features:\n data[feature] = []\n\n for sample in tqdm(data_pre):\n for feature in features:\n data[feature].append(sample[feature])\n\n for feature in features:\n data[feature] = np.asarray(data[feature])\n\n self._logger.debug(f'Reshaped dataset')\n\n return data", "def transform( self, X, y = None ):\n matrix = np.zeros((len(X),len(self.feature_names)))\n for i,bag in enumerate(X):\n for test in bag:\n try:\n matrix[i,self.feature_names.index(test)] = 1\n except ValueError:\n pass\n return matrix", "def transform(self, X):\n extracted = []\n for columns, transformers in self.features:\n # columns could be a string or list of\n # strings; we don't care because pandas\n # will handle either.\n Xt = self._get_col_subset(X, columns)\n if transformers is not None:\n Xt = transformers.transform(Xt)\n extracted.append(_handle_feature(Xt))\n\n # handle features not explicitly selected\n if self.default is not False:\n Xt = self._get_col_subset(X, self._unselected_columns(X))\n if 
self.default is not None:\n Xt = self.default.transform(Xt)\n extracted.append(_handle_feature(Xt))\n\n\n # combine the feature outputs into one array.\n # at this point we lose track of which features\n # were created from which input columns, so it's\n # assumed that that doesn't matter to the model.\n\n # If any of the extracted features is sparse, combine sparsely.\n # Otherwise, combine as normal arrays.\n if any(sparse.issparse(fea) for fea in extracted):\n stacked = sparse.hstack(extracted).tocsr()\n # return a sparse matrix only if the mapper was initialized\n # with sparse=True\n if not self.sparse:\n stacked = stacked.toarray()\n else:\n stacked = np.hstack(extracted)\n\n return stacked", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def map_feature(x):\n m, n = x.shape\n out = x\n\n # Add quodratic features.\n for i in range(n):\n for j in range(i, n):\n out = hstack((out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1)))\n\n # Add cubic features.\n for i in range(n):\n for j in range(i, n):\n for k in range(j, n):\n out = hstack(\n (out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1) * x[:, k].reshape(m, 1)))\n return out", "def transpose():", "def createFeatureArray(self, lyrFeats): \n featIdlist = []\n fullFeatureList= []\n #add features to the attribute list\n for feat in lyrFeats:\n if feat == NULL:\n feat = None\n featIdlist.append(feat.id())\n featAttributes = feat.attributes()\n fullFeatureList.extend(featAttributes)\n \n #get size of attribute table\n rows = len(featIdlist)\n cols = len(featAttributes)\n \n #create an array af attributes and return it\n featArray = np.array([fullFeatureList])\n featArray2 = np.reshape(featArray, (rows, cols))\n return featArray2", "def get_map_features(self, ids, maps):\n maps = self.backbone(maps)\n\n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_map = maps[i].repeat(sample_size, 1)\n \n # concatenate the group of sample maps\n if i == 0:\n map_features = sample_map \n else:\n map_features = torch.cat((map_features, sample_map), dim=0)\n \n return map_features", "def get_feature_matrix(N, Xtrain, D):\n for i in range(D+1):\n if i == 0:\n X = [1] * N\n else:\n X = np.vstack([np.power(Xtrain, i), X])\n X = X.transpose()\n return X", "def atoms_to_node_features(self, atoms):\n\n node_feature_matrix = np.zeros((len(atoms),2))\n for (i,atom) in enumerate(atoms):\n node_feature_matrix[i] = self.get_atom_features(atom)\n return node_feature_matrix", "def transpose_matrix(matrix_x):\n n = len(matrix_x[0])\n m = len(matrix_x)\n \n # construct the transpose of matrix\n matrix_x_t = []\n for i in xrange(n):\n row = []\n for j in xrange(m):\n row.append(matrix_x[j][i])\n\tmatrix_x_t.append(row)\n\n return matrix_x_t", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def transpose(M):\n rows = len(M)\n cols = len(M[0])\n\n MT = zeros_matrix(cols, rows)\n\n for i in range(rows):\n for j in range(cols):\n MT[j][i] = M[i][j]\n\n return MT", "def testMultiClass_MatrixData(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n classifier = tf.contrib.learn.DNNClassifier(\n 
n_classes=3,\n feature_columns=cont_features,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=1))\n\n classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)\n self.assertTrue('centered_bias_weight' in classifier.get_variable_names())\n scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.8)\n self.assertLess(scores['loss'], 0.3)", "def _transform_fn(features, mode):\n\t\tprint('Before feature transform_fn')\n\t\tfor k in features:\n\t\t\tprint(features[k].shape)\n\t\tcontext_features, example_features = feature_lib.encode_listwise_features(\n\t\t\t\tfeatures,\n\t\t\t\tinput_size=input_size,\n\t\t\t\tcontext_feature_columns=context_feature_columns,\n\t\t\t\texample_feature_columns=example_feature_columns,\n\t\t\t\tmode=mode)\n\t\tprint('After feature transform_fn')\n\t\tfor k in example_features:\n\t\t\tprint(k)\n\t\t\tprint(example_features[k].shape)\n\t\tfor k in context_features:\n\t\t\tprint(k)\n\t\t\tprint(context_features[k].shape)\n\t\treturn context_features, example_features", "def prepare_features(features, subject_labels):\n data = {}\n labels = {}\n for stage in STAGES:\n labels[stage] = []\n features_combined = []\n\n for subject in subject_labels.keys():\n current = []\n for feature, columns in features:\n if feature[stage][subject].size == 0:\n # do not look at empty arrays\n continue\n # collect features for current stage and subject\n if len(feature[stage][subject].shape) == 2:\n # feature is 2-dimensional, just use transpose\n current.append(feature[stage][subject].T)\n elif len(feature[stage][subject].shape) == 3:\n # feature is 3-dimensional, manually reshape to 2-dimensional\n # np.reshape does not work here\n reshaped = []\n for electrode in range(feature[stage][subject].shape[0]):\n for band in range(feature[stage][subject].shape[2]):\n if len(feature[stage][subject].shape) != 3:\n continue\n reshaped.append(feature[stage][subject][electrode, :, band])\n current.append(np.array(reshaped).T)\n\n if len(current) == 0:\n continue\n\n # merge the features for the current stage and subject\n features_combined.append(np.concatenate(current, axis=1))\n\n # concatenate the label name for the current subject as often as there are samples\n labels[stage] += [subject_labels[subject]] * features_combined[-1].shape[0]\n\n # concatenate the features for all subjects\n data[stage] = np.concatenate(features_combined, axis=0)\n labels[stage] = np.array(labels[stage])\n\n return data, labels", "def _convert_matrix(m):\n\n return [m[0][0], m[0][1], m[0][2], m[0][3],\n m[2][0], m[2][1], m[2][2], m[2][3],\n -m[1][0], -m[1][1], -m[1][2], -m[1][3],\n m[3][0], m[3][1], m[3][2], m[3][3]]", "def mtid_matrix(self, mtids):\n output = np.zeros((len(mtids), self.matrix.shape[1]))\n for i, mtid in enumerate(mtids):\n output[i] = self.matrix[self.mtid_lookup[mtid]]\n return output", "def get_sparse_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_sparse_categorical_feature_columns()", "def transform(self, corpus: Iterable[str]):\n count_matrix = super().transform(corpus)\n\n tfidfs = []\n for i in range(count_matrix.shape[0]):\n n = euclidian_normalization(count_matrix[i, :].multiply(self.idfs))\n tfidfs.append(n)\n\n tfidfs = sp.vstack(tfidfs)\n return tfidfs", "def get_dense_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_embedding_feature_columns()", "def make_tag_matrix(self):\n 
pass", "def compute_cterm_feature_matrix(sequences, split, di=False):\n if 0 < split < 23:\n X = degron_pred.binned_bag_of_words(sequences.str[-split:],\n int(split), n=int(split),\n dinuc=di)\n X2 = degron_pred.binned_bag_of_words(sequences.str[:-split],\n 1, n=23-int(split), dinuc=False)\n X = np.hstack([X2.toarray(), X])\n elif split == 0:\n X = degron_pred.binned_bag_of_words(sequences,\n int(split), n=int(split),\n dinuc=False)\n elif split == 23:\n X = degron_pred.binned_bag_of_words(sequences,\n int(split), n=int(split),\n dinuc=di)\n return X", "def get_tfidf_features(n_features) -> np.array:\r\n # Transform all titles from the original DataFrame into TF-IDF matrix\r\n vectorizer = TfidfVectorizer(decode_error='ignore',\r\n stop_words='english',\r\n max_features=n_features)\r\n\r\n vectors = vectorizer.fit_transform(data['title']).toarray().astype(np.float16, copy=False)\r\n print('TF-IDF features extracted. Shape:', vectors.shape)\r\n\r\n return vectors", "def makeStoichiometricMatrix(self, var, typed_token):\n # TODO: Change stoichiometric matrix to include the integers of reactions\n model = self.model\n size = self.size_of_variable(var)\n mat = np.zeros(size)\n for i, instance in enumerate(model.typed_tokens[typed_token].instances):\n for j, conversion in enumerate(model.typed_tokens[\n typed_token].conversions):\n if instance in conversion['reactants']:\n mat[i, j] = -1.\n elif instance in conversion['products']:\n mat[i, j] = 1.\n if typed_token == var.index_structures[1]: # Check sequence\n return np.transpose(mat)\n return mat", "def transpose(X):\n if len(X.shape) == 1:\n return X\n else:\n Xt = zeros((X.shape[1], X.shape[0]))\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n Xt[j][i] = X[i][j]\n\n\n return Xt", "def flatten_features_array(features: {}) -> []:\n flattened = []\n flattened.append(features[\"volume\"])\n flattened.append(features[\"area\"])\n flattened.append(features[\"compactness\"])\n flattened.append(features[\"bbox_volume\"])\n flattened.append(features[\"diameter\"])\n flattened.append(features[\"eccentricity\"])\n for i in features[\"A3\"][0]:\n flattened.append(i)\n for i in features[\"D1\"][0]:\n flattened.append(i)\n for i in features[\"D2\"][0]:\n flattened.append(i)\n for i in features[\"D3\"][0]:\n flattened.append(i)\n for i in features[\"D4\"][0]:\n flattened.append(i)\n\n return flattened", "def transform_data(data_df, target_df = None):\n rec_idx, rec_col, rec_data = create_recency_feature(data_df)\n freq_idx, freq_col, freq_data = create_frequency_feature(data_df)\n norm_idx, norm_col, norm_data = create_norm_feature(data_df)\n\n # with hstack function we are concatinating a sparse matrix and a dense matirx :)\n feat_df = hstack((rec_data, freq_data, norm_data))\n print('Final feature matrix shape:', feat_df.shape)\n \n # merge all the feature names\n feat_names = list(rec_col) + list(freq_col) + list(norm_col)\n \n if isinstance(target_df, pd.core.frame.DataFrame):\n # get +ve & -ve indices\n one_idx = target_df[target_df['outcome_flag'] == 1]['id'].index.tolist()\n zero_idx = target_df[target_df['outcome_flag'] == 0]['id'].index.tolist()\n \n # calculate fitness values of features\n rcdf = create_fitness_stats(rec_data, rec_col, one_idx, zero_idx, nans = True)\n fqdf = create_fitness_stats(freq_data, freq_col, one_idx, zero_idx, nans = False)\n nrdf = create_fitness_stats(norm_data, norm_col, one_idx, zero_idx, nans=False)\n fit_df = rcdf.append(fqdf).append(nrdf)\n fit_df.reset_index(drop=1)\n return feat_df, feat_names, 
fit_df\n \n return feat_df, feat_names", "def np_example_to_features(np_example: FeatureDict,\n config: ml_collections.ConfigDict,\n random_seed: int = 0) -> FeatureDict:\n np_example = dict(np_example)\n num_res = int(np_example['seq_length'][0])\n cfg, feature_names = make_data_config(config, num_res=num_res)\n\n if 'deletion_matrix_int' in np_example:\n np_example['deletion_matrix'] = (\n np_example.pop('deletion_matrix_int').astype(np.float32))\n\n if USE_TF:\n tf_graph = tf.Graph()\n with tf_graph.as_default(), tf.device('/device:CPU:0'):\n tf.compat.v1.set_random_seed(random_seed)\n tensor_dict = proteins_dataset.np_to_tensor_dict(\n np_example=np_example, features=feature_names)\n\n processed_batch = input_pipeline.process_tensors_from_config(\n tensor_dict, cfg)\n\n tf_graph.finalize()\n\n with tf.Session(graph=tf_graph) as sess:\n features = sess.run(processed_batch)\n\n else:\n array_dict = input_pipeline.np_to_array_dict(\n np_example=np_example,\n features=feature_names,\n use_templates=cfg.common.use_templates)\n features = input_pipeline.process_arrays_from_config(array_dict, cfg)\n features = {k: v for k, v in features.items() if v.dtype != 'O'}\n\n extra_msa_length = cfg.common.max_extra_msa\n for k in ['extra_msa', 'extra_has_deletion', 'extra_deletion_value',\n 'extra_msa_mask']:\n features[k] = features[k][:, :extra_msa_length]\n\n for k in features.keys():\n if features[k].dtype == np.int64:\n features[k] = features[k].astype(np.int32)\n elif features[k].dtype == np.float64:\n features[k] = features[k].astype(np.float32)\n\n return features", "def build_matrix(file, feature_mode):\n\n nlp = spacy.load('de_core_news_sm')\n\n conn = sql.connect(file)\n\n sql_select = \"\"\"SELECT COMP, ISCOMP, SENTENCE FROM sentences WHERE ISCOMP!=-1\"\"\"\n\n c = conn.cursor()\n c.execute(sql_select)\n\n rows = c.fetchall()\n\n nltk_data = list()\n\n for r in rows:\n comp = r[0]\n label = r[1]\n sentence = r[2]\n\n sentence = sentence.replace('<comp>', '')\n sentence = sentence.replace('</comp>', '')\n doc = nlp(sentence)\n\n features = process_row(doc, comp, feature_mode)\n\n nltk_tuple = (features, label, sentence)\n nltk_data.append(nltk_tuple)\n\n return nltk_data", "def dataarrays_to_xdata(arrays: DataArrays, features: FeatureSet) -> XData:\n x_con = None\n if arrays.con_marray is not None:\n assert features.continuous\n con_labels = features.continuous.columns.keys()\n x_con = dict(zip(con_labels, np.rollaxis(arrays.con_marray, 3)))\n x_cat = None\n if arrays.cat_marray is not None:\n assert features.categorical\n cat_labels = features.categorical.columns.keys()\n x_cat = dict(zip(cat_labels, np.rollaxis(arrays.cat_marray, 3)))\n xdata = XData(x_con, x_cat, arrays.image_indices, arrays.world_coords)\n return xdata", "def _vectorize_and_sparsify_data(self,\n vec: Vectorizer,\n ids: List[str],\n batch_size: int = 50) \\\n -> sp.sparse.csr.csr_matrix:\n\n X = []\n samples = self._generate_samples(ids, 'x')\n while True:\n X_list = list(take(batch_size, samples))\n if not X_list: break\n X_part = vec.transform(X_list)\n del X_list\n X.append(X_part)\n del X_part\n\n return sp.sparse.csr_matrix(np.vstack([x.todense() for x in X]))", "def get_features_train(tweets):\n feats = get_feature_array(tweets)\n tfidf = vectorizer.fit_transform(tweets).toarray()\n M = np.concatenate([tfidf,feats],axis=1)\n return M", "def matrix_transpose(matrix):\n\n newmat = []\n newmat2 = []\n finalmat = []\n cursize = []\n\n cursize = matrix_shape(matrix)\n for i in range(0, cursize[0]):\n for j in range(0, 
cursize[1]):\n newmat.append(matrix[i][j])\n for i in range(0, cursize[1]):\n newmat2.append(newmat[i])\n j = i\n while(len(newmat2) < cursize[0]):\n if j < len(newmat):\n newmat2.append(newmat[j + cursize[1]])\n j = j + cursize[1]\n finalmat.append(newmat2)\n newmat2 = []\n return finalmat", "def transform(self, transformer):\n\t\tnew_matrix = Matrix(self.dims)\n\t\tnew_matrix.data = [transformer(copy.deepcopy(c)) for c in self.data]\n\t\treturn new_matrix", "def transpose_matrix(matrix):\n n = len(matrix[0])\n m = len(matrix)\n matrix_t = create_matrix(n,m)\n for j in range(n):\n for i in range(m):\n matrix_t[j][i] = matrix[i][j]\n \n return matrix_t", "def matrix_transpose(X):\n if is_sparse_dataframe(X):\n fill_values = np.array([dtype.fill_value for dtype in X.dtypes])\n if not np.all(fill_values == fill_values[0]):\n raise TypeError(\n \"Can only transpose sparse dataframes with constant fill value. \"\n \"If you wish to proceed, first convert the data to dense with \"\n \"scprep.utils.toarray.\"\n )\n X_T = X.sparse.to_coo().T\n return SparseDataFrame(\n X_T, index=X.columns, columns=X.index, default_fill_value=fill_values[0]\n )\n else:\n return X.T" ]
[ "0.5814003", "0.5814003", "0.5798338", "0.5759967", "0.5759967", "0.5759362", "0.5754472", "0.56665456", "0.56545", "0.5584708", "0.5582025", "0.5555118", "0.5517428", "0.551733", "0.5496943", "0.54869235", "0.5478383", "0.5463325", "0.5463325", "0.54537517", "0.5449544", "0.5412376", "0.54102474", "0.5397746", "0.53940827", "0.53831536", "0.53831536", "0.5374167", "0.5368981", "0.535509", "0.5349816", "0.5349812", "0.5339122", "0.5334403", "0.533439", "0.53315413", "0.53179127", "0.53130585", "0.53129774", "0.52976525", "0.5291928", "0.52893853", "0.52703166", "0.5263238", "0.5246595", "0.5242078", "0.52399415", "0.5239229", "0.523405", "0.52267045", "0.5207066", "0.5182015", "0.51733726", "0.5128348", "0.51272595", "0.51127976", "0.5109645", "0.5096686", "0.50948817", "0.5094781", "0.50836176", "0.50820947", "0.50700057", "0.50682414", "0.50628704", "0.5059676", "0.50595987", "0.5056629", "0.50499684", "0.5049731", "0.5048873", "0.5038755", "0.503083", "0.50274944", "0.50268316", "0.50178367", "0.5016248", "0.5011031", "0.5010382", "0.49950576", "0.49883157", "0.4971708", "0.49715427", "0.49691686", "0.49684578", "0.49677628", "0.49671015", "0.4965914", "0.49626997", "0.49543992", "0.49527252", "0.4951479", "0.49478385", "0.49428517", "0.49423775", "0.49266142", "0.49250644", "0.49204704", "0.4914503", "0.49059802", "0.49047443" ]
0.0
-1
Given a dict, returns the key that has maximum value (arg max)
def argmax(table): return max((v,k) for k,v in table.iteritems())[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_key(dico):\n our_max = 0\n argmax = None\n for key, val in dico.items():\n if val > our_max:\n argmax = key\n our_max = val\n return argmax", "def max_key (dict):\n output = -1\n for key, value in dict.items():\n output = max(output, key)\n return output", "def keywithmaxval(dictionary): # from https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary/12343826#12343826 \n\tv=list(dictionary.values())\n\tk=list(dictionary.keys())\n\treturn k[v.index(max(v))]", "def key_of_max(d):\n keys = list(d.keys())\n keys.sort()\n return max(keys, key=lambda x: d[x])", "def keywithmaxval(d):\n\treturn max(d, key=lambda k: d[k])", "def keymaxval (dictionary):\n values = list (dictionary.values())\n return list(dictionary.keys())[values.index(max(values))]", "def get_key_with_max_value(dictionary):\n values = list(dictionary.values())\n keys = list(dictionary.keys())\n return keys[values.index(max(values))]", "def find_max_key_val_in_dict(in_dict):\n\tmax_key = None\n\tmax_val = -np.inf\n\tfor key,val in in_dict.iteritems():\n\t\tif val >= max_val:\n\t\t\tmax_val = val\n\t\t\tmax_key = key\n\treturn (max_key,max_val)", "def keywithmaxval(kwmv_dict):\n values = list(kwmv_dict.values())\n keys = list(kwmv_dict.keys())\n return keys[values.index(max(values))]", "def argmax(d):\n return max(d.iteritems(), key=operator.itemgetter(1))", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \r\n v=list(d.values())\r\n k=list(d.keys())\r\n return k[v.index(max(v))]", "def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def most_occured(dict):\n\n max = dict['e']\n max_alpha = 'e'\n\n for i, j in zip(dict.values(), dict.keys()):\n\n if max < i:\n max = i\n max_alpha = j\n \n return max_alpha", "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def dict_max(dic):\n cnt = 0\n for i in dic:\n if dic[i] > cnt:\n cnt = dic[i]\n return cnt", "def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i", "def max(*args, **kwargs):\n key = kwargs.get(\"key\", lambda x: x)\n args = args[0] if len(args) == 1 else args[:]\n max_value = \"\"\n for arg in args:\n if max_value == \"\":\n max_value = arg\n max_value = arg if key(arg) > key(max_value) else max_value\n return max_value", "def MostPopularKey(d, default):\n x = [(v, k) for (k, v) in d.iteritems()]\n if not x: return default\n x.sort()\n return x[-1][1]", "def find_max_key_val(value):\n most_sold = max(value, key=value.get)\n keys_val = value.get(most_sold)\n print(f\"We can see the highest count is for {most_sold},\")\n print(f\"with a total of {keys_val} sale(s).\")\n return keys_val", "def longest_value_key(incoming_dict: dict):\n longest_value = max(incoming_dict.values(), key=len)\n\n for key, value in incoming_dict.items():\n if longest_value == value:\n longest_key = key\n print(longest_key)", "def max_in_dict(dict_of_ints):\n list_of_vals =[]\n list_of_max_keys = []\n for i in dict_of_ints:\n 
list_of_vals.append(dict_of_ints[i])\n max_val= max(list_of_vals)\n for i in dict_of_ints:\n if dict_of_ints[i] == max_val:\n list_of_max_keys.append(i)\n if len(list_of_max_keys) == 1:\n return list_of_max_keys[0]\n else:\n return list_of_max_keys", "def max_move(d):\n v = list(d.values())\n k = list(d.keys())\n return k[v.index(max(v))]", "def test_perf_max():\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import OrderedTreeDict;\"\n \"keys_tree = OrderedTreeDict((key, key) for key in range(1000, -1000, -1))\",\n number=1000\n )\n assert dict_time > tree_time, \"Max method is slow.\"\n assert dict_sort_time > tree_time, \"Max method is slow.\"", "def best_score(a_dictionary):\n for key in a_dictionary:\n if key is None:\n return None\n else:\n max_val = max(a_dictionary)\n return max_val", "def max_map(freq_map):\n\n max_val = max(freq_map.values())\n return max_val", "def longest_value_key(incoming_dict):\n #return_value = max(incoming_dict, key=len)\n #return return_value\n if not incoming_dict:\n return None\n\n all_keys = incoming_dict.keys()\n if not all_keys:\n return None\n\n Key_with_longest_value = None\n for key in all_keys:\n if not Key_with_longest_value:\n Key_with_longest_value = key\n\n if len(incoming_dict[key]) > len(incoming_dict[Key_with_longest_value]):\n Key_with_longest_value = key\n return Key_with_longest_value", "def test_perf_max(self):\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import SplayDict;\"\n \"from random import sample;\"\n \"keys_tree = SplayDict((key, key) for key in sample(range(-1000, 1000), 2000))\",\n number=1000\n )\n self.assertGreater(dict_time, tree_time, \"Max method is slow.\")\n self.assertGreater(dict_sort_time, tree_time, \"Max method is slow.\")", "def find_max_value(self, dictionary):\n max_value = max(dictionary.items(), key=lambda x: x[1])\n list_of_max_values = []\n for k, v in dictionary.items():\n if v == max_value[1]:\n list_of_max_values.append(k)\n return list_of_max_values", "def longest_value_key(incoming_dict):\n if not incoming_dict:\n return None\n\n all_keys = incoming_dict.keys()\n if not all_keys:\n return None\n\n longest_key = None\n for key in all_keys:\n if not longest_key:\n longest_key = key\n\n if len(incoming_dict[key]) > len(incoming_dict[longest_key]):\n longest_key = key\n return longest_key", "def max(self, include_zero=False):\n for key, value in reversed(self.items()):\n if value > 0 or include_zero:\n return key", "def longest_value_key(incoming_dict):\n if not incoming_dict:\n return None\n\n all_keys = incoming_dict.keys()\n if not all_keys:\n return None\n\n\n longest_value=None\n for key in all_keys:\n if not longest_value:\n longest_value= key\n\n\n if len(incoming_dict[key])>len(incoming_dict[longest_value]):\n longest_value=key\n return longest_value", 
"def mle(self):\n\n\t\tmax_key, max_value = None, 0\n\t\tfor key, value in self.items():\n\t\t\tif value > max_value:\n\t\t\t\tmax_key, max_value = key, value\n\n\t\treturn max_key", "def max(self, key=lambda _: _):\n return max(self, key=key)", "def find_majority(dict_probs):\n # if there is no majority class, pick the first from the sorted\n max_val = max(dict_probs.values())\n max_keys = [key for key in dict_probs.keys()\n if dict_probs[key] == max_val]\n return sorted(max_keys)[0]", "def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]", "def get_max_key(data):\n return max(map(len, data))", "def getMaxKey(self):\n print self.freq\n if self.freq:\n max_freq = max(self.freq.keys())\n return list(self.freq[max_freq])[0]\n\n return ''", "def arglexmax(keys, multi=False):\n # Handle keys in reverse order to be consistent with np.lexsort\n reverse_keys = keys[::-1]\n arr = reverse_keys[0]\n breakers = reverse_keys[1:]\n # Look for the maximum value in the first array, and continue using new\n # arrays until a unique maximum index is found.\n _cand_idxs = np.where(arr == arr.max())[0]\n if len(_cand_idxs) > 1:\n for breaker in breakers:\n vals = breaker[_cand_idxs]\n _cand_idxs = _cand_idxs[vals == vals.max()]\n if len(_cand_idxs) == 1:\n break\n # If multiple maximum values are found then either\n # return them all or return an arbitrary one.\n return _cand_idxs if multi else _cand_idxs[0]", "def data_dict_max(data_dict, feature):\n name = max(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict), key=lambda k: data_dict[k][feature])\n\n return name, data_dict[name][feature]", "def maxKey(analyzer):\n return om.maxKey(analyzer['dateIndex'])", "def maxKey(analyzer):\n return om.maxKey(analyzer['dateIndex'])", "def max_word_value(words):\n return max(words, key=calc_word_value)", "def getMaxKey(self) -> str:\n if self.buckets.empty():\n return \"\"\n return iter(self.buckets.back().keys).__next__()", "def select_best_th(metrics_dict: Dict, metric: str):\n max_metric_ix = np.argmax(metrics_dict[metric])\n return metrics_dict['metrics_ths'][max_metric_ix]", "def best_score(my_dict):\n # Make sorted list\n if my_dict:\n return(sorted(my_dict)[-1])", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def maxLike(self):\n return max(self.d.values())", "def giveMaxDict(self, dicts):\n if len(dicts) == 0:\n return {}\n\n elif len(dicts) == 1:\n return dicts[0]\n\n result = deepcopy(dicts[0])\n for i in range(1, len(dicts)):\n for k, v in dicts[i].items():\n if k in result:\n result[k] = max(result[k], dicts[i][k])\n else:\n result[k] = v\n return result", "def max_by(d: D, *, keyfunc: Callable[..., Hashable], **kwds: Any) -> NumDict:\n\n value = by(d, max, keyfunc, **kwds)\n _kwds = {\"keyfunc\": keyfunc}\n _kwds.update(kwds)\n record_call(max_by, value, (d,), _kwds)\n\n return value", "def keywithminval(d): \r\n v=list(d.values())\r\n k=list(d.keys())\r\n return k[v.index(min(v))]", "def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index", "def mode(nums):\n dict_my = {num: nums.count(num) for num in nums}\n print(dict_my, 'my dict')\n max_value = max(dict_my.values())\n print(max_value, 'max value')\n # now we need to see at which index the highest value is at\n\n for (num, freq) in dict_my.items():\n if freq == max_value:\n print('found number with max freq', num)\n return num", "def argmax(x):\n def op(a, b):\n comp = (a[1] > 
b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def min_max_keys(d):\n return (min(d.keys()), max(d.keys()))", "def find_max(list):\n return find_value_at(list, 0)", "def max():\n return KeeperOfMinOrMax(int.__lt__)", "def get_topper(my_map):\n current_max = 0\n topper = \"\"\n for key, value in my_map.iteritems():\n if value > current_max:\n current_max = value\n topper = key\n return topper, current_max", "def maxKeyTime(analyzer):\n return om.maxKey(analyzer['timeIndex'])", "def test_get_maximum():\n assert get_maximum({\"numbers\": [4, 3, 2, 1]}) == {\"maximum\": 4}", "def find_matrix_max(matrix):\n\n max_val = 0.0\n max_i = 0\n max_j = 0\n\n for i in matrix.keys():\n try:\n kvp = max(matrix[i].iteritems(), key=itemgetter(1))\n except ValueError:\n continue\n \n # Maybe I should store the max value with the array, and then always \n # store the previous largest, and when i insert or delete...\n \n if kvp[1] > max_val:\n max_val = kvp[1]\n max_i = i\n max_j = kvp[0]\n\n return (max_i, max_j, max_val)", "def getMaxKey(self):\n if self.last is None:\n return \"\"\n return self.last.first.key", "def detChar(char_dic):\n\t#get the key\n\tmax_char_key = max(char_dic, key=char_dic.get)\n\t#get the value\n\tmax_char_val = max(char_dic.values())\n\n\treturn (max_char_key, max_char_val)", "def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}", "def getHighest(key, values, num):\n assert isinstance(key, list)\n assert isinstance(values, list)\n assert isinstance(num, int)\n key, values = getSorted(key, values)\n newKey = key[:num]\n newValue = values[:num]\n return newKey, newValue", "def allmax(iterable, key=None):\n result, maxcal = [], None\n key = key or (lambda x: x)\n for x in iterable:\n xval = key(x)\n if not result or xval > maxval:\n result, maxval = [x], xval\n elif xval == maxval:\n result.append(x)\n if len(result) == 1:\n result = result[0]\n return result", "def v9_multimax(iterable, key=None):\n if key is None:\n def key(item): return item\n max_key = None\n maximums = []\n for item in iterable:\n k = key(item)\n if k == max_key:\n maximums.append(item)\n elif not maximums or k > max_key:\n maximums = [item]\n max_key = k\n return maximums", "def v10_multimax(iterable, key=lambda x: x):\n max_key = None\n maximums = []\n for item in iterable:\n k = key(item)\n if k == max_key:\n maximums.append(item)\n elif not maximums or k > max_key:\n maximums = [item]\n max_key = k\n return maximums", "def greatest_product_one(self, key):\n return self.greatest_product(key)[0]", "def max_key(self):\n return self._price_list[-1]", "def testIfTrue(self):\n\t\ttestDict = {1:1, 2:2, 3:3}\n\t\tself.assertTrue(keyMax(testDict) == 3)", "def getMaxKey(self) -> str:\n if self.tail.prev.val == 0:\n return \"\"\n return next(iter(self.tail.prev.keys))", "def get_max(self):\n if self.root is None: # BC1\n return float('-inf')\n\n current = self.root\n while current.right is not None: # Traverse like a linked-list\n current = current.right\n\n return current.key", "def max_value(policy_lookup, state, player):\n\taction_values = list(get_policy_actions(policy_lookup, state, player).values())\n\tif action_values:\n\t\treturn np.max(action_values)\n\treturn 0", "def argmax(values):\n\tvalues = np.array(values)\n\tmx = np.max(values)\n\tval = 
np.where(values==mx)[0]\n\treturn np.random.choice(val)", "def get_max_index(a):\n return a.argmax()", "def get_max_index(a):\n return a.argmax()", "def get_max_index(a):\n return a.argmax()", "def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )", "def MAX(strArg, composList, atomDict):\n accum = []\n for atom, _ in composList:\n tStr = strArg.replace('DEADBEEF', atom)\n accum.append(eval(tStr))\n return max(accum)", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def _single_value_max(self, maps, threshold):\r\n max_vec = np.max(maps, axis=1)\r\n cmin = np.min(max_vec)\r\n cmax = np.max(max_vec)\r\n limit = cmax - (cmax - cmin) * threshold\r\n max_mask = max_vec > limit\r\n argmax = np.argmax(maps, axis=1)\r\n return (argmax + 1) * max_mask", "def max(scores):\n return __builtin__.max(scores) if len(scores) else 0", "def find_greatest_rotator(word_dict,n):\n rotate_best = None\n rotations = 0\n for word in word_dict:\n if rotate_pairs(word,word_dict,n) > rotations:\n rotate_best = word\n rotations = rotate_pairs(word,word_dict,n)\n return rotate_best, rotations", "def _get_maximum_from_heatmap(self, heatmap):\n assert heatmap.size(0) == 1 and heatmap.size(1) == 1\n max_map = torch.eq(heatmap, self.pool(heatmap)).float()\n heatmap = heatmap * max_map\n score = heatmap.view(-1)\n score, pos_idx = score.topk(self.max_num_people)\n mask = score > self.keypoint_threshold\n score = score[mask]\n pos_idx = pos_idx[mask]\n return pos_idx, score", "def compare_max(values, weights):\n return np.max(values.numpy())", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def get_min_depth(l_k):\n return max(l_k.values())", "def findMaximal(freqSet):", "def value(q, s):\n # Your code here\n return max(q.get(s,a) for a in q.actions)", "def longest_key(self):\n longest = None\n for key in self:\n if not longest or len(key) > len(longest):\n longest = key\n return longest", "def find_max(self):\r\n maxVal = self.items[1]\r\n if maxVal is None:\r\n return None\r\n \r\n for i in range(1,len(self.items)):\r\n if self.items[i] is not None:\r\n if self.items[i] > maxVal:\r\n maxVal = self.items[i]\r\n return maxVal", "def _find_majority(values):\n counter = Counter(values)\n return counter.most_common(1)[0][0]", "def max(self, fn=lambda x: x):\n return _(max(*self._, key=fn))", "def test_find_max_seat_id():\n data = [\n {\"seat_id\": 100},\n {\"seat_id\": 101},\n {\"seat_id\": 99},\n ]\n assert find_max_seat_id(data) == 101", "def getMaxKey(self) -> str:\n return \"\" if self.tail.prev == self.head else 
next(iter(self.tail.prev.keySet))" ]
[ "0.86044014", "0.85811913", "0.85383964", "0.8332157", "0.8297145", "0.82476956", "0.8214348", "0.8207442", "0.8156642", "0.81523865", "0.8088131", "0.8088131", "0.8088131", "0.80344087", "0.7471879", "0.7465998", "0.74641794", "0.74351037", "0.7350328", "0.73464257", "0.7231997", "0.71965677", "0.715888", "0.70937353", "0.70547324", "0.7039995", "0.6989997", "0.69186246", "0.68816125", "0.68668103", "0.6860922", "0.68475467", "0.6837474", "0.6772407", "0.6755284", "0.6547054", "0.64665645", "0.6424037", "0.6416567", "0.6402618", "0.6395157", "0.6365057", "0.6364572", "0.6364572", "0.632422", "0.6317832", "0.62839514", "0.62629926", "0.6254557", "0.6221654", "0.61517805", "0.61507624", "0.61291665", "0.6103958", "0.60709244", "0.60407746", "0.60407746", "0.60362136", "0.60340446", "0.6032607", "0.60230064", "0.6022434", "0.600628", "0.6005015", "0.59265625", "0.59106314", "0.5909415", "0.5903537", "0.5903403", "0.58938134", "0.58894527", "0.5889245", "0.58807504", "0.58744866", "0.58694327", "0.5859198", "0.5855674", "0.58505434", "0.58465207", "0.58465207", "0.58465207", "0.5811957", "0.5799254", "0.5792654", "0.5792654", "0.5785402", "0.5764425", "0.5761351", "0.57428133", "0.57363784", "0.5735189", "0.56984425", "0.5688193", "0.5663606", "0.56628716", "0.56625926", "0.56478447", "0.56317204", "0.562556", "0.5622466" ]
0.7673297
14
Given a distribution, given by the list p_list, returns the entropy of the distribution.
def entropy(p_list): assert len(p_list) > 0 E = 0.0 for p in p_list: if p == 0.0: continue E += p*math.log(p) return E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(l):\n\n probabilities = np.bincount(l) / len(l)\n with np.errstate(divide='ignore'): # ignore log(0) errors, we'll handle\n log_probabilities = np.log2(probabilities)\n log_probabilities[~np.isfinite(log_probabilities)] = 0\n return -np.sum(probabilities * log_probabilities)", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))", "def calEntropy(vList):\n from collections import Counter\n counter = Counter(vList)\n entropy, N = 0, len(vList)\n for v in counter:\n p = counter[v] / N\n entropy += - p * np.log(p)\n return entropy", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def entropy(p: torch.Tensor):\n nz = (p > 0).to(p.device)\n\n eps = torch.finfo(p.dtype).eps\n p_stable = p.clone().clamp(min=eps, max=1 - eps)\n\n out = torch.where(\n nz,\n p_stable * torch.log(p_stable),\n torch.tensor(0.0, device=p.device, dtype=torch.float),\n )\n\n return -(out).sum(-1)", "def entropy_(P):\n res = 0.0\n\n mask = P != 0.0 # avoid 0 in log\n f = lambda x: x*np.log2(x)\n # map-reduce strategy (likely to be more optimized than loops)\n temp = list(map(f, P[mask]))\n res = -np.sum(temp, dtype=float)\n return res", "def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]", "def entropy(p):\n ent = tf.where(p > np.finfo(np.float32).eps, -p * tf.log(p), tf.zeros_like(p))\n ent = tf.reduce_sum(ent, axis=1)\n return ent", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def entropy(strength=256, wordlist=wordlist):\n return os.urandom(strength // 8)", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def entropy_numba(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))", "def entropy(self, policy_params):\n return self.head.entropy(policy_params)", "def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro", "def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)", "def getEntropy(self, pVal, nVal):\n totVal = pVal + nVal\n if pVal == 0 or nVal == 0:\n return 0\n\n pProb = pVal/totVal\n nProb = 1 - pProb\n entropy = - (pProb * math.log(pProb, 2) + nProb * math.log(nProb, 2))\n return entropy", "def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)", "def entropy(self, args):\n mean, stddev = args\n dist = 
tfp.distributions.Normal(loc=mean, scale=stddev)\n entropy = dist.entropy()\n return entropy", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def __compute_entropy_probability(probability:np.ndarray) -> float:\n entropy = -np.sum(probability * np.log2(probability))\n return entropy", "def entropy(counts):\n assert (counts >= 0).all()\n probs = counts / counts.sum()\n probs = probs[probs > 0] # Avoid log(0)\n return - np.sum(probs * np.log2(probs))", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def entropy(class_probabilities):\n return sum(-p * math.log(p,2)\n for p in class_probabilities\n if p)", "def entropy(class_probabilities):\n return sum(-p * math.log(p, 2)\n for p in class_probabilities\n if p) #ignore 0's", "def _graph_fn_entropy(distribution):\n return distribution.entropy()", "def entropy(a):\n a = a.upper()\n\n freq = collections.defaultdict(int) # int() is the default constructor for non existent item, and returns 0\n for c in a:\n freq[c] = freq[c] + 1\n\n e = 0.0\n for f in freq.values():\n if f:\n p = f / len(a)\n e += p * math.log(p)\n\n return -e", "def entropyRandom(stream):\n prob = 1.0 / len(stream)\n return -(prob * log(prob, 2)) * len(stream)", "def entropy(distribution, unit=2):\n frequencies = distribution.frequencies(normalised=True)\n # check to see if it is a deterministic case (all but one are zero)\n zeros_size = frequencies[frequencies == 0].size\n if zeros_size + 1 == frequencies.size:\n return 0\n else:\n return np.sum(-frequencies * np.log2(frequencies) / np.log2(unit))", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def item_entropy(item_count, total_count):\n # Two cases where the entropy is 0\n if item_count == total_count or item_count == 0:\n return 0\n \n item_prob = 1.0 * item_count / total_count\n return -item_prob * math.log(item_prob)", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def entropy(self, f):\n f_log = -torch.log(self.einsum(\"q,q->q\", [f, 1 / self.w]))\n return self.einsum(\"q,q->\", [f, f_log])", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n 
democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party == \"R\":\r\n republicans+=1\r\n elif party == \"D\":\r\n democrats+=1\r\n total+=1\r\n\r\n if total == 0: return 0\r\n prob_dem = democrats/total\r\n prob_rep = republicans/total\r\n if prob_dem == 0: return -(prob_rep * math.log(prob_rep, 2))\r\n if prob_rep == 0: return -(prob_dem * math.log(prob_dem, 2))\r\n\r\n entropy = (-prob_dem * math.log(prob_dem, 2)) -(prob_rep * math.log(prob_rep, 2))\r\n return entropy", "def _entropy(P):\n\n #TODO remove the \"+ 1e-20\" inside the log2 computation\n # it's just a hack to avoid to compute log2(0)\n ent = -1.0 * np.sum(P * np.log2(P+1e-20), axis=0)\n return ent", "def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())", "def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])", "def kl_to_uniform(p):\n n = p.shape[0]\n p = p/p.sum()\n return np.log2(n) - entropy(p)", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def get_entropy(*labels):\n entropies = [] #list of entropy values from each subset\n total = 0 #total number of datapoints\n for subset in labels:\n n = len(subset)\n total += n\n counts = np.unique(subset, return_counts=True)[1] #frequency of unique values\n entropy = np.sum([-(i/n) * np.log2(i/n) for i in counts]) #subset entropy calcuation\n entropies.append((entropy, n))\n return np.sum([(n/total) * ent for n, ent in iter(entropies)])", "def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)", "def entropy(self, base: int = None):\n\n # shannon entropy in nats\n fdist_ = self.fdist\n fdist_[\"prob\"] = fdist_[\"freq\"] / fdist_[\"freq\"].sum()\n fdist_[\"logp\"] = np.log(fdist_[\"prob\"])\n fdist_[\"nats\"] = -fdist_[\"prob\"] * fdist_[\"logp\"]\n entropy_ = fdist_[\"nats\"].sum()\n\n # convert base\n if base:\n entropy_ = entropy_ / np.log(base)\n\n # return\n return entropy_", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def _entropy(self):\n return self.rv.entropy(*self._pymc_dists_to_value(self.args), **self.kwds)", "def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )", "def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()", "def image_entropy(img):\r\n hist = img.histogram()\r\n hist_size = sum(hist)\r\n hist = [float(h) / hist_size for h in hist]\r\n\r\n return -sum([p * math.log(p, 2) for p in hist if p != 0])", "def shannon_entropy(c):\n\n c_normalized = c / float(np.sum(c))\n c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array\n entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits\n return entropy", "def GetEntropy(flanks):\n countA = 0\n countT = 0\n countG = 0\n countC = 0\n for i in flanks:\n if i == \"A\":\n countA += 1\n elif i == \"T\":\n countT += 1\n elif i == \"C\":\n countC += 1\n elif i == \"G\":\n countG += 1\n else: pass\n total = 
countA+countT+countG+countC\n fractions = [item*1.0/total for item in [countA,countT,countG,countC]]\n entropy = sum([-1.0*item*math.log(item,2) for item in fractions if item != 0])\n return entropy", "def shannon_entropy(ps: np.ndarray, base: int = 2) -> float:\n\n return -np.sum(ps * np.log(ps) / np.log(base))", "def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent", "def entropy(*args):\n\n\n values = []\n leaf = -1\n\n for i, val in enumerate(args):\n if(val != 0):\n values.append(val * math.log(val, len(args)))\n if(val == 1):\n leaf = i\n \n return -sum(values), leaf", "def entropy(self) -> float:\n probabilities = np.array([len(players) for players in self.answers.values()])\n probabilities = probabilities / sum(probabilities)\n return sc.stats.entropy(probabilities)", "def entropy(self):\n raise NotImplementedError", "def entropy(self, logits):\n probs = torch.exp(logits)\n entropy = - torch.sum(probs * logits, dim=-1)\n return entropy", "def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s", "def _Apply(self, c, p):\n entropy = ((1.0 - c) * tf.log(1.0 - p) + c * tf.log(p)) / (-math.log(2))\n entropy = tf.reduce_mean(entropy)\n return entropy", "def entropy(x, bins, normalize=False, xy_probabilities=False):\n # calculate probabilities if xy_probabilities == False\n if xy_probabilities:\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n \n # add a small number to all probabilities if zero occurs\n if x.any(0):\n p = x + 1e-15\n else:\n p = x\n else:\n # get the bins\n bins = np.histogram_bin_edges(x, bins)\n\n # calculate the empirical probabilities\n count = np.histogram(x, bins=bins)[0]\n\n # if counts should be None, raise an error\n if np.sum(count) == 0:\n raise ValueError('The histogram cannot be empty. 
Adjust the bins to ' +\n 'fit the data')\n # calculate the probabilities\n p = (count / np.sum(count)) + 1e-15\n\n\n # calculate the Shannon Entropy\n if normalize:\n # get number of bins\n nbins = len(p)\n # maximal entropy: uniform distribution\n normalizer = np.log2(nbins) \n\n return - p.dot(np.log2(p)) / normalizer\n else:\n return - p.dot(np.log2(p))", "def get_entropy(dictionary):\n my_sum = 0\n weighted_sum_of_logs = 0\n for freq in dictionary.values():\n if freq:\n my_sum += freq\n weighted_sum_of_logs += freq * math.log(freq)\n return math.log(my_sum) - weighted_sum_of_logs / my_sum", "def calculate_entropy(dataset) :\n\n num_entries = len(dataset)\n label_counts = {}\n for vector in dataset :\n # the label is at the last index of the data set\n current_label = vector[-1]\n if current_label not in label_counts :\n label_counts[current_label] = 0\n label_counts[current_label] += 1\n # Calculate the entropy\n entropy = 0.0\n for label in label_counts :\n # Calculate probability of each label within the dataset\n prob_of_label = label_counts[label]/num_entries\n # Since the entropy is the negative of the sum of all probability,\n # simply subtract it\n entropy -= prob_of_label * log(prob_of_label, 2)\n return entropy", "def calc_entropy(data_set):\n size = len(data_set)\n label_counts = {}\n for feat_vector in data_set:\n label = feat_vector[-1]\n label_counts.setdefault(label, 0)\n label_counts[label] += 1\n\n entropy = 0.0\n for key, count in label_counts.iteritems():\n prob = float(count) / size\n entropy -= prob * log(prob, 2)\n\n return entropy", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy", "def logit_entropy(logits):\n # We support either:\n # - 1D list of logits\n # - A 2D list, batch size x logits\n assert len(logits.shape) <= 2\n # keepdims=True is necessary so that we get a result which is\n # batch size x 1 instead of just batch size\n logp = logits - tf.reduce_logsumexp(logits, axis=-1, keepdims=True)\n nlogp = -logp\n probs = tf.nn.softmax(logits, axis=-1)\n nplogp = probs * nlogp\n # This reduce_sum is just the final part of the entropy calculation.\n # Don't worry - we do return the entropy for each item in the batch.\n return tf.reduce_sum(nplogp, axis=-1, keepdims=True)", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))", "def _graph_fn_get_distribution_entropies(self, key, parameters):\n return self.distributions[key].entropy(parameters)", "def partition_entropy(subsets):\n total_count = sum(len(subset) for subset in subsets)\n return sum(data_entropy(subset) * len(subset) / total_count\n for subset in subsets)", "def compute_policy_entropy(available_actions, policy, actions):\n _,arg_ids = actions\n\n fn_pi, arg_pis = policy\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n entropy = tf.reduce_mean(compute_entropy(fn_pi))\n tf.summary.scalar('entropy/fn', entropy)\n\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n batch_mask = 
tf.to_float(tf.not_equal(arg_id, -1))\n arg_entropy = safe_div(\n tf.reduce_sum(compute_entropy(arg_pi) * batch_mask),\n tf.reduce_sum(batch_mask))\n entropy += arg_entropy\n tf.summary.scalar('used/arg/%s' % arg_type.name,\n tf.reduce_mean(batch_mask))\n tf.summary.scalar('entropy/arg/%s' % arg_type.name, arg_entropy)\n\n return entropy", "def entropy(out, dim=1, reduce='mean'):\n log_prob = F.log_softmax(out, dim=dim)\n h = -torch.sum(log_prob.exp() * log_prob, dim=dim)\n if reduce == 'none':\n return h\n if reduce == 'mean':\n return h.mean()\n if reduce == 'sum':\n return h.sum()", "def spatial_entropy(map_):\n map_ = map_ / np.sum(map_)\n return -1 * np.sum(map_ * np.log(map_))", "def entropy(self):\n return self._entropy_func", "def entropy(self, text):\n\n# text = self.myReplacer.replace(text)\n# text = self.tokenizer.tokenize(text)\n new_text = []\n for word in text:\n if word.count('\\'') > 0:\n words = word.split('\\'')\n for w in words:\n new_text.append(w)\n else:\n new_text.append(word)\n text = new_text\n \n e = 0.0\n lenth = len(text)\n if lenth == 0:\n return 0\n elif lenth < self._n:\n current_n = lenth\n else:\n current_n = self._n\n \n for i in range(current_n - 1, len(text)):\n context = tuple(text[(i - current_n + 1) : i])\n token = text[i]\n e += self.logprob(token, context)\n return e", "def entropy(data):\n n, m = np.shape(data)\n data = np.tanh(data)\n data = data / np.sum(data, axis=0)\n a = data * 1.0\n a[np.where(data == 0)] = 0.000001\n\n e = (-1.0 / np.log(n)) * np.sum(data * np.log(a), axis=0)\n w = (1 - e) / np.sum(1 - e)\n return w", "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "def partition_entropy(subsets):\n\n total_count = sum(len(subset) for subset in subsets)\n\n return sum(data_entropy(subset) * len(subset) / total_count for subset in subsets)", "def computeEntropy(self, img):\n hist, bins = np.histogram(img.ravel(), bins=256, density=True)\n return scipy.stats.entropy(hist)", "def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent", "def entropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n out = torch.distributions.Categorical(logits=logits).entropy()\n if out.ndim > 1:\n out = out.squeeze(-1)\n return out", "def entropy(self):\n return self._normal.entropy()", "def entropy(string):\n p, lns = Counter(string), float(len(string))\n return -sum(count/lns * math.log(count/lns, 2) for count in p.values())", "def entropy ( target_array ):\n return -1 * sum (\n [\n pipe ( np.sum ( target_array == value ) / len ( target_array ), lambda ratio: ratio * np.log ( ratio ) )\n for value in set ( target_array )\n ]\n ) # End entropy()", "def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en", "def entropy(n_bits):\n return n_bits and random.getrandbits(n_bits)", "def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr", "def estimate_entropy(pwlen):\n return pwlen * math.log(len(frozenset(default_charlist)), 2)", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n 
entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def calc_entropy(frequency_wn, temperature):\r\n\tentropy = 0\r\n\tfrequency = [entry * SPEED_OF_LIGHT for entry in frequency_wn]\r\n\tfor entry in frequency:\r\n\t\tfactor = ((PLANCK_CONSTANT*entry)/(BOLTZMANN_CONSTANT*temperature))\r\n\t\ttemp = factor*(1/(math.exp(factor)-1)) - math.log(1-math.exp(-factor))\r\n\t\ttemp = temp*GAS_CONSTANT/4.184\r\n\t\tentropy = entropy + temp \r\n\treturn entropy", "def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S", "def find_entropy(examples_set):\n pk = find_probabilities_of_labels(examples_set)\n result = 0\n for p in pk:\n if p != 0:\n result = result + (p * math.log(p, 2.0))\n\n result = -result\n return result" ]
[ "0.75446194", "0.73901826", "0.7331276", "0.7134225", "0.70520824", "0.7050444", "0.69640076", "0.6912087", "0.68127155", "0.68039405", "0.6800982", "0.6751854", "0.6749378", "0.6710503", "0.6699265", "0.6682273", "0.6634976", "0.6633226", "0.6585759", "0.6569177", "0.6564183", "0.6562543", "0.6555286", "0.6552651", "0.654478", "0.651475", "0.6499428", "0.6495497", "0.6492644", "0.64897996", "0.64732873", "0.6434289", "0.6431482", "0.6430887", "0.64296156", "0.63902223", "0.6379004", "0.6365881", "0.63217443", "0.6308523", "0.63025206", "0.63019437", "0.6288875", "0.6280275", "0.6273117", "0.6260369", "0.6242097", "0.62306285", "0.62289006", "0.62288934", "0.62276083", "0.6200573", "0.6200018", "0.61873883", "0.6177434", "0.61353076", "0.61284566", "0.6127936", "0.61259586", "0.61114275", "0.6097093", "0.6087518", "0.6076246", "0.6075307", "0.6068641", "0.60517436", "0.60482305", "0.60137075", "0.60109776", "0.60052663", "0.60035086", "0.6001204", "0.5981015", "0.5979964", "0.59573245", "0.59535563", "0.59428245", "0.59264517", "0.5925078", "0.59149855", "0.5908112", "0.5906954", "0.5894148", "0.5886756", "0.58729845", "0.587244", "0.587173", "0.5863092", "0.58629674", "0.5861801", "0.58589035", "0.5858337", "0.58552647", "0.58526516", "0.58519703", "0.5849819", "0.5836759", "0.58353245", "0.5830277", "0.5812063" ]
0.83441865
0
For a list of dictionaries mapping values to counts, returns a cost used for DT splitting that is optimal at 0. Currently uses the negative of information gain.
def split_cost(label_count_list):
    return -split_information_gain(label_count_list)
    #this cost value is the misclassification error.
    #return split_misclassification_error(label_count_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def weighted_score(counters, lst, weight):\n if counters == None:\n counters = {}\n\n\n for item in lst:\n if item in counters:\n counters[item] += weight\n else:\n counters[item] = weight\n\n return counters", "def cost_fun(self, specs_dict: Dict[str, float]) -> float:\n cost = 0\n for spec in self.spec_range.keys():\n penalty = self.compute_penalty(specs_dict[spec], spec)[0]\n cost += penalty\n\n return cost", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def greedy(items, maxCost, keyFunction):\n result = []\n itemsCopy = sorted(items, key=keyFunction, reverse=True)\n totalValue , totalCalories = 0.0, 0.0\n for i in range(len(itemsCopy)):\n item = itemsCopy[i]\n if (totalCalories + item.getCalories()) <= maxCost:\n result.append(item)\n totalCalories += item.getCalories()\n totalValue += item.getValue()\n return result, totalValue", "def cost(self) -> float:", "def score(priority_list, totalItemCount, itemUsageDict, threshold):\n scored = list()\n for item in priority_list:\n scored.append((item, itemUsageDict[item][\"winRatio\"] * (itemUsageDict[item][\"totalCount\"]/ totalItemCount) * threshold))\n return scored", "def getCost(dat, rateBlocks, key=\"Lintel\"):\n\n x = dat[:]\n\n if key == \"Lintel\":\n edges = [s*100 for s in [5, 6, 7, 10,\n 11, 12, 15, 16, 17, 20, 21, 22, 25]]\n else:\n edges = [s*100 for s in [10, 11, 12, 15, 16, 17, 20,\n 21, 22, 25, 26, 27, 30, 31, 32, 35, 36, 37, 40]]\n for i in edges:\n if i >= x[2]:\n x[2] = i\n break\n\n vol = x[0]*600*x[2]/float(1000000000)\n return vol*rateBlocks # *x[3]", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def segmentDict(dict, weights):\n # Normalize weights\n weights = normalize(weights)\n\n segments = {}\n actual_weights = []\n total_instances = 0\n percent_instances = 0\n i = 0\n cat = None\n\n for k,v in dict.items():\n total_instances += v\n if cat == None:\n cat = k[0].upper()\n\n sorted_d = sorted(dict.items(), key=operator.itemgetter(1), reverse=True)\n for k,v in sorted_d:\n percent_instances += v/total_instances\n segments[k] = cat + str(i)\n if percent_instances >= weights[i]:\n actual_weights += [percent_instances]\n percent_instances = 0\n i += 1\n actual_weights += [percent_instances]\n return [segments, actual_weights]", "def best_split(values,labels,nonelabels=None):\n assert len(values) >= 2\n assert len(values) == len(labels)\n N = len(values)\n ilist = sorted((v,l) for (v,l) in zip(values,labels))\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for v,l in ilist:\n rightcount[l] += 1\n bestindex = -1\n bestcost = split_cost([leftcount,rightcount])\n\n cost = bestcost\n #costs = [cost]\n #print \"Split costs:\"\n for i in xrange(len(ilist)):\n v,l = ilist[i]\n rightcount[l] -= 1\n leftcount[l] += 1\n if i+1 >= len(ilist) or v == ilist[i+1][0]:\n #no splits when v is 
equal to the next value\n continue\n cost = split_cost([leftcount,rightcount])\n #print \" \",v,leftcount.values(),rightcount.values(),cost\n #costs.append(cost)\n if cost < bestcost:\n bestcost = cost\n bestindex = i\n #raw_input()\n if bestindex < 0:\n #no split found... try splitting in half\n splitval = (ilist[0][0]+ilist[-1][0])*0.5\n else:\n splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5\n if nonelabels is None:\n return (splitval,bestcost)\n #reevaluate counts\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for l in nonelabels:\n leftcount[l] += 1\n rightcount[l] += 1\n for v,l in ilist:\n if v <= splitval:\n leftcount[l] += 1\n else:\n rightcount[l] += 1\n return splitval,split_cost([leftcount,rightcount])", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores", "def compute_cost_clarans(data, _cur_choice):\n # modified from that of CLARA\n total_cost = 0.0\n medoids = {}\n for idx in _cur_choice:\n medoids[idx] = []\n\n for i in list(data.index):\n choice = -1\n min_cost = np.inf\n for m in medoids:\n # fast_euclidean from CLARA\n tmp = np.linalg.norm(data.loc[m] - data.loc[i])\n if tmp < min_cost:\n choice = m\n min_cost = tmp\n\n medoids[choice].append(i)\n total_cost += min_cost\n # print(\"total_cost: \", total_cost)\n return total_cost, medoids", "def get_duty_cate_score(chosen_duty_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_duty_list:\n freq = MODEL[cate]['duty'][w]['freq']\n prob = MODEL[cate]['duty'][w]['prob']\n score = prob # freq * prob / DUTY_NF[cate]\n if cate in res:\n res[cate] += score\n else:\n res[cate] = score\n return res", "def sim_average_cost(self, dictionary):\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Running: sim_average_cost\"\n\t\tf = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_estimated_costs.csv', 'a')\n\n\t\tfor p in toggles.CHOSEN_PREDS:\n\t\t\tpred_cost = 0.0\n\t\t\tpred = Predicate.objects.all().get(pk=p+1)\n\t\t\tf.write(pred.question.question_text + '\\n')\n\n\t\t\t#iterate through to find each ip cost\n\t\t\tfor ip in IP_Pair.objects.filter(predicate=pred):\n\t\t\t\titem_cost = 0.0\n\t\t\t\t# sample toggles.COST_SAMPLES times\n\t\t\t\tfor x in range(toggles.COST_SAMPLES):\n\t\t\t\t\t# running one sampling\n\t\t\t\t\twhile ip.status_votes < toggles.NUM_CERTAIN_VOTES:\n\t\t\t\t\t\t# get the vote\n\t\t\t\t\t\tvalue = choice(dictionary[ip])\n\t\t\t\t\t\tif value == True:\n\t\t\t\t\t\t\tip.value += 1\n\t\t\t\t\t\t\tip.num_yes += 1\n\t\t\t\t\t\telif value == False:\n\t\t\t\t\t\t\tip.value -= 1\n\t\t\t\t\t\t\tip.num_no +=1\n\n\t\t\t\t\t\tip.status_votes += 1\n\n\t\t\t\t\t\t# check if ip is done\n\t\t\t\t\t\tif ip.status_votes == toggles.NUM_CERTAIN_VOTES:\n\t\t\t\t\t\t\t\tif ip.value > 0:\n\t\t\t\t\t\t\t\t\tuncertaintyLevel = btdtr(ip.num_yes+1, ip.num_no+1, 
toggles.DECISION_THRESHOLD)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tuncertaintyLevel = btdtr(ip.num_no+1, ip.num_yes+1, toggles.DECISION_THRESHOLD)\n\t\t\t\t\t\t\t\tif uncertaintyLevel < toggles.UNCERTAINTY_THRESHOLD:\n\t\t\t\t\t\t\t\t\titem_cost += (ip.num_yes + ip.num_no)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tip.status_votes -= 2\n\n\t\t\t\t\t# reset values\n\t\t\t\t\tip.value = 0\n\t\t\t\t\tip.num_yes = 0\n\t\t\t\t\tip.num_no = 0\n\t\t\t\t\tip.status_votes = 0\n\n\t\t\t\titem_cost = item_cost/float(toggles.COST_SAMPLES)\n\t\t\t\tpred_cost += item_cost\n\t\t\t\tf.write(ip.item.name + ': ' + str(item_cost) + \" \")\n\n\t\t\tpred_cost = float(pred_cost)/IP_Pair.objects.filter(predicate=pred).count()\n\t\t\tf.write('\\npredicate average cost: ' + str(pred_cost) + '\\n \\n')\n\t\tf.close()\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_estimated_costs.csv'", "def set_class_distribution(ub_dict, percentage_dict, name):\n tot_percent = 0\n for x in percentage_dict:\n tot_percent += percentage_dict[x]\n label_ctr_dict = defaultdict(int)\n for x in ub_dict['labels']:\n label_ctr_dict[x] += 1\n \n if abs(tot_percent - 1) > 1e-15:\n sys.exit(\"Total percentages != 1\")\n if len(ub_dict['meta_data'][0]) != len(percentage_dict):\n sys.exit(\"Mismatch between expected and given number of classes\")\n if set(ub_dict['meta_data'][0]) != set(percentage_dict):\n sys.exit(\"Mismatch between classes given and those expected\")\n\n batch_size = int(min([label_ctr_dict[x]/percentage_dict[x] for x in percentage_dict]))\n class_trgt_distrib = {x: int(batch_size*percentage_dict[x]) for x in percentage_dict}\n class_actual_distrib = {x: 0 for x in percentage_dict}\n tot_trgts = sum([class_trgt_distrib[x] for x in class_trgt_distrib])\n if tot_trgts < batch_size:\n key, val = min(class_trgt_distrib.iteritems(), key=operator.itemgetter(1))\n class_trgt_distrib[key] += (batch_size - tot_trgts)\n\n tot_rows = batch_size\n\n bal_dict = dict()\n bal_data = np.zeros((tot_rows, 3072), dtype=ub_dict['data'].dtype)\n bal_labels = [0] * tot_rows\n bal_filenames = [\"\"] * tot_rows\n\n bal_ctr = 0\n for idx in range(len(ub_dict['labels'])):\n curr_label = ub_dict['labels'][idx]\n if class_actual_distrib[curr_label] < class_trgt_distrib[curr_label]:\n bal_data[bal_ctr, :] = ub_dict['data'][idx, :]\n bal_labels[bal_ctr] = ub_dict['labels'][idx]\n bal_filenames[bal_ctr] = ub_dict['filenames'][idx]\n \n bal_ctr += 1\n class_actual_distrib[curr_label] += 1\n\n bal_dict['data'] = bal_data\n bal_dict['labels'] = bal_labels\n bal_dict['filenames'] = bal_filenames\n bal_dict['name'] = name\n bal_dict['src_meta_data'] = ub_dict['meta_data']\n\n return bal_dict", "def calc_batter_z_score(batter_list, players_over_zero_dollars, one_dollar_players,\n dollar_per_fvaaz, player_pool_multiplier, add_original_value=False):\n player_pool = int(players_over_zero_dollars * player_pool_multiplier)\n # Standard Calculations\n run_list = []\n hr_list = []\n rbi_list = []\n sb_list = []\n ops_list = []\n avg_list = []\n # weighted_batter_list = []\n batter_dict_list = []\n if not isinstance(batter_list[0], dict):\n for batter in batter_list:\n b = model_to_dict(batter)\n batter_dict_list.append(b)\n else:\n batter_dict_list = batter_list\n for batter in batter_dict_list:\n if add_original_value:\n batter['original_value'] = batter['dollarValue']\n\n run_list.append(batter['r'])\n hr_list.append(batter['hr'])\n rbi_list.append(batter['rbi'])\n sb_list.append(batter['sb'])\n 
ops_list.append(batter['ops'])\n avg_list.append(batter['avg'])\n run_list_nlargest = heapq.nlargest(player_pool, run_list)\n hr_list_nlargest = heapq.nlargest(player_pool, hr_list)\n rbi_list_nlargest = heapq.nlargest(player_pool, rbi_list)\n sb_list_nlargest = heapq.nlargest(player_pool, sb_list)\n ops_list_nlargest = heapq.nlargest(player_pool, ops_list)\n avg_list_nlargest = heapq.nlargest(player_pool, avg_list)\n # Average Calculation\n r_avg = avg_calc(run_list_nlargest)\n hr_avg = avg_calc(hr_list_nlargest)\n rbi_avg = avg_calc(rbi_list_nlargest)\n sb_avg = avg_calc(sb_list_nlargest)\n ops_avg = avg_calc(ops_list_nlargest)\n avg_avg = avg_calc(avg_list_nlargest)\n # Standard Deviation Calculation\n r_std_dev = std_dev_calc(run_list_nlargest, r_avg)\n hr_std_dev = std_dev_calc(hr_list_nlargest, hr_avg)\n rbi_std_dev = std_dev_calc(rbi_list_nlargest, rbi_avg)\n sb_std_dev = std_dev_calc(sb_list_nlargest, sb_avg)\n ops_std_dev = std_dev_calc(ops_list_nlargest, ops_avg)\n avg_std_dev = std_dev_calc(avg_list_nlargest, avg_avg)\n # zScore Calculation\n for batter in batter_dict_list:\n batter['zScoreR'] = z_score_calc(batter['r'], r_avg, r_std_dev)\n batter['weightedR'] = batter['zScoreR'] * float(batter['ab'])\n batter['zScoreHr'] = z_score_calc(batter['hr'], hr_avg, hr_std_dev)\n batter['weightedHr'] = batter['zScoreHr'] * float(batter['ab'])\n batter['zScoreRbi'] = z_score_calc(batter['rbi'], rbi_avg, rbi_std_dev)\n batter['weightedRbi'] = batter['zScoreRbi'] * float(batter['ab'])\n batter['zScoreSb'] = z_score_calc(batter['sb'], sb_avg, sb_std_dev)\n batter['weightedSb'] = batter['zScoreSb'] * float(batter['ab'])\n batter['zScoreOps'] = z_score_calc(batter['ops'], ops_avg, ops_std_dev)\n batter['weightedOps'] = batter['zScoreOps'] * float(batter['ab'])\n batter['zScoreAvg'] = z_score_calc(batter['avg'], ops_avg, ops_std_dev)\n batter['weightedAvg'] = batter['zScoreAvg'] * float(batter['ab'])\n # weighted_batter_list.append(batter)\n # Weighted Calculations\n weighted_run_list = []\n weighted_hr_list = []\n weighted_rbi_list = []\n weighted_sb_list = []\n weighted_ops_list = []\n weighted_avg_list = []\n # for batter in weighted_batter_list:\n for batter in batter_dict_list:\n weighted_run_list.append(batter['weightedR'])\n weighted_hr_list.append(batter['weightedHr'])\n weighted_rbi_list.append(batter['weightedRbi'])\n weighted_sb_list.append(batter['weightedSb'])\n weighted_ops_list.append(batter['weightedOps'])\n weighted_avg_list.append(batter['weightedOps'])\n weighted_run_list_nlargest = heapq.nlargest(player_pool, weighted_run_list)\n weighted_hr_list_nlargest = heapq.nlargest(player_pool, weighted_hr_list)\n weighted_rbi_list_nlargest = heapq.nlargest(player_pool, weighted_rbi_list)\n weighted_sb_list_nlargest = heapq.nlargest(player_pool, weighted_sb_list)\n weighted_ops_list_nlargest = heapq.nlargest(player_pool, weighted_ops_list)\n weighted_avg_list_nlargest = heapq.nlargest(player_pool, weighted_avg_list)\n # Weighted Average Calculation\n weighted_r_avg = avg_calc(weighted_run_list_nlargest)\n weighted_hr_avg = avg_calc(weighted_hr_list_nlargest)\n weighted_rbi_avg = avg_calc(weighted_rbi_list_nlargest)\n weighted_sb_avg = avg_calc(weighted_sb_list_nlargest)\n weighted_ops_avg = avg_calc(weighted_ops_list_nlargest)\n weighted_avg_avg = avg_calc(weighted_avg_list_nlargest)\n # Weighted Standard Deviation Calculation\n weighted_r_std_dev = std_dev_calc(weighted_run_list_nlargest, weighted_r_avg)\n weighted_hr_std_dev = std_dev_calc(weighted_hr_list_nlargest, 
weighted_hr_avg)\n weighted_rbi_std_dev = std_dev_calc(weighted_rbi_list_nlargest, weighted_rbi_avg)\n weighted_sb_std_dev = std_dev_calc(weighted_sb_list_nlargest, weighted_sb_avg)\n weighted_ops_std_dev = std_dev_calc(weighted_ops_list_nlargest, weighted_ops_avg)\n weighted_avg_std_dev = std_dev_calc(weighted_avg_list_nlargest, weighted_avg_avg)\n # Weighted zScore Calculation\n for batter in batter_dict_list:\n batter['weightedZscoreR'] = z_score_calc(batter['weightedR'], weighted_r_avg,\n weighted_r_std_dev)\n batter['weightedZscoreHr'] = z_score_calc(batter['weightedHr'], weighted_hr_avg,\n weighted_hr_std_dev)\n batter['weightedZscoreRbi'] = z_score_calc(batter['weightedRbi'], weighted_rbi_avg,\n weighted_rbi_std_dev)\n batter['weightedZscoreSb'] = z_score_calc(batter['weightedSb'], weighted_sb_avg,\n weighted_sb_std_dev)\n batter['weightedZscoreOps'] = z_score_calc(batter['weightedOps'], weighted_ops_avg,\n weighted_ops_std_dev)\n batter['weightedZscoreAvg'] = z_score_calc(batter['weightedAvg'], weighted_avg_avg,\n weighted_avg_std_dev)\n # Calculate Values\n fvaaz_list = []\n for batter in batter_dict_list:\n # TODO: how to handle an avg version of this?\n batter['fvaaz'] = (batter['zScoreR'] + batter['zScoreHr'] + batter['zScoreRbi'] + batter['zScoreSb'] +\n batter['weightedZscoreOps'])\n fvaaz_list.append(batter['fvaaz'])\n players_over_one_dollar = players_over_zero_dollars - one_dollar_players\n fvaaz_list_over_zero = heapq.nlargest(players_over_zero_dollars, fvaaz_list)\n fvaaz_list_over_one = heapq.nlargest(players_over_one_dollar, fvaaz_list)\n for batter in batter_dict_list:\n if batter['fvaaz'] >= fvaaz_list_over_one[players_over_one_dollar - 1]:\n # TODO: dollar_per_fvaaz seems to be a circular reference, how to resolve this?\n batter['dollarValue'] = batter['fvaaz'] * dollar_per_fvaaz\n elif batter['fvaaz'] >= fvaaz_list_over_zero[players_over_zero_dollars - 1]:\n batter['dollarValue'] = 1.0\n else:\n batter['dollarValue'] = 0.0\n return sorted(batter_dict_list, key=operator.itemgetter('fvaaz'), reverse=True)\n # sorts by fvaaz (largest to smallest)", "def _SD_optimal(t):", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def compute_transition_weights(trans_counts, smoothing):\n weights = defaultdict(float)\n \n total_count = {}\n for tag in trans_counts.keys():\n total_count[tag] = sum(trans_counts[tag].values())\n \n\n for prev_tag in trans_counts:\n for curr_tag in (list(trans_counts.keys()) + [END_TAG]):\n if curr_tag in trans_counts[prev_tag]:\n weights[(curr_tag, prev_tag)] = np.log((trans_counts[prev_tag][curr_tag] + smoothing) / (total_count[prev_tag] + len(trans_counts) * smoothing))\n else:\n weights[(curr_tag, prev_tag)] = np.log(smoothing / (total_count[prev_tag] + len(trans_counts) * smoothing))\n\n\n for tag in (list(trans_counts.keys()) + [END_TAG]):\n weights[START_TAG, tag] = -np.inf\n weights[tag, END_TAG] = -np.inf\n\n return weights", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score", "def calEntropy(vList):\n from collections import 
Counter\n counter = Counter(vList)\n entropy, N = 0, len(vList)\n for v in counter:\n p = counter[v] / N\n entropy += - p * np.log(p)\n return entropy", "def _compute_set_overhead_for_ns(set_stats, ns):\n\n if not ns or not set_stats or isinstance(set_stats, Exception):\n return 0\n\n overhead = 0\n for _k, stats in set_stats.iteritems():\n if not stats or isinstance(stats, Exception):\n continue\n\n ns_name = util.get_value_from_second_level_of_dict(stats, (\"ns\", \"ns_name\"), default_value=None,\n return_type=str).values()[0]\n if ns_name != ns:\n continue\n\n set_name = util.get_value_from_second_level_of_dict(stats, (\"set\", \"set_name\"), default_value=\"\",\n return_type=str).values()[0]\n objects = sum(util.get_value_from_second_level_of_dict(stats, (\"objects\", \"n_objects\"), default_value=0,\n return_type=int).values())\n overhead += objects * (9 + len(set_name))\n\n return overhead", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, target, source, others)\n return uniques", "def create_costs():\n infinity = float(\"inf\")\n costs = {}\n costs['biysk'] = 0\n costs['barnaul'] = infinity\n costs['novosibirsk'] = infinity\n costs['belokurikha'] = infinity\n costs['tomsk'] = infinity\n costs['krasnoyarsk'] = infinity\n costs['omsk'] = infinity\n return costs", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, source, target, others)\n return uniques", "def calculate_training_cost(soldier_list: List[Soldier]):\n total_cost = 0.0\n \n for soldier in soldier_list:\n ################################# YOUR CODE HERE #################################\n if soldier.typecode == \"INF\":\n cost = 2.5 * soldier.weapon + 1.0 * soldier.armor\n elif soldier.typecode == \"ARC\":\n cost = 1.5 * soldier.weapon + 3.0 * soldier.armor\n elif soldier.typecode == \"CVL\":\n cost = 4.0 * soldier.weapon + 6.0 * soldier.armor\n if soldier.vitality < 0.35:\n cost *= 0.5\n total_cost += cost\n ##################################################################################\n return total_cost", "def cost(self):\n cost = {}\n if len(self.nodes) == 0:\n return cost\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.cost[r] for n in self.nodes]\n estimator = AvgAggregatorEstimator(values)\n cost[r] = estimator\n return cost", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions\n )\n # sum all costs in one total cost\n cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))\n\n return cs", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n z = self.agent.get_position()[2]\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions,\n # Drone should not leave valid operation space...\n cost_out_of_range=(1. 
if z > 2 else 0.)\n )\n # sum all costs in one total cost\n cs['cost'] = min(1, sum(v for k, v in cs.items() if k.startswith('cost_')))\n return cs", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n for key in d1:\n total += d1[key]\n for item in d2:\n if item in d1:\n score += d2[item] * math.log(d1[item]/total)\n else:\n score += d2[item] * math.log(0.5/total)\n return score", "def chooseItems(member, items, item_count):\r\n # citation https://en.wikipedia.org/wiki/Knapsack_problem#Dynamic_programming_in-advance_algorithm\r\n # used the pseudocode provided\r\n # https://www.geeksforgeeks.org/python-program-for-dynamic-programming-set-10-0-1-knapsack-problem/\r\n # for how to set up opt\r\n opt = [[0 for x in range(member[0]+1)] for x in range(item_count+1)]\r\n for i in range(len(items)+1):\r\n # for each item...\r\n for w in range(member[0]+1):\r\n # find best value at buttom-up approach to carrying capacity of the family member\r\n if i == 0 or w == 0:\r\n # initialize at 0\r\n opt[i][w] = 0\r\n elif items[i-1][1] <= w:\r\n # compare current value and weight to that of it added with current item info\r\n if items[i-1][0] + opt[i-1][w-items[i-1][1]] > opt[i-1][w]:\r\n opt[i][w] = items[i-1][0] + opt[i-1][w-items[i-1][1]]\r\n else:\r\n opt[i][w] = opt[i-1][w]\r\n else:\r\n opt[i][w] = opt[i-1][w]\r\n\r\n carrying_val = max(opt[-1])\r\n track_items = carrying_val\r\n carried_items = []\r\n track_weight = member[0]\r\n # citation https://www.geeksforgeeks.org/printing-items-01-knapsack/ only for finding items carried\r\n for i in range(len(items), 0, -1):\r\n if track_items <= 0:\r\n break\r\n if track_items == opt[i-1][track_weight]:\r\n continue\r\n else:\r\n carried_items.append(items[i - 1][2])\r\n track_items -= items[i - 1][0]\r\n track_weight -= items[i - 1][1]\r\n data_for_member = [carrying_val, carried_items]\r\n return data_for_member", "def make_weights(grouping_lists):\n counts = list(map(Counter, grouping_lists))\n t = []\n for c, l in zip(counts, grouping_lists):\n t.append(1. 
/ np.array([c[l[i]] for i in range(len(grouping_lists[0]))]))\n ret = np.mean(np.vstack(t), axis=0)\n return ret / np.sum(ret)", "def get_feature_statistics(results):\n to_be_deleted = []\n\n for result in results:\n if len(result.subset) != 6:\n to_be_deleted.append(result)\n\n length = len(results)\n feature_labels = datapoint_features\n statistics = {}\n\n for label in feature_labels:\n result_with = metrics.filter_results(results, features=[label])\n result_without = metrics.filter_results(results, without_features=[label])\n\n with_length = len(result_with)\n without_length = len(result_without)\n prevalence = with_length / length\n\n if prevalence != 0:\n avg_f1_dos = math.fsum([result.metrics['dos'].f1 for result in result_with]) / with_length\n avg_f1_fuzzy = math.fsum([result.metrics['fuzzy'].f1 for result in result_with]) / with_length\n avg_f1_imp = math.fsum([result.metrics['impersonation'].f1 for result in result_with]) / with_length\n else:\n avg_f1_dos = 0\n avg_f1_fuzzy = 0\n avg_f1_imp = 0\n\n avg_f1_without_dos = math.fsum([result.metrics['dos'].f1 for result in result_without]) / without_length\n avg_f1_without_fuzzy = math.fsum([result.metrics['fuzzy'].f1 for result in result_without]) / without_length\n avg_f1_without_imp = math.fsum([result.metrics['impersonation'].f1 for result in result_without]) / without_length\n avg_f1_diff_dos = avg_f1_without_dos - avg_f1_dos\n avg_f1_diff_fuzzy = avg_f1_without_fuzzy - avg_f1_fuzzy\n avg_f1_diff_imp = avg_f1_without_imp - avg_f1_imp\n\n statistics[label] = [prevalence, avg_f1_diff_dos, avg_f1_diff_fuzzy, avg_f1_diff_imp]\n\n return statistics", "def _calc_train_class_prb(self, labels_list=None):\n if not labels_list:\n return {}\n\n n = len(labels_list)\n label_num = len(self.labels)\n prb = {}\n for l in self.labels:\n # tmp = (l, sum(1 if v == l else 0 for k, v in train_data)/n)\n prb[l] = (labels_list.count(l) + 1.0) / (n + label_num)\n return prb", "def compute_transition_weights(trans_counts, smoothing):\n\n weights = defaultdict(float)\n totals = { tag : sum(trans_counts[tag].values()) for tag in trans_counts.keys() }\n\n for prev_tag in trans_counts:\n for curr_tag in (trans_counts.keys() + [END_TAG]):\n weights[(curr_tag, prev_tag, TRANS)] = np.log((trans_counts[prev_tag][curr_tag] + smoothing) / (totals[prev_tag] + len(trans_counts) * smoothing))\n\n for tag in trans_counts:\n weights[START_TAG, tag, TRANS] = -np.inf\n return weights", "def name_distribution_from_dict(d):\n def get_number_chars(los):\n \"returns the number of characters in the given list of strings\"\n res = 0\n for s in los:\n res += len(s)\n return res\n\n dist = dict((k, get_number_chars(v)) for (k, v) in d.items())\n total = 0\n print dist\n for k, v in dist.items():\n total += v\n\n return dict((k, v/float(total)) for (k, v) in dist.items())", "def berger_parker_d(counts):\n return counts.max()/float(counts.sum())", "def process_input_items(args):\n return dict(sum([Counter({sku: value * SCORES[k] for sku, value in\n Counter(args[k].split(',')).items()}) or Counter() for k in\n set(SCORES.keys()) & set(args.keys())], Counter()))", "def calculate_cost(self):\n costs = {}\n if np.abs(self.agent.get_position()[1]) > self.y_lim:\n costs['cost_outside_bounds'] = 1.\n if self.agent.velocity_violation:\n costs['cost_velocity_violation'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n return costs", "def cost(lossvalues):\n return np.sum(lossvalues ** 2) / (2 * 
lossvalues.shape[1])", "def partitioner(mappings):\n\t\n\ttoken_counts = defaultdict(list)\n\t\n\tfor sublist in mappings:\n\t\tfor t, c in sublist:\n\t\t\ttoken_counts[t].append(c)\n\t\t\t\n\treturn token_counts", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = two_way_skar(d, [source, target], others)\n return uniques", "def compute_cost(self, chrome):\n return 1", "def cutpointStrategy(listOfDict):\n df = pd.DataFrame(listOfDict)\n resultDF = df.copy(deep=True)\n for i in df.columns:\n if(df[i].dtype == np.float64):\n distNumValues = list(set(df.loc[:,i].values))\n distNumValues.sort()\n cutPoints = [(distNumValues[j]+distNumValues[j+1])/2 for j in range(len(distNumValues)-1)]\n del resultDF[i]\n for k in range(len(cutPoints)):\n for j in range(df.shape[0]):\n if(df.loc[j,i] < cutPoints[k]):\n resultDF.loc[j,i+str(cutPoints[k])] = str(min(distNumValues)) + \"..\" + str(cutPoints[k])\n else:\n resultDF.loc[j,i+str(cutPoints[k])] = str(cutPoints[k]) + \"..\" + str(max(distNumValues))\n return resultDF.T.to_dict().values()", "def calc_pool(players):\n players = [str(x) for x in players]\n n = len(players)\n for player in players:\n nopool = payoff_nopool(p=percentages[player])\n print(nopool)\n p = {i: percentages[key] for i, key in zip([x for x in range(2, n+1)],\n [x for x in players if x != player])}\n p[1] = percentages[player]\n pool = payoff_n_p(p=p, n=n)\n print(pool)", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for 
feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def compute_metrics(self, outputs: List[Dict[str, torch.Tensor]]) -> dict:\n distance_pos, distance_neg = [], []\n for minibatch in outputs:\n minibatch = minibatch[\"val_prediction\"]\n src_embedding = minibatch[\"src_sentemb\"]\n ref_embedding = minibatch[\"ref_sentemb\"]\n pos_embedding = minibatch[\"pos_sentemb\"]\n neg_embedding = minibatch[\"neg_sentemb\"]\n\n distance_src_pos = F.pairwise_distance(pos_embedding, src_embedding)\n distance_ref_pos = F.pairwise_distance(pos_embedding, ref_embedding)\n harmonic_distance_pos = (2 * distance_src_pos * distance_ref_pos) / (\n distance_src_pos + distance_ref_pos\n )\n distance_pos.append(harmonic_distance_pos)\n\n distance_src_neg = F.pairwise_distance(neg_embedding, src_embedding)\n distance_ref_neg = F.pairwise_distance(neg_embedding, ref_embedding)\n harmonic_distance_neg = (2 * distance_src_neg * distance_ref_neg) / (\n distance_src_neg + distance_ref_neg\n )\n distance_neg.append(harmonic_distance_neg)\n\n return {\n \"kendall\": self.metrics.compute(\n torch.cat(distance_pos), torch.cat(distance_neg)\n )\n }", "def compare_dictionaries(d1, d2):\r\n score = 0\r\n gef = 0\r\n for z in d1:\r\n gef += d1[z]\r\n total = gef\r\n \r\n for x in d2:\r\n if x in d1:\r\n score += math.log(d1[x] / total) * d2[x] \r\n else:\r\n score += math.log(0.5/total) * d2[x]\r\n return score", "def bandwidth(configuration):\n bandwidth_dict_all = {\n \"HL\" : {'H1' : 117.4, 'L1' : 117.4},\n \"HLV\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9},\n \"HLVK\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9, 'K1' : 148.9},\n \"HLVKI\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9, 'K1' : 148.9, 'I1' : 117.4},\n \"GW170817\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9},\n \"GW170817_without_Virgo\" : {'H1' : 117.4, 'L1' : 117.4},\n \"GW170814\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9},\n \"design\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9 },\n \"early\" : {'H1' : 123.7, 'L1' : 123.7 },\n \"half_virgo\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9 },\n \"half_ligo\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9 },\n \"nosrm\" : {'H1' : 43, 'L1' : 43, 'V1': 58 },\n \"india\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9, \"I1\" : 117.4 },\n \"kagra\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9, \"I1\" : 117.4, \\\n \"K1\" : 89.0 },\n \"bala\" : {'H1' : 117.4, 'H2' : 117.4, 'L1' : 117.4, 'V1': 148.9, \\\n \"I1\" : 117.4, \"K1\" : 89.0 },\n \"sa\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9, \"I1\" : 117.4, \\\n \"K1\" : 89.0 , \"S1\": 117.4},\n \"sa2\" : {'H1' : 117.4, 'L1' : 117.4, 'V1': 148.9, \"I1\" : 117.4, \\\n \"K1\" : 89.0 , \"S1\": 117.4},\n \"steve\" : {'H1' : 100.0, 'L1' : 100.0, 'V1': 100.0, \"I1\" : 100.0 },\n \"s6vsr2\" : {'H1' : 100., 'L1' : 100., 'V1': 120. 
}\n }\n return(bandwidth_dict_all[configuration])", "def calculate_priors(trainingLabels):\r\n sum = 0\r\n priors = {}\r\n totalSamples = len(trainingLabels)\r\n classes = set(trainingLabels)\r\n for cls in classes:\r\n numCls = len(filter(lambda x: x == cls, trainingLabels))\r\n sum += numCls\r\n priors[cls] = float(numCls) / float(totalSamples)\r\n \r\n # Sanity check: valid partitioning\r\n assert(sum == totalSamples)\r\n\r\n return priors", "def knapsack(items, capacity):\r\n pass", "def addCowWeight(list, cows):\r\n sum = 0.0\r\n for key in list:\r\n sum += cows[key]\r\n return sum", "def estimate_transition_params(symbol_symbol_counts, symbol_counts):\n\n transition_probabilities = {}\n for symbol1 in symbol_symbol_counts:\n transition_probabilities[symbol1] = {}\n for symbol2 in symbol_symbol_counts[symbol1]:\n if symbol_counts[symbol1] == 0:\n transition_probabilities[symbol1][symbol2] = 0\n else:\n transition_probabilities[symbol1][symbol2] = float(symbol_symbol_counts[symbol1][symbol2])/symbol_counts[symbol1]\n\n return transition_probabilities", "def hash_threshold(token_dict, fp_len):\n sum_hash = [0] * fp_len\n for _, token in token_dict.items():\n sum_hash = [ x + token.weight * y for x, y in zip(sum_hash, token.hash_list)]\n\n # apply binary threshold\n for i, ft in enumerate(sum_hash):\n if ft > 0:\n sum_hash[i] = 1\n else:\n sum_hash[i] = 0\n return sum_hash", "def compute_precision(gold_dict, src_word2ind, trg_word2ind, src_words, trg_words, scores, args, BATCH_SIZE=20, verbose = False):\n oov = set()\n correct = 0\n n,m = scores.shape # The actual size of mapping computed, might be smaller that total size of dict\n\n precisions = {}\n if verbose:\n print('@{:2} {:10} {:30} {:30}'.format('k', 'Src','Predicted','Gold'))\n print_row = '{:2} {:10} {:30} {:30} {}'\n\n for k in [1,5,10]:\n correct = 0\n for src_idx,tgt_idx in gold_dict.items():\n if src_idx > n or np.all([e>m for e in tgt_idx]):#Src word not in mapping\n oov.add(src_idx)\n continue\n else:\n knn = np.argpartition(scores[src_idx,:], -k)[-k:] # argpartition returns top k not in order\n knn_sort = knn[np.argsort(-scores[src_idx,knn])] # With - to get descending order\n if set(knn_sort).intersection(tgt_idx):\n correct +=1\n correct_string = ' '\n else:\n correct_string = 'X'\n if verbose:\n src_str = src_words[src_idx]\n pred_str = ','.join([trg_words[k] for k in knn_sort])\n gold_str = ','.join([trg_words[k] for k in tgt_idx])\n print(print_row.format(k,src_str,pred_str,gold_str,correct_string))\n\n\n coverage = len(gold_dict.keys()) / (len(gold_dict.keys()) + len(oov))\n if verbose:\n print('Coverage: {:7.2%} Precision @{:2}:{:7.2%}'.format(coverage, k, correct / len(gold_dict)))\n precisions[k] = correct / len(gold_dict)\n return precisions", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def calculate_weights(counts, thresholds):\n weights = []\n steps = len(thresholds) + 1\n for i in range(len(counts)):\n for j in range(steps - 1):\n if counts[i] <= thresholds[j]:\n weights.append(j + 1)\n break\n elif j == steps - 2:\n # last step\n weights.append(j + 2)\n return weights", "def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, 
micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def score(caches, endpoints, requests, videos):\n result = 0\n for endpoint in endpoints:\n for request in endpoint.requests:\n result += (endpoint.latence_to_dc - getLatency(request.video, endpoint.eid, caches, videos, endpoints)) * request.amount\n return result", "def distributePoints(valueList, **args):\r\n points = args.get(\"points\", args.get(\"total\", 80) - sum(valueList))\r\n max = args.get(\"max\", 10)\r\n weights = args.get(\"weights\", [])\r\n if sum(valueList) + points > len(valueList) * max:\r\n return [max for i in valueList]\r\n\r\n # Set the weights, defaulting to all 1\r\n weights = weights if weights else [1 for i in range(0, len(valueList))]\r\n\r\n # We're distributing based on an index list because weighting requires using\r\n # random.choices\r\n indexList = [i for i in range(0, len(valueList))]\r\n\r\n while points > 0:\r\n points -= (\r\n 1\r\n if incrementAtIndex(\r\n valueList, choices(indexList, weights, k=1)[0], max\r\n )\r\n else 0\r\n )\r\n\r\n return valueList", "def compute_propability(word, label, dict):\n return dict[label][word] / sum(dict[label].values())", "def carbs(foods, foods_used):\n carbs = 0.0\n for i, count in foods_used.items():\n carbs += (foods[i]['carbs'] * count)\n return carbs", "def cost(books):\n\n num_types = len(set(books))\n assert num_types < 6, \"Discounts indeterminate\"\n return DISCOUNT_TABLE.get(num_types, 0) * 8 * len(books) * 100", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def throughputbin(conn):\n c = conn.cursor()\n bc = bincount(conn)\n total_tasks = totaltasks(conn)\n return {\n \"throughput_tasks_per_bin\": total_tasks / bc['bins']['count']\n }", "def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def calcHeuristicFunc(self, dictio):\n\t\t_sum = 0\n\n\t\tfor u in self.unitlist:\n\t\t for p in self.units[u[0]]:\n\t\t nums = [0] *self.N**2\n\t\t for i in p:\n\t\t nums[dictio[i]-1] += 1\n\t\t for j in nums:\n\t\t if(j==0):\n\t\t _sum += 1\n\t\treturn _sum", "def smartdatasplit(target, *xs, **kw):\n random = kw[\"random\"] if \"random\" in kw else False\n keepmincount = kw[\"keepmincount\"] if \"keepmincount\" in kw else 1\n holdmincount = kw[\"holdmincount\"] if \"holdmincount\" in kw else 1\n xs = (target,) + xs\n 
assert([x.shape[0] for x in xs].count(xs[0].shape[0]) == len(xs))\n batsize = xs[0].shape[0]\n globcounts = {}\n # gather class usage stats\n for i in range(batsize):\n k = target[i]\n if k not in globcounts:\n globcounts[k] = 0\n globcounts[k] += 1\n # create new datas\n keepsize = 0\n holdsize = 0\n holdcounts = {}\n keepcounts = {}\n for k in globcounts:\n if globcounts[k] >= keepmincount + holdmincount:\n holdsize += holdmincount\n holdcounts[k] = holdmincount\n keepsize += globcounts[k] - holdmincount\n keepcounts[k] = globcounts[k] - holdmincount\n keepxs = [np.zeros((keepsize,) + x.shape[1:], dtype=x.dtype) for x in xs]\n holdxs = [np.zeros((holdsize,) + x.shape[1:], dtype=x.dtype) for x in xs]\n # populate datas\n idxs = np.arange(0, batsize)\n if random:\n np.random.shuffle(idxs)\n kidx = 0\n hidx = 0\n for i in range(batsize):\n idx = idxs[i]\n tidx = target[idx]\n if tidx in holdcounts:\n if holdcounts[tidx] > 0:\n holdcounts[tidx] -= 1\n for x, y in zip(holdxs, xs):\n x[kidx, ...] = y[idx, ...]\n kidx += 1\n elif keepcounts[tidx] > 0:\n keepcounts[tidx] -= 1\n for x, y in zip(keepxs, xs):\n x[hidx, ...] = y[idx, ...]\n hidx += 1\n else:\n print \"sum ting wong\"\n return tuple(keepxs), tuple(holdxs)", "def count_targets(searchList):\n targets = {}\n for x in searchList:\n loVal = -10000 - x\n hiVal = 10000 - x\n loInd = bisect_left(searchList, loVal)\n hiInd = bisect_right(searchList, hiVal)\n for y in searchList[loInd:hiInd]:\n if y == x:\n continue\n t = x + y\n targets[t] = 1\n return len(targets)", "def _get_label_weight(opts, data):\n experiments = data[\"exp_names\"].value\n label_mat = numpy.zeros((experiments.size, 7))\n vid_lengths = numpy.zeros((experiments.size,))\n for i in range(experiments.size):\n exp_key = experiments[i]\n exp = data[\"exps\"][exp_key]\n for j in range(6):\n # label_counts[j] += exp[\"org_labels\"].value[:, j].sum()\n label_mat[i, j] = exp[\"org_labels\"].value[:, j].sum()\n # label_counts[-1] +=\\\n # exp[\"org_labels\"].shape[0] - exp[\"org_labels\"].value.sum()\n label_mat[i, -1] =\\\n exp[\"org_labels\"].shape[0] - exp[\"org_labels\"].value.sum()\n\n # vid_lengths[i] = exp[\"hoghof\"].shape[0]\n vid_lengths[i] = exp[\"org_labels\"].shape[0]\n\n # label_counts = label_mat.sum(axis=0)\n label_weight = 1.0 / numpy.mean(label_mat, axis=0)\n # label_weight[-2] = label_weight[-2] * 10\n if opts[\"flags\"].reweight is False:\n label_weight = [5, 5, 5, 5, 5, 5, .01]\n # import pdb; pdb.set_trace()\n return label_weight", "def profit_curve(cost_benefit, predicted_probs, labels):\n n_obs = float(len(labels))\n # Make sure that 1 is going to be one of our thresholds\n maybe_one = [] if 1 in predicted_probs else [1] \n thresholds = maybe_one + sorted(predicted_probs, reverse=True)\n profits = []\n for threshold in thresholds:\n y_predict = predicted_probs >= threshold\n confusion_matrix = standard_confusion_matrix(labels, y_predict)\n threshold_profit = np.sum(confusion_matrix * cost_benefit) / n_obs\n profits.append(threshold_profit)\n return np.array(profits), np.array(thresholds)", "def getSupport(item):\n return float(freqSet[item]) / len(transactionList)", "def smooth(item_count, nr_tokens, type=\"min\"): #change type of smoothing? 
NLTK if freq dists\n if type == \"ele\":\n smoothed_count = item_count + nr_tokens * 0.5\n else:\n smoothed_count = item_count + (1 / nr_tokens)\n return smoothed_count", "def intra_cost(points, cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in points:\n if point != pt and pt not in cluster.points:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n return int(sum(map(_p2p, cluster.points)))", "def calculateAllocations(rf, resource, ledgers, data):\n weights = {\n p: rf(l.debtRatio()) for p, l in ledgers.items() if not np.isclose(data[p], 0)\n }\n total_weight = sum(weight for weight in weights.values())\n return {\n p: np.round(resource * weight / total_weight, 1)\n for p, weight in weights.items()\n }", "def data_balancing(path):\r\n \r\n distribution_list = data_distribution(path)\r\n \r\n balancing_factor = []\r\n for i in range(len(distribution_list)):\r\n #print(i,distribution_list[i])\r\n #multiplier = max(distribution_list) / distribution_list[i] - 1\r\n multiplier = (np.round(5000 / distribution_list[i],0))\r\n multiplier = int(np.round(multiplier/4,0))\r\n balancing_factor.append(multiplier)\r\n #print(\"sddada\",max(distribution_list) / distribution_list[i])\r\n return balancing_factor", "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def calculate_cost(data, centers, clusters):\n total = 0\n for i in range(len(centers)):\n total = total + np.sum(data[centers[i]][clusters[i]]) \n return total", "def data_processing(data_dic: Dict[str, int]):\n\n sum_0 = 0\n for key, value in data_dic.items():\n if int(list(key)[0]) + int(list(key)[1]) == 0:\n sum_0 += value\n return sum_0 / shots", "def reduce_task(*items):\n merged = dict()\n keys = set().union(*items)\n for key in keys:\n merged[key] = sum([x.get(key, 0) for x in items])\n return merged", "def build_task_weight_from_label(task_name_to_labels):\n task_name_to_weights = {}\n for task_name, label in task_name_to_labels.items():\n task_name_to_weights[task_name] = tf.cast(tf.divide(tf.cast(tf.count_nonzero(tf.greater_equal(label, 0)), dtype=tf.int32), tf.shape(label)[0]), dtype=tf.float32)\n\n return task_name_to_weights", "def _compute_traj_stats(traj_obs_dict):\n traj_stats = { k : {} for k in traj_obs_dict }\n for k in traj_obs_dict:\n traj_stats[k][\"n\"] = traj_obs_dict[k].shape[0]\n traj_stats[k][\"mean\"] = traj_obs_dict[k].mean(axis=0, keepdims=True) # [1, ...]\n traj_stats[k][\"sqdiff\"] = ((traj_obs_dict[k] - traj_stats[k][\"mean\"]) ** 2).sum(axis=0, keepdims=True) # [1, ...]\n return traj_stats", "def bandwidth_share(self, nodes_efficiency: Dict[str, float]) -> Dict[str, float]:\n pass", "def get_good_label_distrib(semrel_dict, semrel_counts):\n\n # Do cardinality check on srdict first, to see if there are enough elements to fulfill the distribution in five_semrels.\n srdict_counts = {key:len(value) for key, value in semrel_dict.items()}\n\n # This variable will hold the number of collocations to add to 
the distribution in place of unfulfillable other labels.\n num_coll_to_add = 0\n\n # Go through all non-'collocation' labels in the dictionary that contains the requested number of labels.\n for label, count in semrel_counts.items():\n\n if label != 'collocation':\n\n # Get the difference in cardinality between the available set and the requested set.\n diffc = srdict_counts[label] - semrel_counts[label]\n\n # If negative, i.e. if there are more requested than available, record the difference (this is how many instances\n # of 'collocation' to add) and change the number of requested words to the number available.\n if diffc < 0:\n num_coll_to_add += abs(diffc)\n semrel_counts[label] = srdict_counts[label]\n\n # Adjust the values in 'collocation' in the dictionary.\n if 'collocation' in set(semrel_counts.keys()):\n semrel_counts['collocation'] += num_coll_to_add\n else:\n semrel_counts['collocation'] = num_coll_to_add\n\n return semrel_counts", "def fitness_function(items, m):\r\n cost = 0\r\n weight = 0\r\n for key, is_selected in items.items():\r\n if is_selected:\r\n weight += key[0]\r\n cost += key[1]\r\n res = cost if weight <= m else 0\r\n return res", "def calculate_cost(self, **kwargs):\n costs = {}\n if np.abs(self.agent.get_position()[0]) > self.x_lim:\n costs['cost_outside_bounds'] = 1.\n # sum all costs in one total cost\n costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))\n\n return costs", "def calc_weights(terms, ent, fp_len):\n term_dict = {}\n length = len(terms)\n for i, term in enumerate(terms):\n # get weights\n arguments = [terms[(i-1)%length], term, terms[(i+1)%length]]\n if term not in term_dict:\n fp_hash = md5Hash(arguments).zfill(fp_len)\n fp_hash_list = binconv(fp_hash, fp_len)\n token = None\n #if term in ent:\n # token = Token(fp_hash_list, 2)\n #else:\n token = Token(fp_hash_list, 0)\n term_dict[term] = token\n #if term in ent:\n # term_dict[term].weight += 1\n #else:\n term_dict[term].weight += 1\n return term_dict", "def get_dps(data_dict, R = 50):\n p1_ida = iterable_data_array(data_dict, 'p1')\n p2_ida = iterable_data_array(data_dict, 'p2')\n worker = data_array_builder()\n \n for p1, p2 in zip(p1_ida, p2_ida):\n worker.append((p1 - p2)/R) \n \n return {'time':data_dict['time'], 'dp':worker.build()}", "def compute_metrics(self, results: list) -> dict:", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def calculate_tokens_balance(tokens):\n result = defaultdict(int)\n for key, value in tokens:\n result[key] += value\n return result", "def test_metric(self, qset: Iterator[Tuple[str, float]]) -> Dict[str, float]:\n res = dict(mks0=0.0, mks1=0.0, mks2=0.0, sum_weights=0.0, sum_wlen=0.0, n=0)\n hist = {k: {} for k in {\"mks0\", \"mks1\", \"mks2\", \"l\"}} # pylint: disable=C0208\n wei = {k: {} for k in hist}\n res[\"hist\"] = hist\n res[\"histnow\"] = wei\n\n for el, _ in self.enumerate_test_metric(qset):\n le = len(el.value)\n w = el.weight\n res[\"mks0\"] += w * el.mks0\n res[\"mks1\"] += w * el.mks1\n res[\"mks2\"] += w * el.mks2\n res[\"sum_weights\"] += w\n res[\"sum_wlen\"] += w * le\n res[\"n\"] += 1\n\n if el.mks0 not in hist[\"mks0\"]:\n hist[\"mks0\"][el.mks0] = w\n wei[\"mks0\"][el.mks0] = 1\n else:\n hist[\"mks0\"][el.mks0] += w\n wei[\"mks0\"][el.mks0] += 1\n if el.mks1 not in hist[\"mks1\"]:\n hist[\"mks1\"][el.mks1] = w\n wei[\"mks1\"][el.mks1] = 1\n else:\n hist[\"mks1\"][el.mks1] += w\n wei[\"mks1\"][el.mks1] += 1\n if el.mks2 not in 
hist[\"mks2\"]:\n hist[\"mks2\"][el.mks2] = w\n wei[\"mks2\"][el.mks2] = 1\n else:\n hist[\"mks2\"][el.mks2] += w\n wei[\"mks2\"][el.mks2] += 1\n if le not in hist[\"l\"]:\n hist[\"l\"][le] = w\n wei[\"l\"][le] = 1\n else:\n hist[\"l\"][le] += w\n wei[\"l\"][le] += 1\n return res", "def total_cost(self, data, lmbda, convert=False):\n cost = 0.0\n for x, y in data:\n a = self.feedforward(x)\n if convert: y = vectorized_result(y)\n cost += self.cost.fn(a, y)/len(data)\n cost += 0.5*(lmbda/len(data))*sum(\n np.linalg.norm(w)**2 for w in self.weights)\n return cost", "def process(list_, dict_, keyword):\n if len(list_) == 4:\n name, val, type_, frac_ = list_[0], list_[1], list_[2], list_[3]\n elif list_[0] == 'direc':\n name, val = list_[0], [list_[i] for i in range(len(list_)) if i > 0]\n else:\n name, val = list_[0], list_[1]\n\n if name not in dict_[keyword].keys() and name in ['coeff']:\n dict_[keyword][name] = []\n if keyword in ['TREATED', 'UNTREATED', 'COST'] and 'types' not in dict_[keyword].keys():\n dict_[keyword]['types'] = []\n if keyword in ['TREATED', 'UNTREATED', 'COST']:\n if len(list_) == 4:\n dict_[keyword]['types'] += [[type_, float(frac_)]]\n else:\n dict_[keyword]['types'] += ['nonbinary']\n\n # Type conversion\n if name in ['agents', 'seed', 'maxiter', 'disp']:\n val = int(val)\n elif name in ['source', 'file', 'optimizer', 'start']:\n val = str(val)\n elif name in ['direc']:\n val = list(val)\n else:\n val = float(val)\n if name in ['coeff']:\n dict_[keyword][name] += [val]\n else:\n dict_[keyword][name] = val\n # Finishing.\n return dict_", "def calc_weight(str,dict):\n for i,c in enumerate(str):\n dict[c] += 10**(len(str)-(i+1))", "def summarize(lst, threshold):\n\n freq = Counter(lst)\n freq_most = dict((key, val) for key, val in freq.items() if val > threshold)\n summary = pd.Series(list(freq_most.values()), index=list(freq_most.keys()))\n return summary", "def get_expected_cost(self):", "def calculate_total_cost(state):\n pass" ]
[ "0.58399343", "0.5823463", "0.5779527", "0.55471617", "0.5469961", "0.5425537", "0.53996956", "0.5383586", "0.533916", "0.5299643", "0.52919436", "0.5257763", "0.52473646", "0.52160746", "0.52141124", "0.52082145", "0.51415884", "0.5134601", "0.5126979", "0.5097489", "0.50955534", "0.5092949", "0.50773215", "0.507489", "0.5071505", "0.50704443", "0.5062674", "0.5060795", "0.5043839", "0.5036701", "0.50315046", "0.50294954", "0.50251454", "0.5021918", "0.5001163", "0.4986703", "0.49835545", "0.49826655", "0.49813733", "0.49779838", "0.4974216", "0.4973906", "0.49646807", "0.49573833", "0.49560454", "0.4953509", "0.4944804", "0.4943102", "0.4938694", "0.4926905", "0.49252582", "0.49247438", "0.49212745", "0.49189052", "0.4916288", "0.4915723", "0.49000475", "0.48939332", "0.48933476", "0.48931488", "0.4888235", "0.48714125", "0.486876", "0.4868519", "0.48631036", "0.48616436", "0.48615682", "0.48495635", "0.48484436", "0.4845434", "0.48452443", "0.4842679", "0.48400465", "0.48369238", "0.48341653", "0.48325866", "0.4829252", "0.48205096", "0.48199034", "0.481557", "0.4813945", "0.48110104", "0.48108473", "0.48065653", "0.48036042", "0.48003045", "0.4799238", "0.47957006", "0.47956446", "0.47893173", "0.47863692", "0.47853124", "0.47839773", "0.47776625", "0.4776586", "0.4774846", "0.47728088", "0.47689322", "0.4768254", "0.4767802" ]
0.6280166
0
Given a list of values and associated labels, finds the best split threshold z, i.e. the threshold at which dividing the values has the lowest split cost. Returns a pair (z,cost) where cost is the split_cost of the threshold z. If nonelabels is given, it indicates the labels of missing values that must be passed down to both subtrees. This does not affect the output z, but it does affect the output cost value.
def best_split(values,labels,nonelabels=None): assert len(values) >= 2 assert len(values) == len(labels) N = len(values) ilist = sorted((v,l) for (v,l) in zip(values,labels)) leftcount = defaultdict(int) rightcount = defaultdict(int) for v,l in ilist: rightcount[l] += 1 bestindex = -1 bestcost = split_cost([leftcount,rightcount]) cost = bestcost #costs = [cost] #print "Split costs:" for i in xrange(len(ilist)): v,l = ilist[i] rightcount[l] -= 1 leftcount[l] += 1 if i+1 >= len(ilist) or v == ilist[i+1][0]: #no splits when v is equal to the next value continue cost = split_cost([leftcount,rightcount]) #print " ",v,leftcount.values(),rightcount.values(),cost #costs.append(cost) if cost < bestcost: bestcost = cost bestindex = i #raw_input() if bestindex < 0: #no split found... try splitting in half splitval = (ilist[0][0]+ilist[-1][0])*0.5 else: splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5 if nonelabels is None: return (splitval,bestcost) #reevaluate counts leftcount = defaultdict(int) rightcount = defaultdict(int) for l in nonelabels: leftcount[l] += 1 rightcount[l] += 1 for v,l in ilist: if v <= splitval: leftcount[l] += 1 else: rightcount[l] += 1 return splitval,split_cost([leftcount,rightcount])
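A minimal usage sketch for the best_split document above (an editorial addition, not a dataset field): the toy split_cost, the sample values/labels, and the expected outputs are assumptions, since the real split_cost lives elsewhere in the snippet's source project. The snippet is Python 2 flavored (xrange), so the sketch shims xrange when run under Python 3, assuming best_split is defined in the same module.

from collections import defaultdict  # best_split above relies on defaultdict

try:
    xrange
except NameError:
    xrange = range  # shim so the Python 2 style best_split also runs on Python 3

def split_cost(label_count_list):
    # Hypothetical stand-in: total misclassification count across the child label counters.
    return sum(sum(c.values()) - max(c.values()) for c in label_count_list if c)

# One continuous feature column and the class label of each example.
values = [1.0, 2.0, 2.5, 4.0, 5.0]
labels = ['a', 'a', 'a', 'b', 'b']

z, cost = best_split(values, labels)
# With this toy cost, z is 3.25 (midway between 2.5 and 4.0) and cost is 0.

# Labels of missing values are pushed down both subtrees: same z,
# but the cost now counts the duplicated missing-value labels (1 with this toy cost).
z2, cost2 = best_split(values, labels, nonelabels=['b'])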
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def get_best_thresholds(labels, test_y, outputs, plot=False):\n t_max = [0] * len(labels)\n f_max = [0] * len(labels)\n\n for i, label in enumerate(labels):\n ts = []\n fs = []\n\n for t in np.linspace(0.1, 0.99, num=50):\n p, r, f, _ = precision_recall_fscore_support(test_y[:,i], 
np.where(outputs[:,i]>t, 1, 0), average='micro')\n ts.append(t)\n fs.append(f)\n if f > f_max[i]:\n f_max[i] = f\n t_max[i] = t\n\n if plot:\n print(f'LABEL: {label}')\n print(f'f_max: {f_max[i]}')\n print(f't_max: {t_max[i]}')\n\n plt.scatter(ts, fs)\n plt.show()\n \n return t_max, f_max", "def heuristic2_label_OBD(n, P, label, critical=None):\n print \"trying to label \" + str(n) + \" with \" + str(label)\n nodes_labeled = []\n if ('critical' in P.node[n].keys()) and (P.node[n]['critical']==True) and (P.node[n]['OBDlabel'] != label) :\n print \"FAIL on critical and not the same label.\"\n return (False, []) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[n]['OBDlabel'] = label\n nodes_labeled.append(n) # this is a list that gets passed through recursions\n if critical == True:\n P.node[n]['critical'] = True\n # labeling part done\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] > new_label:\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now the recursive step - labeling all these nodes\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n print \"trying perm: \" + str(perm)\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n break\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n print \"removing label of \" + str(nn)\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n print \"Win in labeling neighbors of \" + str(n)\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)\n print \"FAIL of all permutations from \" + str(n)", "def heuristic2B_label_OBD(n, P, label, critical=None):\n nodes_labeled = []\n\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n \n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys(): # if it has a label\n if P.node[neigh]['OBDlabel'] > new_label: # and it is higher than what I would use for labeling\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we 
label them all with that\n \n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n \n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now labeling all these nodes\n \n for neigh in neighbors_to_label:\n if ('critical' in P.node[neigh].keys()) and (P.node[neigh]['critical']==True) and (P.node[neigh]['OBDlabel'] != new_label) :\n return (False, nodes_labeled) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[neigh]['OBDlabel'] = new_label\n nodes_labeled.append(neigh) # this is a list that gets passed through recursions\n if flag_critical == True:\n P.node[neigh]['critical'] = True\n # labeling part done\n \n # and now recursive step - going into each neighbor to continue, in any order if necessary\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2B_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):\n nc = prediction.shape[2] - 5\n xc = prediction[..., 4] > conf_thres\n min_wh, max_wh = 2, 4096\n max_det = 300\n max_nms = 30000\n time_limit = 10.0\n redundant = True\n multi_label &= nc > 1\n merge = False\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction):\n x = x[xc[xi]]\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5]\n v[:, 4] = 1.0\n v[range(len(l)), l[:, 0].long() + 5] = 1.0\n x = torch.cat((x, v), 0)\n if not x.shape[0]:\n continue\n x[:, 5:] *= x[:, 4:5]\n box = xywh2xyxy(x[:, :4])\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else:\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n n = x.shape[0]\n if not n:\n continue\n elif n > max_nms:\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n c = x[:, 5:6] * (0 if agnostic else max_wh)\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = torchvision.ops.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det:\n i = i[:max_det]\n if merge and 1 < n < 3000.0:\n iou = box_iou(boxes[i], 
boxes) > iou_thres\n weights = iou * scores[None]\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)\n if redundant:\n i = i[iou.sum(1) > 1]\n output[xi] = x[i]\n if time.time() - t > time_limit:\n None\n break\n return output", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value", "def best_split(self):\r\n best_splits = [[0, None, None]]\r\n impurity, best_S, best_xj = 0, None, None\r\n \r\n for xj in self.x_names:\r\n for S in self.potential_splits(xj):\r\n ir = float(self.impurity_reduction(xj, S))\r\n if ir > impurity:\r\n impurity, best_S, best_xj = ir, S, xj\r\n best_splits.append([S, xj])\r\n else: \r\n pass\r\n \r\n return best_S, best_xj", "def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score", "def _fit_split_(self, dataset, targets, val_set, val_targets, checkpoints):\n dir = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16))\n dir = '.tmp' + dir + '/'\n os.mkdir(dir)\n grid = self.grid\n if self.folds is not None or self.folds != 0:\n if self.task == 'Classification':\n if self.folds > 1:\n sf = 
StratifiedKFold(n_splits=self.folds, shuffle=True, random_state=0)\n elif 0 <= self.folds < 1:\n sf = StratifiedShuffleSplit(n_splits=1, test_size=self.folds, random_state=0)\n elif self.task == 'Regression':\n folds, dataset, targets = self.split_regression(dataset, targets)\n results = []\n for params in grid:\n try:\n nn = NeuralNetwork()\n for i in range(len(params['layers'])):\n if i == 0:\n nn.add_layer('dense', params['layers'][i], params['activation'], dataset.shape[1])\n else:\n if i == len(params['layers']) - 1 and self.task == 'Regression':\n nn.add_layer('dense', params['layers'][i], 'linear')\n else:\n nn.add_layer('dense', params['layers'][i], params['activation'])\n curr_res = {'params': params,\n 'metric_stats': [],\n 'test_stats': [],\n 'vl_stats': [],\n 'tr_stats': []}\n\n if self.task == 'Classification':\n folds = sf.split(dataset, targets)\n for train_index, test_index in folds:\n X_train, X_test = dataset[train_index], dataset[test_index]\n Y_train, Y_test = targets[train_index], targets[test_index]\n nested_best = None\n nested_best_metric = None\n nested_tr_pred = None\n nested_vl_pred = None\n for i in range(self.restarts):\n nn.compile(task=self.task,\n loss=self.loss_name,\n l2_lambda=params['l2_lambda'],\n dropout=params['dropout'],\n optimizer=SGD(lr_init=params['lr'],\n momentum=params['momentum'],\n nesterov=params['nesterov'],\n lr_sched=StepDecayScheduler(drop=params['lr_sched'][0],\n epochs_drop=params['lr_sched'][1])))\n\n curr_model, curr_metric, best_epoch = nn.fit(X_train, Y_train,\n val_set=val_set, val_targets=val_targets,\n batch_size=params['batch_size'],\n test_size=params['test_size'],\n epochs=params['epoch'],\n patience=params['patience'],\n save_pred=dir + 'tmp_gs',\n save_model=None)\n\n nested_best_metric = metrics.metric_improve(self.metric, nested_best_metric, curr_metric)\n if nested_best_metric[1]:\n nested_tr_pred = np.load(dir + 'tmp_gs_tr_predictions.npy')[best_epoch]\n nested_vl_pred = np.load(dir + 'tmp_gs_vl_predictions.npy')[best_epoch]\n nested_best = copy.deepcopy(curr_model)\n if nested_best_metric[2]:\n break\n\n Y_pred = nested_best.predict(X_test)\n if self.metric == 'loss':\n curr_metric = np.sum(self.loss(Y_test, Y_pred), axis=0) / len(Y_test)\n else:\n curr_metric = metrics.metric_computation(self.metric, Y_test, Y_pred)\n\n curr_res['metric_stats'].append(curr_metric)\n tr_stats = []\n vl_stats = []\n test_stats = []\n for stat in self.statistics:\n if stat == 'loss':\n\n tr_stats.append(np.mean(self.loss(nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:])))\n vl_stats.append(np.mean(self.loss(nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:])))\n test_stats.append(np.mean(self.loss(Y_test, Y_pred)))\n else:\n tr_stats.append(metrics.metric_computation(stat,\n nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:]))\n vl_stats.append(metrics.metric_computation(stat,\n nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:]))\n test_stats.append(metrics.metric_computation(stat, Y_test, Y_pred))\n curr_res['tr_stats'].append(tr_stats)\n curr_res['vl_stats'].append(vl_stats)\n curr_res['test_stats'].append(test_stats)\n\n results.append(curr_res)\n if checkpoints is not None:\n with open(checkpoints + '.pkl', 'wb') as output:\n pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)\n\n except NesterovError:\n continue\n shutil.rmtree(dir)\n return results", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= 
self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def _fit_no_split(self, dataset, targets, val_set, val_targets, checkpoints):\n dir = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16))\n dir = '.tmp' + dir + '/'\n os.mkdir(dir)\n grid = self.grid\n results = []\n for params in grid:\n try:\n nn = NeuralNetwork()\n for i in range(len(params['layers'])):\n if i == 0:\n nn.add_layer('dense', params['layers'][i], params['activation'], dataset.shape[1])\n else:\n if i == len(params['layers']) - 1 and self.task 
== 'Regression':\n nn.add_layer('dense', params['layers'][i], 'linear')\n else:\n nn.add_layer('dense', params['layers'][i], params['activation'])\n curr_res = {'params': params,\n 'vl_stats': [],\n 'tr_stats': []}\n\n nested_best_metric = None\n nested_tr_pred = None\n nested_vl_pred = None\n for i in range(self.restarts):\n nn.compile(task=self.task,\n loss=self.loss_name,\n l2_lambda=params['l2_lambda'],\n optimizer=SGD(lr_init=params['lr'],\n momentum=params['momentum'],\n nesterov=params['nesterov'],\n lr_sched=StepDecayScheduler(drop=params['lr_sched'][0],\n epochs_drop=params['lr_sched'][1])))\n\n curr_model, curr_metric, best_epoch = nn.fit(dataset, targets,\n val_set=val_set, val_targets=val_targets,\n batch_size=params['batch_size'],\n test_size=params['test_size'],\n epochs=params['epoch'],\n patience=params['patience'],\n save_pred=dir + 'tmp_gs',\n save_model=None)\n\n nested_best_metric = metrics.metric_improve(self.metric, nested_best_metric, curr_metric)\n if nested_best_metric[1]:\n nested_tr_pred = np.load(dir + 'tmp_gs_tr_predictions.npy')[best_epoch]\n nested_vl_pred = np.load(dir + 'tmp_gs_vl_predictions.npy')[best_epoch]\n if nested_best_metric[2]:\n break\n\n tr_stats = []\n vl_stats = []\n for stat in self.statistics:\n if stat == 'loss':\n tr_stats.append(np.mean(self.loss(nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:])))\n vl_stats.append(np.mean(self.loss(nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:])))\n else:\n tr_stats.append(metrics.metric_computation(stat,\n nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:]))\n vl_stats.append(metrics.metric_computation(stat,\n nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:]))\n curr_res['tr_stats'].append(tr_stats)\n curr_res['vl_stats'].append(vl_stats)\n\n results.append(curr_res)\n if checkpoints is not None:\n with open(checkpoints + '.pkl', 'wb') as output:\n pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)\n\n except NesterovError:\n continue\n shutil.rmtree(dir)\n return results", "def get_best_split_all(x, y) -> Tuple[int, float, float]:\n m = x.shape[1]\n col_best_gin = np.ones(shape=m)\n col_best_val = np.ones(shape=m)\n for c in range(m):\n best = 1\n best_x = 0\n for i in np.unique(x[:, c]):\n gini = Tree.split(x[:, c], y, i)\n if gini < best:\n best = gini\n best_x = i\n col_best_gin[c] = best\n col_best_val[c] = best_x\n\n # Select best feature to split on\n col_idx = np.argmin(col_best_gin)\n # Convert to bool index\n col_idx = np.array(range(x.shape[1])) == col_idx\n\n return col_idx, col_best_val[col_idx], col_best_gin[col_idx]", "def pick_best_label(self,db,labels,ids):\n self.type = 'v'\n if len(labels) > 0:\n self.value = vote([labels[id] for id in ids])\n else:\n self.value = None\n return", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def minimization(current_tally, group_labels=None, seed=None):\n if seed is not None:\n random.seed(seed)\n n_treatments = len(current_tally)\n if n_treatments < 2:\n raise ValueError('current_tally must be a list of lists whose length is greater than 2.')\n target_length = None\n for tally in current_tally:\n if target_length is None:\n target_length = len(tally)\n else:\n if target_length != len(tally):\n raise ValueError('Each list in current_tally must be the same length.')\n\n if group_labels is not None:\n 
if len(group_labels) != target_length:\n raise ValueError('group_labels must be {} long'.format(target_length))\n\n sums = [0] * n_treatments\n for idx, tally in enumerate(current_tally):\n sums[idx] = sum(tally)\n if sum(sums) == 0:\n # No assignment made yet, so make one at random\n idx = random.randint(0, n_treatments - 1)\n else:\n min_value = min(sums)\n groups = [i for i, j in enumerate(sums) if j == min_value]\n if len(groups) > 1:\n idx = random.choice(groups)\n else:\n idx = groups[0]\n\n if group_labels:\n print(group_labels)\n group = group_labels[idx]\n else:\n group = idx + 1\n print(group)\n return group", "def get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold", "def binary_fairness(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n task: Literal[\"demographic_parity\", \"equal_opportunity\", \"all\"] = \"all\",\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Dict[str, torch.Tensor]:\n if task not in [\"demographic_parity\", \"equal_opportunity\", \"all\"]:\n raise ValueError(\n f\"Expected argument `task` to either be ``demographic_parity``,\"\n f\"``equal_opportunity`` or ``all`` but got {task}.\"\n )\n\n if task == \"demographic_parity\":\n if target is not None:\n rank_zero_warn(\"The task demographic_parity does not require a target.\", UserWarning)\n target = torch.zeros(preds.shape)\n\n num_groups = torch.unique(groups).shape[0]\n group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)\n\n transformed_group_stats = _groups_stat_transform(group_stats)\n\n if task == \"demographic_parity\":\n return _compute_binary_demographic_parity(**transformed_group_stats)\n\n if task == \"equal_opportunity\":\n return _compute_binary_equal_opportunity(**transformed_group_stats)\n\n if task == \"all\":\n return {\n **_compute_binary_demographic_parity(**transformed_group_stats),\n **_compute_binary_equal_opportunity(**transformed_group_stats),\n }\n return None", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n 
noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. 
class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def split_by_cost(cost_list, \n comm=None, \n return_work=False,\n return_all=False):\n if comm == None:\n comm = MPI.COMM_WORLD\n \n size = comm.Get_size()\n rank = comm.Get_rank()\n\n ### Total cost of job_list\n total = np.sum(cost_list) \n ### Ideal work for each rank\n max_work = (total / size)*1.01\n \n ### Preparing indices that each rank will use\n work_idx = [[] for x in range(size)]\n work_sum = [0 for x in range(size)]\n current_worker = 0\n withheld_idx_list = []\n withheld_value_list = []\n for idx,value in enumerate(cost_list):\n ## Decide whether to withhold value\n if work_sum[current_worker] + value > max_work*1.05:\n withheld_idx_list.append(idx)\n withheld_value_list.append(value)\n continue\n \n work_idx[current_worker].append(idx)\n work_sum[current_worker] += value\n if work_sum[current_worker] > max_work:\n current_worker += 1\n \n withheld_idx_list = np.array(withheld_idx_list)\n withheld_value_list = np.array(withheld_value_list)\n withheld_sort_idx = np.argsort(withheld_idx_list)\n withheld_idx_list = withheld_idx_list[withheld_sort_idx]\n withheld_value_list = withheld_value_list[withheld_sort_idx]\n for idx,withheld_idx in enumerate(withheld_idx_list):\n min_idx = np.argmin(work_sum)\n work_sum[min_idx] += withheld_value_list[idx]\n work_idx[min_idx].append(withheld_idx)\n \n my_list = work_idx[rank]\n \n if not return_all:\n if not return_work:\n return my_list\n else:\n return my_list,work_sum[rank]\n else:\n if not return_work:\n return work_idx\n else:\n return work_idx,work_sum", "def get_min_across_splits_continuous(\n arr: np.ndarray, y: np.ndarray, splits: np.ndarray, eval_func: Callable\n ):\n n = len(splits)\n if n > 500:\n # If many split points, use some threading\n with multiprocessing.Pool(processes=8) as p:\n # Get evaluation scores across all the splits\n post_split_evals = dict(\n zip(\n range(len(splits)),\n 
p.starmap(\n BaseTree.get_split_goodness_fit_continuous,\n zip([arr] * n, [y] * n, splits, [eval_func] * n),\n ),\n )\n )\n p.close()\n else:\n # If not too many split points, get scores across all splits\n post_split_evals = dict(\n zip(\n range(len(splits)),\n map(\n lambda x: BaseTree.get_split_goodness_fit_continuous(*x),\n zip([arr] * n, [y] * n, splits, [eval_func] * n),\n ),\n )\n )\n # Get the minimum split based on gain ratio\n min_eval = min(\n post_split_evals,\n key=lambda x: pipe(\n post_split_evals.get(x),\n lambda results: results[0] / results[1], # entropy / intrinsic value\n ),\n )\n\n # Return the best split and the splits scores\n return (splits[min_eval], *post_split_evals.get(min_eval))", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def clean_labels(labels):\n\n llabels, slabels = list(labels), set(labels)\n \n for l in slabels:\n if llabels.count(l) <2 and l != max(slabels):\n llabels[llabels.index(l)] = l+1\n return clean_labels(llabels)\n elif llabels.count(l) <2 and l == max(slabels):\n llabels[llabels.index(l)] = l-1\n return clean_labels(llabels)\n else:\n return np.array(llabels)", "def fit(self, data, targets):\n # update these three\n self.idx = 0\n self.val = None\n self.left = None\n self.right = None\n ### YOUR CODE HERE\n # i have added a slow and a fast version\n \n num_points, num_features = data.shape\n # print('num points, num_features', num_points, num_features)\n \n def feat_score(feat_idx):\n feat = data[:, feat_idx].copy()\n perm = np.argsort(feat)\n s_feat = feat[perm]\n s_targets = targets[perm]\n target_var = ((s_targets - s_targets.mean())**2).sum()\n s_left, s_right = sum_squares(s_targets)\n def score(idx, _vals):\n ## slow version\n #left = _vals[0:idx]\n #right = _vals[idx:]\n #assert len(left) + len(right) == len(_vals), (len(left), len(right), len(_vals))\n #left_mean = np.mean(left)\n #right_mean = np.mean(right)\n #left_error = np.sum((left-left_mean)**2)\n #assert np.allclose(left_error, s_left[idx]) \n #right_error = np.sum((right-right_mean)**2)\n #assert np.allclose(right_error, s_right[idx])\n # return left_error+right_error\n # fast version\n return s_left[idx] + s_right[idx]\n # score for every split\n scores = np.array([score(x, s_targets) for x in range(0, num_points)])\n assert scores.min() <= target_var, target_var\n best_score_idx = np.argmin(scores)\n best_score = scores[best_score_idx]\n val = s_feat[best_score_idx]\n # print('best score', feat_idx, best_score, best_score_idx, val, s_feat[best_score_idx+1])\n \n return best_score, {'val': val, \n 'left': np.mean(s_targets[:best_score_idx]), \n 'right': np.mean(s_targets[best_score_idx:])\n } \n\n split_scores = []\n for f in range(0, num_features):\n total_score, _params = feat_score(f)\n split_scores.append(total_score)\n # print('score of {0} - {1}'.format(feat_names[f], total_score))\n # print('feature scores:', np.array(split_scores))\n best_feat = np.argmin(split_scores)\n best_score = split_scores[best_feat]\n # print('Best Feature idx: {0} - Best Cost: {1}'.format(best_feat, best_score))\n score_again, params = feat_score(best_feat)\n # print('double check score', score_again, 
best_score)\n self.idx = best_feat\n self.val = params['val']\n self.left = params['left']\n self.right = params['right']\n print(\"idx={}, val={}, left={}, right={}\".format(self.idx, self.val, self.left, self.right))\n assert not np.isnan(self.left)\n assert not np.isnan(self.right)\n ### END CODE", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def flatten_binary_scores(self, scores, labels, ignore=None):\n scores = scores.view(-1)\n labels = labels.view(-1)\n if ignore is None:\n return scores, labels\n valid = (labels != ignore)\n vscores = scores[valid]\n vlabels = labels[valid]\n return vscores, vlabels", "def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score", "def prepare_val_res(score, threshold):\n new_whale = 'new_whale'\n\n res ={}\n\n train_arr = np.array(train)\n\n for i,p in enumerate(val):\n t = []\n s = set()\n a = score[i,:]\n\n top_label_probs = {}\n cond = a > threshold\n cond_index = np.where(cond)[0]\n cond_images = train_arr[cond_index]\n for j, img in enumerate(cond_images):\n if tagged[img] in top_label_probs:\n top_label_probs[tagged[img]] += a[cond_index[j]]\n else:\n top_label_probs[tagged[img]] = a[cond_index[j]]\n\n sorted_top_label_probs = sort_dict_by_values(top_label_probs)\n\n t = []\n for lb, _ in sorted_top_label_probs:\n t.append(lb)\n\n if len(t) < 5:\n t.append(new_whale)\n\n for index in np.argsort(a)[::-1]:\n if tagged[train_arr[index]] not in t:\n t.append(tagged[train_arr[index]])\n if len(t) >= 5:\n break\n\n assert len(t) >= 5\n\n res[p[:-4]+'.jpg'] = t[:5]\n\n return res", "def knapsack_greedy(vals: List[float], weights: List[int],\n cap: int) -> Set[int]:\n # Check whether the input arrays are None or empty\n if not vals:\n return set()\n # Check whether the input capacity is non-negative\n if cap < 0:\n return set()\n\n items = [Item(i, info[0], info[1]) for i, info in enumerate(zip(vals, 
weights))]\n\n included1, total_val1 = _greedy_packing(\n items, cap, func=lambda x: x.val / x.weight\n )\n\n included2, total_val2 = _greedy_packing(items, cap, func=lambda x: x.val)\n\n if total_val1 >= total_val2:\n return included1\n else:\n return included2\n # Overall running time complexity: O(nlog n)", "def _forward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = ()\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f in k:\n continue\n candidate_features = tuple(sorted([*k, f]))\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def balance(labels):\n # subsample positive labels if we have too many\n labels = subsample_positive_labels(labels)\n\n # subsample negative labels if we have too many\n labels = subsample_negative_labels(labels)\n\n return labels", "def best_loss(list_bb):\n\n # Compute the number of predicted boxes\n n = len(list_bb)\n\n # if there are more than 0 predicted boxes, search for the 2 boxes\n if n != 0:\n tab_loss = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n loss_val = np.sum([(list_bb[i][k]-list_bb[j][k]) ** 2 for k in range(4)])\n tab_loss[i, j] = loss_val\n tab_loss[j, i] = loss_val\n\n # Find the minimum\n amin = np.unravel_index(tab_loss.argmin(), tab_loss.shape)\n\n return union(list_bb[amin[0]], list_bb[amin[1]])\n else:\n return []", "def get_best_thres(self, data, label, score_func = f1_score):\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = 
sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def find_threshold(labels, predictions, wqs, post_shift):\n if post_shift:\n sorted_groups = []\n for q in range(1, 5):\n sorted_group = sorted(\n [(prediction[0], label)\n for (prediction, label, wq) in zip(predictions, labels, wqs)\n if wq == q],\n reverse=True)\n sorted_groups.append(sorted_group)\n\n cut_indices = [\n int(len(sorted_group) * COVERAGE) for sorted_group in sorted_groups\n ]\n\n for _ in range(1000):\n fprs = [\n get_fpr(sorted_group, cut_index)\n for (sorted_group, cut_index) in zip(sorted_groups, cut_indices)\n ]\n min_fpr_index = np.argmin(fprs)\n max_fpr_index = np.argmax(fprs)\n cut_indices[min_fpr_index] = min(cut_indices[min_fpr_index] + 1,\n len(sorted_groups[min_fpr_index]))\n cut_indices[max_fpr_index] = max(cut_indices[max_fpr_index] - 1, 0)\n\n thresholds = [sorted_groups[q][cut_indices[q]][0] for q in range(4)]\n return thresholds\n else:\n return [np.percentile(predictions, 100 - COVERAGE * 100)] * 4", "def find_best_split(self, x, y):\n\n # check cornor case: all same x\n n = y.size\n\n if all(x == x[0]):\n return (0, amin(x) - self.eps)\n\n sort_index = argsort(x)\n x_sorted = x[sort_index]\n y_sorted = y[sort_index]\n\n # build potential split index array\n split_index_array = array([i for i in range(1, n)\n if x_sorted[i] != x_sorted[i - 1]\n and y_sorted[i] != y_sorted[i - 1]])\n\n # split_index_array = linspace(\n # 0, y.size, num=min(5, ceil(n / 5)), endpoint=False, dtype='int')\n # split_index_array = split_index_array[1:]\n\n best_split_index = 0\n best_gain = 0\n h_x = self.cur_entropy\n\n for split_index in split_index_array:\n left_entropy = self.entropy(y_sorted[:split_index])\n right_entropy 
= self.entropy(y_sorted[split_index:])\n h_xy = (split_index * left_entropy +\n (n - split_index) * right_entropy) / n\n cur_gain = h_x - h_xy\n\n if cur_gain > best_gain:\n best_gain = cur_gain\n best_split_index = split_index\n\n if best_split_index != 0:\n best_split_point = (x_sorted[best_split_index] +\n x_sorted[best_split_index - 1]) / 2\n else:\n best_split_point = x_sorted[best_split_index] - self.eps\n\n return (best_gain, best_split_point)", "def label_lvl(a, thlds, labels):\n if len(labels) != len(thlds) + 1:\n raise ValueError(\"Must be one more label than number of thresholds\")\n lvl_indexes = index_lvl(a, thlds)\n return np.take(labels, lvl_indexes)", "def _split_dataset(self, X, y, label, index, value, sample_weights=None):\n # YOUR CODE HERE\n # Hint: Do not forget to remove the index-th feature from X.\n # begin answer\n ret1=[]\n ret2=[]\n featVec=X[:,index]\n X=X[:,[i for i in range(X.shape[1]) if i!=index ]]\n for i in range(len(featVec)):\n if featVec[i]>=value:\n ret1.append(i)\n else:\n ret2.append(i)\n sub1_X = X[ret1,:]\n sub1_y = y[ret1]\n label_1=label[ret1]\n sub1_sample_weights=sample_weights[ret1]\n sub2_X = X[ret2,:]\n sub2_y = y[ret2]\n label_2=label[ret2]\n sub2_sample_weights=sample_weights[ret2]\n # end answer\n return sub1_X, sub1_y, label_1, sub1_sample_weights, sub2_X, sub2_y, label_2, sub2_sample_weights", "def losses_hiera(predictions,\n targets,\n targets_top,\n num_classes,\n indices_high,\n eps=1e-8):\n b, _, h, w = predictions.shape\n predictions = torch.sigmoid(predictions.float())\n void_indices = (targets == 255)\n targets[void_indices] = 0\n targets = F.one_hot(targets, num_classes=num_classes).permute(0, 3, 1, 2)\n void_indices2 = (targets_top == 255)\n targets_top[void_indices2] = 0\n targets_top = F.one_hot(targets_top, num_classes=7).permute(0, 3, 1, 2)\n\n MCMA = predictions[:, :num_classes, :, :]\n MCMB = torch.zeros((b, 7, h, w)).to(predictions)\n for ii in range(7):\n MCMB[:, ii:ii + 1, :, :] = torch.max(\n torch.cat([\n predictions[:, indices_high[ii][0]:indices_high[ii][1], :, :],\n predictions[:, num_classes + ii:num_classes + ii + 1, :, :]\n ],\n dim=1), 1, True)[0]\n\n MCLB = predictions[:, num_classes:num_classes + 7, :, :]\n MCLA = predictions[:, :num_classes, :, :].clone()\n for ii in range(7):\n for jj in range(indices_high[ii][0], indices_high[ii][1]):\n MCLA[:, jj:jj + 1, :, :] = torch.min(\n torch.cat([\n predictions[:, jj:jj + 1, :, :], MCLB[:, ii:ii + 1, :, :]\n ],\n dim=1), 1, True)[0]\n\n valid_indices = (~void_indices).unsqueeze(1)\n num_valid = valid_indices.sum()\n valid_indices2 = (~void_indices2).unsqueeze(1)\n num_valid2 = valid_indices2.sum()\n # channel_num*sum()/one_channel_valid already has a weight\n loss = (\n (-targets[:, :num_classes, :, :] * torch.log(MCLA + eps) -\n (1.0 - targets[:, :num_classes, :, :]) * torch.log(1.0 - MCMA + eps))\n * valid_indices).sum() / num_valid / num_classes\n loss += ((-targets_top[:, :, :, :] * torch.log(MCLB + eps) -\n (1.0 - targets_top[:, :, :, :]) * torch.log(1.0 - MCMB + eps)) *\n valid_indices2).sum() / num_valid2 / 7\n\n return 5 * loss", "def most_discriminating( features_df, labels_df, top=5):\n \n columns = features_df.shape[1]\n labels_df = labels_df[['file', 'candy_id']].set_index('file')\n qualities = np.zeros(columns)\n \n _left = 0\n _right = 1\n\n _c = 0\n _h = 1\n\n # globals\n cases = float(labels_df['candy_id'].count()) # total cases\n\n p_c_A = (labels_df['candy_id'] == 0).sum() / cases\n p_h_A = 1.0 - p_c_A\n\n\n for feature in range(columns):\n\n 
branch_cases = np.zeros(2) # total on each branch\n pi = np.zeros(2) # proportion on each branch\n\n split = np.array([\n #c, h\n [0, 0], #left\n [0, 0] #right\n ])\n\n for index, value in features_df[feature].iteritems():\n split[value][labels_df.loc[index][0]] += 1\n\n branch_cases[_left] = split[_left].sum()\n branch_cases[_right] = split[_right].sum()\n \n if branch_cases[_left] == 0.0 or branch_cases[_right] == 0.0:\n qualities[feature] = 0\n continue\n \n pi[_left] = branch_cases[_left] / cases\n pi[_right] = branch_cases[_right] / cases\n\n p_c_B = split[_left][_c] / branch_cases[_left]\n p_h_B = split[_left][_h] / branch_cases[_left]\n\n p_c_C = split[_right][_c] / branch_cases[_right]\n p_h_C = split[_right][_h] / branch_cases[_right]\n\n gini_tree = 1.0 - (math.pow(p_c_A, 2) + math.pow(p_h_A, 2))\n\n gini_left = 1.0 - (math.pow(p_c_B, 2) + math.pow(p_h_B, 2))\n gini_right = 1.0 - (math.pow(p_c_C, 2) + math.pow(p_h_C, 2))\n\n quality = gini_tree - pi[_left] * gini_left - pi[_right] * gini_right\n\n qualities[feature] = quality\n return list(reversed(qualities.argsort()))[:top]", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", "def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val", "def _binary_groups_stat_scores(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n num_groups: int,\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:\n if validate_args:\n _binary_stat_scores_arg_validation(threshold, \"global\", ignore_index)\n _binary_stat_scores_tensor_validation(preds, target, \"global\", ignore_index)\n _groups_validation(groups, num_groups)\n\n preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)\n groups = _groups_format(groups)\n\n indexes, indices = torch.sort(groups.squeeze(1))\n preds = preds[indices]\n target = target[indices]\n\n split_sizes = _flexible_bincount(indexes).detach().cpu().tolist()\n\n group_preds = list(torch.split(preds, split_sizes, dim=0))\n group_target = list(torch.split(target, split_sizes, dim=0))\n\n return [_binary_stat_scores_update(group_p, group_t) for group_p, group_t in zip(group_preds, group_target)]", "def calc_instance_segmentation_voc_prec_rec(\r\n pred_masks, pred_labels, pred_scores,\r\n gt_masks, gt_labels, iou_thresh):\r\n\r\n pred_masks = iter(pred_masks)\r\n pred_labels = iter(pred_labels)\r\n pred_scores = iter(pred_scores)\r\n gt_masks = iter(gt_masks)\r\n gt_labels = iter(gt_labels)\r\n\r\n n_pos = defaultdict(int)\r\n score = defaultdict(list)\r\n match = defaultdict(list)\r\n\r\n for pred_mask, pred_label, pred_score, 
gt_mask, gt_label in \\\r\n six.moves.zip(\r\n pred_masks, pred_labels, pred_scores,\r\n gt_masks, gt_labels):\r\n\r\n for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):\r\n pred_keep_l = pred_label == l\r\n pred_mask_l = pred_mask[pred_keep_l]\r\n pred_score_l = pred_score[pred_keep_l]\r\n # sort by score\r\n order = pred_score_l.argsort()[::-1]\r\n pred_mask_l = pred_mask_l[order]\r\n pred_score_l = pred_score_l[order]\r\n\r\n gt_keep_l = gt_label == l\r\n gt_mask_l = gt_mask[gt_keep_l]\r\n\r\n n_pos[l] += gt_keep_l.sum()\r\n score[l].extend(pred_score_l)\r\n\r\n if len(pred_mask_l) == 0:\r\n continue\r\n if len(gt_mask_l) == 0:\r\n match[l].extend((0,) * pred_mask_l.shape[0])\r\n continue\r\n\r\n iou = mask_iou(pred_mask_l, gt_mask_l)\r\n gt_index = iou.argmax(axis=1)\r\n # set -1 if there is no matching ground truth\r\n gt_index[iou.max(axis=1) < iou_thresh] = -1\r\n del iou\r\n\r\n selec = np.zeros(gt_mask_l.shape[0], dtype=bool)\r\n for gt_idx in gt_index:\r\n if gt_idx >= 0:\r\n if not selec[gt_idx]:\r\n match[l].append(1)\r\n else:\r\n match[l].append(0)\r\n selec[gt_idx] = True\r\n else:\r\n match[l].append(0)\r\n\r\n for iter_ in (pred_masks, pred_labels, pred_scores, gt_masks, gt_labels):\r\n if next(iter_, None) is not None:\r\n raise ValueError('Length of input iterables need to be same.')\r\n\r\n n_fg_class = max(n_pos.keys()) + 1\r\n prec = [None] * n_fg_class\r\n rec = [None] * n_fg_class\r\n\r\n for l in n_pos.keys():\r\n score_l = np.array(score[l])\r\n match_l = np.array(\r\n match[l], dtype=np.int8)\r\n\r\n order = score_l.argsort()[::-1]\r\n match_l = match_l[order]\r\n\r\n tp = np.cumsum(match_l == 1)\r\n fp = np.cumsum(match_l == 0)\r\n\r\n # If an element of fp + tp is 0,\r\n # the corresponding element of prec[l] is nan.\r\n prec[l] = tp / (fp + tp)\r\n # If n_pos[l] is 0, rec[l] is None.\r\n if n_pos[l] > 0:\r\n rec[l] = tp / n_pos[l]\r\n\r\n return prec, rec", "def lgb_hyperopt(data, labels, num_evals=1000, n_folds=5, diagnostic=False):\r\n LGBM_MAX_LEAVES = 2**11 #maximum number of leaves per tree for LightGBM\r\n LGBM_MAX_DEPTH = 25 #maximum tree depth for LightGBM \r\n EVAL_METRIC_LGBM_CLASS = 'f1'\r\n\r\n def lgb_f1_score(y_hat, data):\r\n y_true = data.get_label()\r\n y_hat = np.round(y_hat)\r\n return 'f1', f1_score(y_true, y_hat), True\r\n\r\n print('Running {} rounds of LightGBM parameter optimisation:'.format(num_evals))\r\n #clear space\r\n \r\n integer_params = ['max_depth',\r\n 'num_leaves',\r\n 'max_bin',\r\n 'min_data_in_leaf',\r\n 'min_data_in_bin']\r\n \r\n def objective(space_params):\r\n \r\n #cast integer params from float to int\r\n for param in integer_params:\r\n space_params[param] = int(space_params[param])\r\n \r\n #extract nested conditional parameters\r\n if space_params['boosting']['boosting'] == 'goss':\r\n top_rate = space_params['boosting'].get('top_rate')\r\n other_rate = space_params['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n space_params['top_rate'] = top_rate\r\n space_params['other_rate'] = other_rate\r\n \r\n subsample = space_params['boosting'].get('subsample', 1.0)\r\n space_params['boosting'] = space_params['boosting']['boosting']\r\n space_params['subsample'] = subsample\r\n \r\n cv_results = lgb.cv(space_params, train, nfold = n_folds, stratified=True,\r\n early_stopping_rounds=100, seed=42, feval=lgb_f1_score)\r\n \r\n best_loss = 
-cv_results['f1-mean'][-1]\r\n\r\n return{'loss':best_loss, 'status': STATUS_OK }\r\n \r\n train = lgb.Dataset(data, labels)\r\n \r\n #integer and string parameters, used with hp.choice()\r\n boosting_list = [{'boosting': 'gbdt',\r\n 'subsample': hp.uniform('subsample', 0.5, 1)},\r\n {'boosting': 'goss',\r\n 'subsample': 1.0,\r\n 'top_rate': hp.uniform('top_rate', 0, 0.5),\r\n 'other_rate': hp.uniform('other_rate', 0, 0.5)}] #if including 'dart', make sure to set 'n_estimators'\r\n\r\n objective_list_reg = ['huber', 'gamma', 'fair', 'tweedie']\r\n objective_list_class = ['binary', 'cross_entropy']\r\n objective_list = objective_list_class\r\n is_unbalance_list = [True]\r\n\r\n space ={'boosting' : hp.choice('boosting', boosting_list),\r\n 'num_leaves' : hp.quniform('num_leaves', 2, LGBM_MAX_LEAVES, 1),\r\n 'max_depth': hp.quniform('max_depth', 2, LGBM_MAX_DEPTH, 1),\r\n 'max_bin': hp.quniform('max_bin', 32, 255, 1),\r\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 1, 256, 1),\r\n 'min_data_in_bin': hp.quniform('min_data_in_bin', 1, 256, 1),\r\n 'min_gain_to_split' : hp.quniform('min_gain_to_split', 0.1, 5, 0.01),\r\n 'lambda_l1' : hp.uniform('lambda_l1', 0, 5),\r\n 'lambda_l2' : hp.uniform('lambda_l2', 0, 5),\r\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.2)),\r\n 'metric' : None, \r\n 'objective' : hp.choice('objective', objective_list),\r\n 'feature_fraction' : hp.quniform('feature_fraction', 0.5, 1, 0.01),\r\n 'bagging_fraction' : hp.quniform('bagging_fraction', 0.5, 1, 0.01),\r\n 'is_unbalance' : hp.choice('is_unbalance', is_unbalance_list)\r\n }\r\n\r\n trials = Trials()\r\n best = fmin(fn=objective,\r\n space=space,\r\n algo=tpe.suggest,\r\n max_evals=num_evals, \r\n trials=trials)\r\n \r\n #fmin() will return the index of values chosen from the lists/arrays in 'space'\r\n #to obtain actual values, index values are used to subset the original lists/arrays\r\n #extract nested conditional parameters\r\n try:\r\n if best['boosting']['boosting'] == 'goss':\r\n top_rate = best['boosting'].get('top_rate')\r\n other_rate = best['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n except:\r\n if boosting_list[best['boosting']]['boosting'] == 'goss':\r\n top_rate = best['top_rate']\r\n other_rate = best['other_rate']\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n best['boosting'] = boosting_list[best['boosting']]['boosting']#nested dict, index twice\r\n best['metric'] = metric_list[best['metric']]\r\n best['objective'] = objective_list[best['objective']]\r\n best['is_unbalance'] = is_unbalance_list[best['is_unbalance']]\r\n \r\n #cast floats of integer params to int\r\n for param in integer_params:\r\n best[param] = int(best[param])\r\n \r\n print('{' + '\\n'.join('{}: {}'.format(k, v) for k, v in best.items()) + '}')\r\n if diagnostic:\r\n return(best, trials)\r\n else:\r\n return(best)", "def filter_top_predictions(labels, boxes, scores):\n\n filtered_labels = []\n filtered_boxes = []\n filtered_scores = []\n # Loop through each unique label\n for label in set(labels):\n # Get first index of label, which is also its highest scoring 
occurrence\n index = labels.index(label)\n\n filtered_labels.append(label)\n filtered_boxes.append(boxes[index])\n filtered_scores.append(scores[index])\n\n if len(filtered_labels) == 0:\n return filtered_labels, torch.empty(0, 4), torch.tensor(filtered_scores)\n return filtered_labels, torch.stack(filtered_boxes), torch.tensor(filtered_scores)", "def evaluation(gt_dir,\n seg_dir,\n label_list,\n mask_dir=None,\n compute_score_whole_structure=False,\n path_dice=None,\n path_hausdorff=None,\n path_hausdorff_99=None,\n path_hausdorff_95=None,\n path_mean_distance=None,\n crop_margin_around_gt=10,\n list_incorrect_labels=None,\n list_correct_labels=None,\n use_nearest_label=False,\n recompute=True,\n verbose=True):\n\n # check whether to recompute\n compute_dice = not os.path.isfile(path_dice) if (path_dice is not None) else True\n compute_hausdorff = not os.path.isfile(path_hausdorff) if (path_hausdorff is not None) else False\n compute_hausdorff_99 = not os.path.isfile(path_hausdorff_99) if (path_hausdorff_99 is not None) else False\n compute_hausdorff_95 = not os.path.isfile(path_hausdorff_95) if (path_hausdorff_95 is not None) else False\n compute_mean_dist = not os.path.isfile(path_mean_distance) if (path_mean_distance is not None) else False\n compute_hd = [compute_hausdorff, compute_hausdorff_99, compute_hausdorff_95]\n\n if compute_dice | any(compute_hd) | compute_mean_dist | recompute:\n\n # get list label maps to compare\n path_gt_labels = utils.list_images_in_folder(gt_dir)\n path_segs = utils.list_images_in_folder(seg_dir)\n path_gt_labels = utils.reformat_to_list(path_gt_labels, length=len(path_segs))\n if len(path_gt_labels) != len(path_segs):\n print('gt and segmentation folders must have the same amount of label maps.')\n if mask_dir is not None:\n path_masks = utils.list_images_in_folder(mask_dir)\n if len(path_masks) != len(path_segs):\n print('not the same amount of masks and segmentations.')\n else:\n path_masks = [None] * len(path_segs)\n\n # load labels list\n label_list, _ = utils.get_list_labels(label_list=label_list, labels_dir=gt_dir)\n n_labels = len(label_list)\n max_label = np.max(label_list) + 1\n\n # initialise result matrices\n if compute_score_whole_structure:\n max_dists = np.zeros((n_labels + 1, len(path_segs), 3))\n mean_dists = np.zeros((n_labels + 1, len(path_segs)))\n dice_coefs = np.zeros((n_labels + 1, len(path_segs)))\n else:\n max_dists = np.zeros((n_labels, len(path_segs), 3))\n mean_dists = np.zeros((n_labels, len(path_segs)))\n dice_coefs = np.zeros((n_labels, len(path_segs)))\n\n # loop over segmentations\n loop_info = utils.LoopInfo(len(path_segs), 10, 'evaluating', print_time=True)\n for idx, (path_gt, path_seg, path_mask) in enumerate(zip(path_gt_labels, path_segs, path_masks)):\n if verbose:\n loop_info.update(idx)\n\n # load gt labels and segmentation\n gt_labels = utils.load_volume(path_gt, dtype='int')\n seg = utils.load_volume(path_seg, dtype='int')\n if path_mask is not None:\n mask = utils.load_volume(path_mask, dtype='bool')\n gt_labels[mask] = max_label\n seg[mask] = max_label\n\n # crop images\n if crop_margin_around_gt is not None:\n gt_labels, cropping = edit_volumes.crop_volume_around_region(gt_labels, margin=crop_margin_around_gt)\n seg = edit_volumes.crop_volume_with_idx(seg, cropping)\n\n if list_incorrect_labels is not None:\n seg = edit_volumes.correct_label_map(seg, list_incorrect_labels, list_correct_labels, use_nearest_label)\n\n # compute Dice scores\n dice_coefs[:n_labels, idx] = fast_dice(gt_labels, seg, label_list)\n\n 
# compute Dice scores for whole structures\n if compute_score_whole_structure:\n temp_gt = (gt_labels > 0) * 1\n temp_seg = (seg > 0) * 1\n dice_coefs[-1, idx] = dice(temp_gt, temp_seg)\n else:\n temp_gt = temp_seg = None\n\n # compute average and Hausdorff distances\n if any(compute_hd) | compute_mean_dist:\n\n # compute unique label values\n unique_gt_labels = np.unique(gt_labels)\n unique_seg_labels = np.unique(seg)\n\n # compute max/mean surface distances for all labels\n for index, label in enumerate(label_list):\n if (label in unique_gt_labels) & (label in unique_seg_labels):\n mask_gt = np.where(gt_labels == label, True, False)\n mask_seg = np.where(seg == label, True, False)\n tmp_max_dists, mean_dists[index, idx] = surface_distances(mask_gt, mask_seg, [100, 99, 95])\n max_dists[index, idx, :] = np.array(tmp_max_dists)\n else:\n mean_dists[index, idx] = max(gt_labels.shape)\n max_dists[index, idx, :] = np.array([max(gt_labels.shape)] * 3)\n\n # compute max/mean distances for whole structure\n if compute_score_whole_structure:\n tmp_max_dists, mean_dists[-1, idx] = surface_distances(temp_gt, temp_seg, [100, 99, 95])\n max_dists[-1, idx, :] = np.array(tmp_max_dists)\n\n # write results\n if path_dice is not None:\n utils.mkdir(os.path.dirname(path_dice))\n np.save(path_dice, dice_coefs)\n if path_hausdorff is not None:\n utils.mkdir(os.path.dirname(path_hausdorff))\n np.save(path_hausdorff, max_dists[..., 0])\n if path_hausdorff_99 is not None:\n utils.mkdir(os.path.dirname(path_hausdorff_99))\n np.save(path_hausdorff_99, max_dists[..., 1])\n if path_hausdorff_95 is not None:\n utils.mkdir(os.path.dirname(path_hausdorff_95))\n np.save(path_hausdorff_95, max_dists[..., 2])\n if path_mean_distance is not None:\n utils.mkdir(os.path.dirname(path_mean_distance))\n np.save(path_mean_distance, max_dists[..., 2])", "def create_tree(data_set, labels):\n labels = copy.copy(labels)\n class_list = [ eg[-1] for eg in data_set]\n # if all classes are same\n if class_list.count(class_list[0]) == len(class_list):\n return class_list[0]\n # only have class feature\n if len(data_set[0]) == 1:\n return majority_cnt(class_list)\n best_feat = choose_best_feature(data_set)\n best_feat_cls = labels[best_feat]\n node = {best_feat_cls: {}}\n del(labels[best_feat])\n feat_values = [eg[best_feat] for eg in data_set]\n unique_values = set(feat_values)\n for value in unique_values:\n sub_cls = labels[:]\n sub_ds = splite_dataset(data_set, best_feat, value)\n node[best_feat_cls][value] = create_tree(sub_ds, sub_cls)\n\n return node", "def flep(tree_adj, nodes_sign, edge_weight, root, return_fullcut_info=False):\n # start = clock()\n assert isinstance(tree_adj, dict)\n if not (isinstance(nodes_sign, tuple) and len(nodes_sign) == 2):\n nodes_sign = (nodes_sign, nodes_sign)\n if root in nodes_sign[0]:\n cutp, cutn = (MAX_WEIGHT, 0) if nodes_sign[0][root] < 0 else (0, MAX_WEIGHT)\n val_1 = nodes_sign[0][root]*MAX_WEIGHT\n cutp_, cutn_ = (MAX_WEIGHT, 0) if nodes_sign[1][root] < 0 else (0, MAX_WEIGHT)\n val_2 = nodes_sign[1][root]*MAX_WEIGHT\n return (val_1, val_2), {}, {root: (True, -1, cutp, cutn, cutp_, cutn_)}\n stack = []\n status = defaultdict(lambda: (False, -1, 0, 0, 0, 0))\n stack.append(root)\n while stack:\n v = stack.pop()\n if v >= 0:\n discovered, pred, cutp, cutn, cutp_, cutn_ = status[v]\n else:\n v = -(v+100)\n discovered, pred, cutp, cutn, cutp_, cutn_ = status[v]\n for child in tree_adj[v]:\n if status[child][1] != v:\n continue\n eweight = edge_weight[(child, v) if child < v else (v, child)]\n _, 
_, childp, childn, childp_, childn_ = status[child]\n cutp += min(childp, childn + eweight)\n cutn += min(childn, childp + eweight)\n cutp_ += min(childp_, childn_ + eweight)\n cutn_ += min(childn_, childp_ + eweight)\n status[v] = (discovered, pred, cutp, cutn, cutp_, cutn_)\n # print('{}: (+: {}, -: {})'.format(v, cutp, cutn))\n if v == root:\n # FLEP_CALLS_TIMING.append(clock() - start)\n intermediate = {}\n if return_fullcut_info:\n intermediate = {n: vals[2:6]\n for n, vals in status.items()\n if vals[0] and n not in nodes_sign[0]}\n return (((cutn - cutp), (cutn_ - cutp_)), intermediate, status)\n\n if not discovered:\n status[v] = (True, pred, cutp, cutn, cutp_, cutn_)\n if v in nodes_sign[0]:\n # don't go beyond revealed nodes\n continue\n stack.append(-(v+100))\n for w in tree_adj[v]:\n discovered, pred, cutp, cutn, cutp_, cutn_ = status[w]\n if pred == -1 and w != root:\n if w in nodes_sign[0]:\n cutp, cutn = (MAX_WEIGHT, 0) if nodes_sign[0][w] < 0 else (0, MAX_WEIGHT)\n cutp_, cutn_ = (MAX_WEIGHT, 0) if nodes_sign[1][w] < 0 else (0, MAX_WEIGHT)\n status[w] = (discovered, v, cutp, cutn, cutp_, cutn_)\n if not discovered:\n stack.append(w)\n assert False, root", "def find_best_threshold(y, y_hat, step_size, score_func, maximize=True):\n best_thres, best_score = 0.0, 0.0 if maximize else 1.0\n for thres in np.arange(0, 1, step_size):\n score = score_for_threshold(y, y_hat, score_func, thres)\n if (maximize and (score > best_score)) or (not maximize and (score < best_score)):\n best_score = score\n best_thres = thres\n\n return best_thres, best_score", "def nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id, max_spikes_for_nn, n_neighbors):\n\n total_spikes = all_pcs.shape[0]\n ratio = max_spikes_for_nn / total_spikes\n this_unit = all_labels == this_unit_id\n\n X = np.concatenate((all_pcs[this_unit, :], all_pcs[np.invert(this_unit), :]), 0)\n\n n = np.sum(this_unit)\n\n if ratio < 1:\n inds = np.arange(0, X.shape[0] - 1, 1 / ratio).astype('int')\n X = X[inds, :]\n n = int(n * ratio)\n\n nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)\n distances, indices = nbrs.kneighbors(X)\n\n this_cluster_inds = np.arange(n)\n\n this_cluster_nearest = indices[:n, 1:].flatten()\n other_cluster_nearest = indices[n:, 1:].flatten()\n\n hit_rate = np.mean(this_cluster_nearest < n)\n miss_rate = np.mean(other_cluster_nearest < n)\n\n return hit_rate, miss_rate", "def genuines_and_impostors(self, scores, labels):\r\n print('Computing genuine scores and impostor scores')\r\n scores_dimension, genuine_dimension, impostor_dimension = self._define_dimensions(scores, labels)\r\n genuine_score = np.zeros(shape=(genuine_dimension, 1))\r\n impostor_score = np.zeros(shape=(impostor_dimension, 1))\r\n indg = 0\r\n indi = 0\r\n for i in range(scores_dimension):\r\n for j in range(i):\r\n if labels[i] == labels[j]:\r\n genuine_score[indg, 0] = scores[i, j];\r\n indg = indg + 1;\r\n else:\r\n impostor_score[indi, 0] = scores[i, j];\r\n indi = indi + 1;\r\n gen_unique = np.unique(genuine_score)\r\n imp_unique = np.unique(impostor_score)\r\n print('Defining the thresholds')\r\n thresholds = np.concatenate(([0], gen_unique, imp_unique, [1]))\r\n thresholds = np.unique(thresholds)\r\n if np.max(np.shape(thresholds)) > 100:\r\n thresholds = self._compute_thresholds(0.01)\r\n return genuine_score, impostor_score, thresholds", "def postprocess2(scores, classes, bboxes, iou_threshold=0.2, score_threshold=0.5):\n n = len(scores)\n \n count_per_class = {cls:0 for cls in classes}\n 
bbox_per_class = {cls:[] for cls in classes}\n score_per_class = {cls:[] for cls in classes}\n\n for i in range(n):\n count_per_class[classes[i]] += 1\n bbox_per_class[classes[i]] += [bboxes[i]]\n score_per_class[classes[i]] += [scores[i]]\n \n det_num = 0\n det_classes = [] \n det_scores = []\n det_bboxes = []\n\n for cls in count_per_class:\n current_count = count_per_class[cls]\n current_scores = np.array(score_per_class[cls], np.float32)\n current_bboxes = np.array(bbox_per_class[cls], np.int32)\n\n idx = np.argsort(current_scores)[::-1]\n sorted_scores = current_scores[idx]\n sorted_bboxes = current_bboxes[idx]\n\n top_k_ids = []\n size = 0\n i = 0\n\n while i < current_count:\n if sorted_scores[i] < score_threshold:\n break\n top_k_ids.append(i)\n det_num += 1\n det_classes.append(cls)\n det_scores.append(sorted_scores[i])\n det_bboxes.append(sorted_bboxes[i])\n size += 1\n i += 1\n\n while i < current_count:\n tiled_bbox_i = np.tile(sorted_bboxes[i], (size, 1))\n ious, iofs, ioss = iou_bbox(tiled_bbox_i, sorted_bboxes[top_k_ids])\n max_iou = np.max(ious)\n # max_iof = np.max(iofs)\n # max_ios = np.max(ioss)\n # temp = np.max((max_iof, max_ios))\n if max_iou > iou_threshold:\n i += 1\n else:\n break\n\n return det_num, np.array(det_scores, np.float32), np.array(det_classes, np.int32), np.array(det_bboxes, np.int32)", "def scoreslabels_2_tarnon(scores,labels):\n tt = labels==1\n tar = scores[tt]\n non = scores[np.logical_not(tt)]\n return tar, non", "def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right", "def fit(self, data, labels, labels_pred):\n self.n_samples, dim = data.shape\n self.labels_unique = np.unique(labels)\n self.n_classes = len(self.labels_unique)\n if self.n_neighbors is None:\n # Set number of nearest neighbors based on the maximum number of samples per class and the neighborhood\n # constant\n num = 0\n for c in 
self.labels_unique:\n ind = np.where(labels == c)[0]\n if ind.shape[0] > num:\n num = ind.shape[0]\n\n self.n_neighbors = int(np.ceil(num ** self.neighborhood_constant))\n\n logger.info(\"Number of samples: {:d}. Data dimension = {:d}.\".format(self.n_samples, dim))\n logger.info(\"Number of classes: {:d}.\".format(self.n_classes))\n logger.info(\"Number of neighbors (k): {:d}.\".format(self.n_neighbors))\n logger.info(\"Fraction of outliers (alpha): {:.4f}.\".format(self.alpha))\n if self.model_dim_reduction:\n data = transform_data_from_model(data, self.model_dim_reduction)\n dim = data.shape[1]\n logger.info(\"Applying dimension reduction to the data. Projected dimension = {:d}.\".format(dim))\n\n # Distance from each sample in `data` to the `1 - alpha` level sets corresponding to each class\n distance_level_sets = np.zeros((self.n_samples, self.n_classes))\n self.index_knn = dict()\n self.epsilon = dict()\n indices_sub = dict()\n for j, c in enumerate(self.labels_unique):\n logger.info(\"Processing data from class '{}':\".format(c))\n logger.info(\"Building a KNN index for all the samples from class '{}'.\".format(c))\n indices_sub[c] = np.where(labels == c)[0]\n data_sub = data[indices_sub[c], :]\n self.index_knn[c] = KNNIndex(\n data_sub, n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distances to the k nearest neighbors of each sample\n _, nn_distances = self.index_knn[c].query_self(k=self.n_neighbors)\n # Radius or distance to the k-th nearest neighbor for each sample\n radius_arr = nn_distances[:, self.n_neighbors - 1]\n\n # Smallest radius `epsilon` such that only `alpha` fraction of the samples from class `c` have radius\n # greater than `epsilon`\n if self.alpha > 0.:\n self.epsilon[c] = np.percentile(radius_arr, 100 * (1 - self.alpha), interpolation='midpoint')\n\n # Exclude the outliers and build a KNN index with the remaining samples\n mask_incl = radius_arr <= self.epsilon[c]\n mask_excl = np.logical_not(mask_incl)\n num_excl = mask_excl[mask_excl].shape[0]\n else:\n # Slightly larger value than the largest radius\n self.epsilon[c] = 1.0001 * np.max(radius_arr)\n\n # All samples are included in the density level set\n mask_incl = np.ones(indices_sub[c].shape[0], dtype=np.bool)\n mask_excl = np.logical_not(mask_incl)\n num_excl = 0\n\n if num_excl:\n logger.info(\"Excluding {:d} samples with radius larger than {:.6f} and building a KNN index with \"\n \"the remaining samples.\".format(num_excl, self.epsilon[c]))\n self.index_knn[c] = KNNIndex(\n data_sub[mask_incl, :], n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distance to the nearest neighbor of each sample that is part of the KNN index\n _, dist_temp = self.index_knn[c].query_self(k=1)\n ind = indices_sub[c][mask_incl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n\n # Distance to the nearest neighbor of each sample that is not a part of the KNN index (outliers)\n _, dist_temp = self.index_knn[c].query(data_sub[mask_excl, :], k=1)\n ind = indices_sub[c][mask_excl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n else:\n # No need to rebuild the KNN index because no samples are excluded.\n # Distance to the nearest neighbor of each sample\n 
distance_level_sets[indices_sub[c], j] = nn_distances[:, 0]\n\n logger.info(\"Calculating the trust score for the estimation data.\")\n for c in self.labels_unique:\n # Compute the distance from each sample from class `c` to the level sets from the remaining classes\n data_sub = data[indices_sub[c], :]\n for j, c_hat in enumerate(self.labels_unique):\n if c_hat == c:\n continue\n\n _, dist_temp = self.index_knn[c_hat].query(data_sub, k=1)\n distance_level_sets[indices_sub[c], j] = dist_temp[:, 0]\n\n self.scores_estim = self._score_helper(distance_level_sets, labels_pred)\n return self", "def _exhaustive_best_subset(X, y, nbest=8, score=\"bic\"):\n \n assert score in [\"bic\", \"aic\"], \"Unknown score\"\n assert nbest > 0, \"nbest must be positive\"\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def score_iterable():\n def get_bic(feature_subset):\n return OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return OLS(y, Xc[feature_subset]).fit().aic\n \n get_score = get_bic if score == \"bic\" else get_aic\n\n # Recipe from https://docs.python.org/3/library/itertools.html#itertools-recipes\n def powerset(iterable):\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\n\n for k in powerset(X.columns):\n kp = ['(Intercept)', *k]\n yield get_score(kp), kp\n \n return heapq.nsmallest(nbest, score_iterable())", "def flatten_binary_scores(scores, labels, ignore=None):\n scores = tf.reshape(scores, (-1,))\n labels = tf.reshape(labels, (-1,))\n if ignore is None:\n return scores, labels\n valid = tf.not_equal(labels, ignore)\n vscores = tf.boolean_mask(scores, valid, name='valid_scores')\n vlabels = tf.boolean_mask(labels, valid, name='valid_labels')\n return vscores, vlabels", "def get_tpr_from_threshold(scores,labels, threshold_list):\n tpr_list = []\n hack_scores = []\n for score, label in zip(scores,labels):\n if label == 1:\n hack_scores.append(float(score))\n hack_scores.sort(reverse=True)\n hack_nums = len(hack_scores)\n for threshold in threshold_list:\n hack_index = 0\n while hack_index < hack_nums:\n if hack_scores[hack_index] <= threshold:\n break\n else:\n hack_index += 1\n if hack_nums != 0:\n tpr = hack_index * 1.0 / hack_nums\n else:\n tpr = 0\n tpr_list.append(tpr)\n return tpr_list", "def postprocess(scores, classes, bboxes, iou_threshold=0.3, score_threshold=0.5):\n n = len(scores)\n \n det_num = 0\n det_classes = [] \n det_scores = []\n det_bboxes = []\n\n idx = np.argsort(scores)[::-1]\n sorted_scores = scores[idx]\n sorted_bboxes = bboxes[idx]\n sorted_classes = classes[idx]\n\n top_k_ids = []\n i = 0\n\n while i < n:\n if sorted_scores[i] < score_threshold:\n break\n\n top_k_ids.append(i)\n det_num += 1\n det_scores.append(sorted_scores[i])\n det_bboxes.append(sorted_bboxes[i])\n det_classes.append(sorted_classes[i])\n i += 1\n\n while i < n:\n tiled_bbox_i = np.tile(sorted_bboxes[i], (det_num, 1)) \n flags = (sorted_classes[top_k_ids]==sorted_classes[i])*1.0 \n ious, iofs, ioss = iou_bbox(tiled_bbox_i, sorted_bboxes[top_k_ids]) \n max_iou = np.max(ious) \n # max_iof = np.max(iofs*flags) \n # max_ios = np.max(ioss*flags) \n # temp = np.max((max_iof, max_ios))\n if max_iou > iou_threshold:\n i += 1\n else:\n break\n\n return det_num, np.array(det_scores, np.float32), np.array(det_classes, np.int32), np.array(det_bboxes, np.int32)", "def prune_duplicates(\n labels: 'ObjectDetectionLabels',\n 
score_thresh: float,\n merge_thresh: float,\n max_output_size: Optional[int] = None) -> 'ObjectDetectionLabels':\n if max_output_size is None:\n max_output_size = len(labels)\n pruned_boxlist = non_max_suppression(\n labels.boxlist,\n max_output_size=max_output_size,\n iou_threshold=merge_thresh,\n score_threshold=score_thresh)\n return ObjectDetectionLabels.from_boxlist(pruned_boxlist)", "def _bestFeat2split(dataSet, impurity_crit, min_impurity_decrease, min_samples_split):\n\t\tm, n = dataSet.shape\n\t\tbestFeatInd, bestVal = None, DecisionTree._make_leaf(dataSet, impurity_crit)\n\n\t\tif m < min_samples_split or len(set(dataSet[:,-1])) == 1:\n\t\t\treturn bestFeatInd, bestVal\n\n\t\timpurity = m * impurity_crit(dataSet)\n\t\tmin_impurity = np.inf\n\t\t\n\n\t\tfor feat_ind in range(n-1):\n\t\t\tif type(dataSet[:, feat_ind][0]) != str:\n\t\t\t\tuniqVal = set(dataSet[:, feat_ind])\n\t\t\telse:\n\t\t\t\tuniqVal = map(set, subsets(list(dataSet[:, feat_ind])))\n\t\t\tfor val in uniqVal:\n\t\t\t\tD1, D2 = DecisionTree._binarySplit(dataSet, feat_ind, val)\n\t\t\t\tif len(D1) < min_samples_split or len(D2) < min_samples_split:\n\t\t\t\t\tcontinue\n\t\t\t\tnew_impurity = len(D1)*impurity_crit(D1) + len(D2)*impurity_crit(D2)\n\t\t\t\tif impurity - new_impurity < min_impurity_decrease:\n\t\t\t\t\tcontinue\n\t\t\t\tif new_impurity < min_impurity:\n\t\t\t\t\tmin_impurity = new_impurity\n\t\t\t\t\tbestFeatInd = feat_ind; bestVal = val\n\t\treturn bestFeatInd, bestVal", "def __find_best_split(self, x, y):\n data = np.transpose(np.vstack((np.transpose(x), y)))\n num_features = data.shape[1] - 1\n\n # initialise splitting rule components\n integer_splitting_rule = None\n feature_index_to_split = None\n max_info_gain = 0\n\n # iterate over all the features and find best splits within these\n for feature in range(num_features):\n info_gain, split_int = self.__find_best_split_in_feature(\n data[:, [feature, -1]])\n if info_gain is None:\n continue\n # update max info gain so far as it iterates over features\n if info_gain > max_info_gain:\n max_info_gain = info_gain\n feature_index_to_split = feature\n integer_splitting_rule = int(split_int)\n\n return feature_index_to_split, integer_splitting_rule", "def balance_classes(data, labels):\n\n index_dict = {}\n\n for idx, label in enumerate(labels):\n if label not in index_dict:\n index_dict[label] = [idx]\n else:\n index_dict[label] += [idx]\n\n index_list = list(index_dict.values())\n\n min_balanced_number = min([len(l) for l in index_list])\n\n index_to_take_list = np.concatenate([\n np.random.choice(l, min_balanced_number, replace=False)\n for l in index_list\n ])\n\n np.random.shuffle(index_to_take_list)\n\n return data[index_to_take_list], labels[index_to_take_list]", "def _split_to_wordpieces_with_labels(\n self, tokens: List[str], labels: List[int]\n ) -> Tuple[List[str], List[int], List[int]]:\n bert_tokens = [] # Original tokens split into wordpieces.\n bert_labels = [] # Label for each wordpiece.\n # Index of each wordpiece that starts a new token.\n token_start_indices = []\n for i, token in enumerate(tokens):\n # '+ 1' is because bert_tokens will be prepended by [CLS] token later.\n token_start_indices.append(len(bert_tokens) + 1)\n pieces = self._tokenizer.tokenize(token)\n bert_tokens.extend(pieces)\n bert_labels.extend([labels[i]] * len(pieces))\n return bert_tokens, bert_labels, token_start_indices", "def neighbor_optimizer(neighbors_values, nb_splits, data_fun):\r\n\r\n X, y = data_fun()[0:2]\r\n\r\n index = fold(nb_splits, X)\r\n mean = 
[]\r\n\r\n for value in neighbors_values:\r\n\r\n acc = []\r\n clf = KNeighborsClassifier(n_neighbors = value)\r\n\r\n # n-fold cross-validation\r\n\r\n for test_index, train_index in index:\r\n\r\n X_train, y_train = list(map(X.__getitem__, train_index)), list(map(y.__getitem__, train_index))\r\n X_test, y_test = list(map(X.__getitem__, test_index)), list(map(y.__getitem__, test_index))\r\n\r\n clf = clf.fit(X_train, y_train)\r\n acc.append(clf.score(X_test, y_test))\r\n\r\n # Mean accuracy over all folds of the CV\r\n\r\n mean.append(np.mean(acc))\r\n\r\n return neighbors_values[np.argmax(mean)], max(mean)", "def reduce_by_labels(values, labels, weights=None, target_labels=None,\n red_op='mean', axis=0, dtype=None):\n\n if axis == 1 and values.ndim == 1:\n axis = 0\n\n if target_labels is None:\n uq_tl = np.unique(labels)\n idx_back = None\n else:\n uq_tl, idx_back = np.unique(target_labels, return_inverse=True)\n\n if weights is not None:\n weights = np.atleast_2d(weights)\n\n v2d = np.atleast_2d(values)\n if axis == 1:\n v2d = v2d.T\n\n if isinstance(red_op, str):\n fred = _get_redop(red_op, weights=weights, axis=1)\n else:\n fred = red_op\n\n if dtype is None:\n dtype = np.float64\n if red_op in {'min', 'max', 'sum', 'mode'}:\n dtype = values.dtype\n\n mapped = np.empty((v2d.shape[0], uq_tl.size), dtype=dtype)\n for i, lab in enumerate(uq_tl):\n mask = labels == lab\n wm = None if weights is None else weights[:, mask]\n\n if isinstance(red_op, str):\n mapped[:, i] = fred(v2d[:, mask], wm)\n\n else:\n for idx in range(v2d.shape[0]):\n mapped[idx, i] = fred(v2d[idx, mask], wm)\n\n if idx_back is not None:\n mapped = mapped[:, idx_back]\n\n if axis == 1:\n mapped = mapped.T\n\n if values.ndim == 1:\n return mapped[0]\n return mapped", "def make_stratified_split_of_segmentation_dataset(\n dataset: Union[Dataset, np.ndarray, List],\n num_classes: int,\n split_ratio: Optional[float] = 0.2,\n names_of_classes: Optional[int] = None,\n verbose: bool = False,\n ignore_index: Optional[bool] = None,\n max_optimization_iterations: int = 1000000,\n split_n_sample_slack: int = 0,\n):\n disable_tqdm = not verbose\n if isinstance(dataset, Dataset):\n label_fn = _calc_label_fn(dataset[0])\n dataset = [label_fn(dataset[_i]) for _i in trange(len(dataset), disable=disable_tqdm)]\n icm = instance_class_matrix(dataset, num_classes, disable=disable_tqdm)\n # TODO: remove columns with ignore_index\n # icm = icm[:, 1:].numpy()\n ds_cc = icm.sum(axis=0)\n ds_swcc = (icm > 0).astype(np.long).sum(axis=0)\n if names_of_classes is None:\n names_of_classes = [f\"class_{_i}\" for _i in range(num_classes)]\n dataset_stats = pd.DataFrame({\n 'class_count': ds_cc,\n 'samples_with_class_count': ds_swcc\n }, index=names_of_classes)\n if verbose:\n print(dataset_stats.sort_values('samples_with_class_count', ascending=False))\n optimization_weights_for_classes = np.zeros(icm.shape[1], dtype=np.float)\n # TODO: override weights (importance of classes)\n optimization_weights_for_classes = 1.0 / ds_cc\n optimization_weights_for_classes[ds_cc == 0] = 0\n optimization_weights_for_classes /= optimization_weights_for_classes.sum()\n if verbose:\n print('\\n'.join(f\"{_f:1.9f}\" for _f in optimization_weights_for_classes))\n num_samples = icm.shape[0]\n testset_size = int(np.floor(num_samples * split_ratio))\n\n def calc_cost(subsample):\n subset_class_voxels = icm[subsample].sum(axis=0)\n per_class_ratios = subset_class_voxels / ds_cc.astype(np.float)\n return (optimization_weights_for_classes * np.abs(split_ratio - 
per_class_ratios)).sum()\n\n cost_stats = []\n best_cost = np.inf\n best_testset = None\n for _ in trange(max_optimization_iterations):\n if split_n_sample_slack:\n subsample_size = np.random.randint(testset_size - split_n_sample_slack, testset_size + split_n_sample_slack)\n else:\n subsample_size = testset_size\n random_testset = np.random.permutation(num_samples)[:subsample_size]\n _cost = calc_cost(random_testset)\n if _cost < best_cost:\n best_cost = _cost\n best_testset = random_testset\n cost_stats.append(_cost)\n\n subset_class_stats = icm[best_testset].sum(axis=0)\n per_class_ratios = subset_class_stats / ds_cc.astype(np.float)\n residual = np.abs(split_ratio - per_class_ratios)\n # TODO: need to account for ignore_index\n # optimization_results = pd.DataFrame({\n # 'weights': optimization_weights_for_classes,\n # 'ratios': per_class_ratios\n # }, index=names_of_classes[1:])\n # TODO: plot histograms of splits\n # if verbose:\n # pd.Series(cost_stats).plot(kind='hist')\n # pd.Series(cost_stats).plot(kind='hist', bins=50)\n # icm[:, optimization_weights_for_classes == 0].sum(axis=1)\n # optimization_weights_for_classes == 0\n # removed_classes = np.where(optimization_weights_for_classes==0)[0] + 1\n # scenes_with_no_classes_but_removed = np.where(icm[:,optimization_weights_for_classes!=0].sum(axis=1)==0)[0]\n # for _scene_id in scenes_with_no_classes_but_removed:\n # print(f\"scene_id={_scene_id}: {labels[_scene_id]['semantic'].unique()}\")\n return best_testset", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = 
right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr", "def all_pair_loss(scores, targets):\n labels = targets[0]\n weights = targets[4].data if len(targets) == 5 else None\n loss_op = nn.BCEWithLogitsLoss(weight=weights, reduction=\"sum\")\n loss = loss_op(scores, labels)\n return loss / n", "def cls_wrapper(target_labels, predicted_labels):\n # Find which targets contribute to the loss (targets with non-neutral labels)\n contributing_indices = tf.where(tf.not_equal(target_labels, -1))\n\n # Take contributing\n target_labels = tf.gather_nd(target_labels, contributing_indices)\n contributing_prediction = tf.gather_nd(predicted_labels, contributing_indices)\n\n # Compute loss\n res = function(target_labels,\n contributing_prediction)\n\n # Zero batch size case\n return K.switch(tf.size(res) > 0, K.mean(res), tf.constant(0.0))", "def _split_k_best_hypotheses(self, hts):\n hts_info = [(-1 * ht.conf, idx) for idx, ht in enumerate(hts)]\n sorted_hts_info = sorted(hts_info)\n best_hts_idx = set([\n i for _, i in sorted_hts_info[:self.max_hypotheses]])\n best_k_hts = [ht for idx, ht in enumerate(hts)\n if idx in best_hts_idx]\n other_hts = [ht for idx, ht in enumerate(hts)\n if idx not in best_hts_idx]\n return best_k_hts, other_hts", "def make_pool(X, y, prelabeled=np.arange(5)):\n y = y.argmax(axis=1)\n # a set of labels is already labeled by the oracle\n y_train_labeled = np.array([None] * len(y))\n #y_train_labeled =np.empty((len(y), 2))* np.nan\n y_train_labeled[prelabeled] = y[prelabeled]\n\n # we are making a pool of the train data\n # the 'prelabeled' labels of the dataset are already 
labeled.\n return Dataset(X, y_train_labeled), Dataset(X, y)", "def test_add_empty_nodes_with_label_when_splitting(self):\n print \"----- test_add_empty_nodes_with_label_when_splitting -----\"\n sel_axis = (lambda axis: axis)\n \n #create tree, first node splits in x direction\n tree = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n kdtree.visualize(tree)\n \n point_left = [0.4, 0.5]\n tree.split2(point_left, axis = 0)\n kdtree.visualize(tree)\n \n point1 = [0.3, 0.5]\n found_node = tree.get_path_to_leaf(point1)[-1]\n correct_node1 = 3\n self.assertEqual(found_node.label, correct_node1, \"Not correct node found\")\n \n point_right = [0.6, 0.5]\n tree.split2(point_right, axis = 1)\n kdtree.visualize(tree)\n \n point2 = [0.6, 0.7]\n found_node = tree.get_path_to_leaf(point2)[-1]\n correct_node2 = 6\n self.assertEqual(found_node.label, correct_node2, \"Not correct node found\")\n \n print \"----- end: test_add_empty_nodes_with_label_when_splitting -----\"", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = [True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... 
branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def recursive_threshold_search(\n metric_name, metric_val, y_proba, y_true, sample_weights=None, verbose=False\n):\n ts_next = np.linspace(0, 1, 11)\n prev_min = -1\n prev_max = 999\n ts_final = None\n n_points = 5\n it = 0\n eps_rel = 1e-3\n while True:\n it += 1\n ts, trps, fprs, purities = calc_metrics(\n ts_next, y_proba, y_true, sample_weights\n )\n\n if metric_name == \"score\" or metric_name == \"proba\":\n vals = ts\n elif metric_name == \"eff\":\n vals = trps\n elif metric_name == \"mistag_rate\":\n vals = fprs\n elif metric_name == \"purity\":\n vals = purities\n else:\n raise ValueError(f\"illegal value for `metric_name`: {metric_name}\")\n\n idx = np.argmin(abs(vals - metric_val))\n if abs(vals[idx] - metric_val) / max(metric_val, 1e-10) < eps_rel:\n if verbose:\n print(f\"finish with t={ts[idx]}, v={vals[idx]} [target={metric_val}]\")\n break\n\n if it > 10:\n if verbose:\n print(\n f\"finish with t={ts[idx]}, v={vals[idx]} [target={metric_val}] [due to REP]\"\n )\n break\n\n prev_min = np.min(vals)\n prev_max = np.max(vals)\n\n if idx == 0:\n ts_next = np.linspace(ts[0], ts[1], n_points)\n continue\n if idx == len(ts) - 1:\n ts_next = np.linspace(ts[-2], ts[-1], n_points)\n continue\n\n if (vals[idx] - metric_val) * (vals[idx + 1] - metric_val) < 0:\n pair = ts[idx], ts[idx + 1]\n ts_next = np.linspace(min(pair), max(pair), n_points)\n elif (vals[idx] - metric_val) * (vals[idx - 1] - metric_val) < 0:\n pair = ts[idx], ts[idx - 1]\n ts_next = np.linspace(min(pair), max(pair), n_points)\n if abs(vals[idx] - metric_val) / max(metric_val, 1e-10) > 10 * eps_rel:\n print(\n f\"Warning: returning {vals[idx]} while target was {metric_val}, relative diff. 
= {abs(vals[idx]-metric_val) / max(metric_val, 1e-10)}\"\n )\n return ts[idx], vals[idx]", "def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"", "def batch_predict(tree_adj, training_signs, edge_weight):\n # since shazoo use the revealed signs as-is, it's ok to use the same name\n training_signs, l2_values, rta_signs = training_signs\n all_nodes_to_predict = set(tree_adj) - set(training_signs)\n logging.debug('batch_predict has %d nodes to predict', len(all_nodes_to_predict))\n methods = ['l2cost', 'rta', 'shazoo']\n # fields are current_closest_hinge, current_sign, current_dst_to_closest_hinge\n node_predictions = {m: defaultdict(lambda: (None, None, 2e9)) for m in methods}\n hinge_value = {m: {} for m in methods}\n total_iter = 0\n while all_nodes_to_predict:\n some_root_of_a_border_tree = next(iter(all_nodes_to_predict))\n hinge_nodes, border_tree_nodes = find_hinge_nodes(tree_adj, edge_weight, training_signs,\n some_root_of_a_border_tree,\n with_visited=True)\n unmarked = border_tree_nodes - hinge_nodes\n for u in hinge_nodes:\n if u in hinge_value['shazoo']:\n continue\n vals, _, status = flep(tree_adj, (training_signs, rta_signs), edge_weight, u)\n hinge_value['shazoo'][u] = sgn(vals[0])\n hinge_value['rta'][u] = sgn(vals[1])\n if not USE_SCIPY:\n continue\n border_tree = build_border_tree_from_mincut_run(status, edge_weight)\n _, E, El, leaves_sign, _, _ = border_tree\n L = {u: l2_values[u] for u in leaves_sign}\n mapped_E, mapped_El_L, mapping = preprocess_edge_and_leaves(E, El, L)\n val = solve_by_zeroing_derivative(mapped_E, mapped_El_L, mapping, L,\n reorder=False)[0][u]\n hinge_value['l2cost'][u] = sgn(val)\n predicted_in_that_border_tree = set()\n inner_iter = 0\n # to avoid the same fork being picked again and again\n unmarked.add(some_root_of_a_border_tree)\n while unmarked:\n one_to_predict = next(iter(unmarked))\n hinge_tree = get_hinge_tree(one_to_predict, tree_adj, hinge_nodes)\n other_predicted = set()\n for h, h_val 
in iteritems(hinge_value['shazoo']):\n if h not in hinge_tree:\n continue\n predicted = propagate_hinge(hinge_tree, h, h_val, node_predictions['shazoo'],\n edge_weight)\n for u in predicted:\n prediction_info = node_predictions['shazoo'][u]\n used_hinge = prediction_info[0]\n node_predictions['rta'][u] = (used_hinge, hinge_value['rta'][used_hinge],\n prediction_info[2])\n if not USE_SCIPY:\n continue\n node_predictions['l2cost'][u] = (used_hinge, hinge_value['l2cost'][used_hinge],\n prediction_info[2])\n other_predicted.update(predicted)\n predicted_in_that_border_tree.update(other_predicted)\n unmarked -= other_predicted\n inner_iter += 1\n if inner_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the inner loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the inner loop')\n all_nodes_to_predict -= predicted_in_that_border_tree\n total_iter += 1\n if total_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the outer loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the outer loop')\n logging.debug('batch_predict has actually predicted %d nodes', len(node_predictions) - len(training_signs))\n return {m: {u: v[1] for u, v in iteritems(node_predictions[m]) if u not in training_signs}\n for m in methods}", "def assign(self,\n bboxes,\n num_level_bboxes,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_bboxes_ignore=None,\n gt_labels=None):\n INF = 100000000\n bboxes = bboxes[:, :4]\n bbox_preds = bbox_preds.detach()\n cls_scores = cls_scores.detach()\n\n num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)\n\n # NOTE DeFCN style cost function\n # compute iou between all bbox and gt\n overlaps = self.iou_calculator(bbox_preds, gt_bboxes)\n # compute cls cost for bbox and GT\n cls_cost = torch.sigmoid(cls_scores[:, gt_labels])\n # make sure that we are in element-wise multiplication\n assert cls_cost.shape == overlaps.shape\n # overlaps is actually a cost matrix\n overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha\n\n # assign 0 by default\n assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n 0,\n dtype=torch.long)\n\n if num_gt == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n max_overlaps = overlaps.new_zeros((num_bboxes, ))\n if num_gt == 0:\n # No truth, assign everything to background\n assigned_gt_inds[:] = 0\n if gt_labels is None:\n assigned_labels = None\n else:\n assigned_labels = overlaps.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n return AssignResult(\n num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n # compute center distance between all bbox and gt\n gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0\n gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0\n gt_points = torch.stack((gt_cx, gt_cy), dim=1)\n\n bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0\n bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0\n bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)\n\n distances = (bboxes_points[:, None, :] -\n gt_points[None, :, :]).pow(2).sum(-1).sqrt()\n\n if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):\n ignore_overlaps = self.iou_calculator(\n bboxes, gt_bboxes_ignore, mode='iof')\n ignore_max_overlaps, _ = 
ignore_overlaps.max(dim=1)\n ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr\n distances[ignore_idxs, :] = INF\n assigned_gt_inds[ignore_idxs] = -1\n\n # Selecting candidates based on the center distance\n candidate_idxs = []\n start_idx = 0\n for level, bboxes_per_level in enumerate(num_level_bboxes):\n # on each pyramid level, for each gt,\n # select k bbox whose center are closest to the gt center\n end_idx = start_idx + bboxes_per_level\n distances_per_level = distances[start_idx:end_idx, :]\n selectable_k = min(self.topk, bboxes_per_level)\n _, topk_idxs_per_level = distances_per_level.topk(\n selectable_k, dim=0, largest=False)\n candidate_idxs.append(topk_idxs_per_level + start_idx)\n start_idx = end_idx\n candidate_idxs = torch.cat(candidate_idxs, dim=0)\n\n # get corresponding iou for the these candidates, and compute the\n # mean and std, set mean + std as the iou threshold\n candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]\n overlaps_mean_per_gt = candidate_overlaps.mean(0)\n overlaps_std_per_gt = candidate_overlaps.std(0)\n overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt\n\n is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]\n\n # limit the positive sample's center in gt\n for gt_idx in range(num_gt):\n candidate_idxs[:, gt_idx] += gt_idx * num_bboxes\n ep_bboxes_cx = bboxes_cx.view(1, -1).expand(\n num_gt, num_bboxes).contiguous().view(-1)\n ep_bboxes_cy = bboxes_cy.view(1, -1).expand(\n num_gt, num_bboxes).contiguous().view(-1)\n candidate_idxs = candidate_idxs.view(-1)\n\n # calculate the left, top, right, bottom distance between positive\n # bbox center and gt side\n l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]\n t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]\n r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)\n b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)\n is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01\n is_pos = is_pos & is_in_gts\n\n # if an anchor box is assigned to multiple gts,\n # the one with the highest IoU will be selected.\n overlaps_inf = torch.full_like(overlaps,\n -INF).t().contiguous().view(-1)\n index = candidate_idxs.view(-1)[is_pos.view(-1)]\n overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n assigned_gt_inds[\n max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n\n if gt_labels is not None:\n assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n pos_inds = torch.nonzero(\n assigned_gt_inds > 0, as_tuple=False).squeeze()\n if pos_inds.numel() > 0:\n assigned_labels[pos_inds] = gt_labels[\n assigned_gt_inds[pos_inds] - 1]\n else:\n assigned_labels = None\n return AssignResult(\n num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into 
torchvision.ops.nms()\n #redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n #merge = False # use merge-NMS\n\n output = [np.zeros((0, 6))] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n # Detections matrix nx6 (xyxy, conf, cls)\n # best class only\n conf = x[:, 5:].max(1, keepdims=True)\n j = np.argmax(x[:, 5:], axis=1)\n j = j.reshape(j.shape[0],1)\n #x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n x = np.concatenate((box, conf, j.astype(np.float32)),axis=1)\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort()[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n #i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n i = nms(boxes, scores, iou_thres) # NMS\n \n output[xi] = x[i]\n\n return output", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def optimal_assignment(\n labels_pred: np.ndarray | Tensor,\n *,\n labels_true: np.ndarray | Tensor,\n num_classes: int | None = None,\n encode: bool = True,\n) -> dict[int, int]:\n # row_ind maps from class ID to cluster ID: cluster_id = row_ind[class_id]\n # col_ind maps from cluster ID to class ID: class_id = row_ind[cluster_id]\n cost_matrix, decodings_pred, decodings_true = compute_cost_matrix(\n labels_pred=labels_pred, labels_true=labels_true, num_classes=num_classes, encode=encode\n )\n\n if cost_matrix.shape[0] == cost_matrix.shape[1]:\n label_preds, _, _ = lapjv(-cost_matrix)\n else:\n _, label_preds = linear_sum_assignment(-cost_matrix)\n label_map = {}\n for label_true, label_pred in enumerate(label_preds):\n if decodings_true is not None:\n label_true = decodings_true[label_true]\n if decodings_pred is not None:\n label_pred = decodings_pred[label_pred]\n label_map[label_pred] = label_true\n\n return label_map", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def equal_opportunity(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Dict[str, torch.Tensor]:\n num_groups = torch.unique(groups).shape[0]\n group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)\n\n transformed_group_stats = _groups_stat_transform(group_stats)\n\n return _compute_binary_equal_opportunity(**transformed_group_stats)", "def linearAssignmentParcellation(col_labels, label_mapping, slabels):\n\n z = 
np.zeros((len(col_labels),))\n\n for k, v in label_mapping.items():\n indv = np.where(col_labels == v)[0]\n z[indv] = k\n\n maxt = np.max(z)\n inds = np.where(col_labels>0)[0]\n zeros = inds[(z[inds]==0)]\n\n leftovers = np.unique(col_labels[zeros])\n\n for j,left in enumerate(leftovers):\n indlft = np.where(col_labels == left)\n z[indlft] = maxt + j + 1\n\n return z", "def update(self, labels, preds):\n #labels, preds = check_label_shapes(labels, preds, True)\n\n for label, pred_label in zip(labels, preds):\n if len(pred_label.shape) > 2:\n pred_label = mx.nd.reshape(pred_label, shape=[-1, pred_label.shape[-1]])\n label = mx.nd.reshape(pred_label, shape=[-1])\n\n # Using argpartition here instead of argsort is safe because\n # we do not care about the order of top k elements. It is\n # much faster, which is important since that computation is\n # single-threaded due to Python GIL.\n pred_label = np.argpartition(pred_label.asnumpy().astype('float32'), -self.top_k)\n label = label.asnumpy().astype('int32')\n check_label_shapes(label, pred_label)\n num_dims = len(pred_label.shape)\n mask = (label != self.ignore_label).astype(np.int32)\n num_samples = mask.sum()\n\n num_classes = pred_label.shape[1]\n top_k = min(num_classes, self.top_k)\n for j in range(top_k):\n num_correct = ((pred_label[:, num_classes - 1 - j].flat == label.flat) * mask).sum()\n self.sum_metric += num_correct\n self.global_sum_metric += num_correct\n\n self.num_inst += num_samples\n self.global_num_inst += num_samples", "def build_task_weight_from_label(task_name_to_labels):\n task_name_to_weights = {}\n for task_name, label in task_name_to_labels.items():\n task_name_to_weights[task_name] = tf.cast(tf.divide(tf.cast(tf.count_nonzero(tf.greater_equal(label, 0)), dtype=tf.int32), tf.shape(label)[0]), dtype=tf.float32)\n\n return task_name_to_weights", "def propagate_labels_majority(image,labels):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels)\n outputs = zeros(amax(rlabels)+1,'i')\n counts = zeros(amax(rlabels)+1,'i')\n for rlabel, label_, count in cors.T:\n if not rlabel or not label_:\n # ignore background correspondences\n continue\n if counts[rlabel] < count:\n outputs[rlabel] = label_\n counts[rlabel] = count\n outputs[0] = 0\n return outputs[rlabels]", "def multiclass_objective(\n y_true: npt.NDArray, y_pred: npt.NDArray, weight: npt.NDArray = None\n) -> Tuple[npt.NDArray, npt.NDArray]:\n # TODO: remove reshaping once https://github.com/microsoft/LightGBM/pull/4925 is released\n y_pred = y_pred.reshape(y_true.shape[0], -1, order=\"F\")\n\n num_rows, num_class = y_pred.shape\n prob = softmax(y_pred)\n grad_update = np.zeros_like(prob)\n grad_update[np.arange(num_rows), y_true.astype(np.int32)] = -1.0\n grad = prob + grad_update\n factor = num_class / (num_class - 1)\n hess = factor * prob * (1 - prob)\n if weight is not None:\n weight2d = weight.reshape(-1, 1)\n grad *= weight2d\n hess *= weight2d\n\n # TODO: remove ravel once https://github.com/microsoft/LightGBM/pull/4925 is released\n grad = grad.ravel(order=\"F\")\n hess = hess.ravel(order=\"F\")\n\n return grad, hess", "def match(pos_thresh, neg_thresh, truths, priors, labels, loc_t, conf_t, idx_t, idx, loc_data):\n decoded_priors = point_form(priors)\n\n\n overlaps = jaccard(truths, decoded_priors)\n\n # Size [num_priors] best ground truth for each prior\n overlaps = P.Cast()(overlaps, mindspore.float32)\n best_truth_idx, best_truth_overlap = P.ArgMaxWithValue(0)(overlaps)\n\n\n _, drop_pad_overlap = P.ArgMaxWithValue(1)(overlaps)\n for i in 
range(overlaps.shape[0]):\n if drop_pad_overlap[i] == 0:\n overlaps = overlaps[:i, :]\n break\n\n for _ in range(overlaps.shape[0]):\n # Find j, the gt with the highest overlap with a prior\n # In effect, this will loop through overlaps.size(0) in a \"smart\" order,\n # always choosing the highest overlap first.\n best_prior_idx, best_prior_overlap = P.ArgMaxWithValue(1)(overlaps)\n cast = P.Cast()\n idx_j, _ = P.ArgMaxWithValue(0)(best_prior_overlap)\n # Find i, the highest overlap anchor with this gt\n\n i = best_prior_idx[idx_j]\n\n # Set all other overlaps with i to be -1 so that no other gt uses it\n overlaps[:, i] = mindspore.ops.ScalarCast()(-1, mindspore.float32)\n # Set all other overlaps with j to be -1 so that this loop never uses j again\n overlaps[idx_j, :] = mindspore.ops.ScalarCast()(-1, mindspore.float32)\n\n best_truth_overlap[i] = mindspore.ops.ScalarCast()(2, mindspore.float32)\n\n\n best_truth_idx = cast(best_truth_idx, mindspore.float16)\n new_best_truth_idx = mindspore.ops.expand_dims(best_truth_idx, 0)\n new_best_truth_idx[::, i] = idx_j\n best_truth_idx = mindspore.ops.Squeeze()(new_best_truth_idx)\n\n best_truth_idx = cast(best_truth_idx, mindspore.int32)\n\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n\n conf[best_truth_overlap < neg_thresh] = 0 # label as background\n\n loc = encode(matches, priors)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n\n conf_t[idx] = conf # [num_priors] top class label for each prior\n\n best_truth_idx = P.Cast()(best_truth_idx, mindspore.int32)\n idx_t[idx] = best_truth_idx # [num_priors] indices for lookup\n\n return 0", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best 
class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output", "def ranking_loss(scores, targets):\n costs = targets[1]\n true_ants = targets[2]\n weights = targets[4] if len(targets) == 5 else None\n true_ant_score = torch.gather(scores, 1, true_ants)\n top_true, _ = true_ant_score.max(dim=1)\n tmp_loss = scores.add(1).add(\n top_true.unsqueeze(1).neg()\n ) # 1 + scores - top_true\n if weights is not None:\n tmp_loss = tmp_loss.mul(weights)\n tmp_loss = tmp_loss.mul(costs)\n loss, _ = tmp_loss.max(dim=1)\n out_score = torch.sum(loss)\n return out_score / n", "def block_strategize(\n upper_limit: \"compute up to this dimension (inclusive)\",\n lower_limit: \"\"\"compute starting at this dimension,\n if ``None`` lowest unknown dimension is chosen.\"\"\" = None,\n c: \"overshoot parameter\" = 0.25,\n strategies_and_costs: \"previously computed strategies and costs to extend\" = None,\n lattice_type: \"one of 'qary' or 'qary-lv'\" = \"qary\",\n dump_filename: \"\"\"results are regularly written to this filename, if ``None``\n then ``data/fplll-block-simulations-{lattice_type}.sobj`` is used.\"\"\" = None,\n ncores: \"number of cores to use in parallel\" = 4,\n gh_factor: \"set target_norm^2 to gh_factor * gh^2\" = 1.00,\n rb: \"compute pruning parameters for `GH^(i/rb)` for `i in -rb, …, rb`\" = 1,\n greedy: \"use Greedy pruning strategy\" = False,\n sd: \"use self-dual strategy\" = False,\n preproc_loops: \"number of preprocessing tours\" = 2,\n ignore_preproc_cost: \"assume all preprocessing has the cost of LLL regardless of block size\" = False,\n):\n\n dump_filename, strategies, costs, lower_limit = _prepare_parameters(\n dump_filename,\n c,\n strategies_and_costs,\n lower_limit,\n lattice_type,\n preproc_loops,\n greedy,\n sd,\n ignore_preproc_cost,\n )\n\n if ncores > 1:\n workers = Pool(ncores)\n\n from cost import sample_r, _pruner_precision\n\n for d in range(lower_limit, upper_limit + 1):\n D = int((1 + c) * d + 1)\n r = sample_r(D, lattice_type=lattice_type)\n\n float_type = _pruner_precision(d, greedy)\n\n try:\n start = max(strategies[d - 1].preprocessing_block_sizes[-1], 2)\n except IndexError:\n start = 2\n\n if d < 60:\n stop = d\n else:\n stop = min(start + max(8, ncores), d)\n\n best = 
None\n\n for giant_step in range(start, stop, ncores):\n jobs, results = [], []\n for baby_step in range(giant_step, min(stop, giant_step + ncores)):\n opts = {\n \"greedy\": greedy,\n \"sd\": sd,\n \"gh_factor\": gh_factor,\n \"float_type\": float_type,\n \"radius_bound\": rb,\n \"preproc_loops\": preproc_loops,\n \"ignore_preproc_cost\": ignore_preproc_cost,\n }\n jobs.append((r, d, c, baby_step, strategies, costs, opts))\n\n if ncores == 1:\n for job in jobs:\n results.append(cost_kernel(job))\n else:\n results = workers.map(cost_kernel, jobs)\n\n do_break = False\n for cost, strategy in results:\n logging.debug(\n \"%3d :: C: %5.1f, P: %5.1f c: %.2f, %s\"\n % (d, log(cost[\"total cost\"], 2), log(cost[\"preprocessing\"], 2), cost[\"c\"], strategy)\n )\n if best is None or cost[\"total cost\"] < best[0][\"total cost\"]:\n best = cost, strategy\n if cost[\"total cost\"] > 1.1 * best[0][\"total cost\"]:\n do_break = True\n break\n if do_break:\n break\n\n costs.append(best[0])\n strategies.append(best[1])\n logging.info(\n \"%3d :: C: %5.1f, P: %5.1f c: %.2f, %s\"\n % (d, log(costs[-1][\"total cost\"], 2), log(costs[-1][\"preprocessing\"], 2), costs[-1][\"c\"], strategies[-1])\n )\n pickle.dump((strategies, costs), open(dump_filename, \"wb\"))\n dump_strategies_json(dump_filename.replace(\".sobj\", \"-strategies.json\"), strategies)\n\n return strategies, costs" ]
[ "0.6308325", "0.60905", "0.586987", "0.5617052", "0.5437539", "0.5403514", "0.5329129", "0.52851343", "0.52182657", "0.51694614", "0.5163954", "0.5138482", "0.5117913", "0.5107134", "0.5100632", "0.5094282", "0.50677717", "0.50626785", "0.50522244", "0.5046674", "0.50465786", "0.5039915", "0.49960294", "0.49689108", "0.4961738", "0.49561733", "0.49556974", "0.49542636", "0.4953122", "0.49509645", "0.4932803", "0.4897385", "0.48698118", "0.48691836", "0.48311475", "0.47959518", "0.47870895", "0.47839984", "0.47839984", "0.4768901", "0.47626263", "0.47603032", "0.4752373", "0.47476202", "0.47436485", "0.47215676", "0.47179472", "0.47176582", "0.47157285", "0.47148842", "0.47000504", "0.46973205", "0.46954396", "0.46935242", "0.46928272", "0.4690117", "0.46859843", "0.46842518", "0.46767792", "0.4675464", "0.46738228", "0.46688768", "0.46633276", "0.46578202", "0.4652691", "0.46468985", "0.4646102", "0.46405864", "0.4630156", "0.46272844", "0.4626046", "0.4622624", "0.46168125", "0.46134797", "0.46122724", "0.46027413", "0.4601757", "0.4599873", "0.45979682", "0.45972824", "0.45937043", "0.45935372", "0.45909336", "0.45904908", "0.4590447", "0.45895383", "0.45894447", "0.4585803", "0.45839995", "0.458319", "0.45812967", "0.4578409", "0.45634297", "0.45623183", "0.4562316", "0.45607644", "0.4557989", "0.45567152", "0.45504087", "0.45452508" ]
0.7924888
0
Normalizes an unnormalized histogram / probability distribution
def normalize(counts):
    numvals = sum(counts.itervalues())
    if numvals <= 0:
        return counts
    res = dict()
    for (k,cnt) in counts.iteritems():
        res[k] = float(cnt)/float(numvals)
    return res
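A minimal usage sketch for the document snippet above (not part of the dataset record; the counts dict is hypothetical, and Python 2 is assumed since the function relies on dict.itervalues/iteritems):

# Hypothetical label counts; normalize() rescales them into probabilities that sum to 1.
counts = {'yes': 3, 'no': 1}
probs = normalize(counts)          # {'yes': 0.75, 'no': 0.25}
assert abs(sum(probs.values()) - 1.0) < 1e-9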
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise_histogram(histogram):\n total_sum = np.sum(histogram)\n for i in range(len(histogram)):\n histogram[i] /= total_sum\n\n return histogram", "def normalization(distribution):\r\n total_sum = 0\r\n for number in distribution.values():\r\n total_sum += number\r\n \r\n for bin in distribution.keys():\r\n distribution[bin] = float(distribution[bin]) / total_sum\r\n\r\n return distribution", "def normalize(histogram):\n nbins = histogram.GetNbinsX()\n integral = histogram.Integral(1,nbins)\n newhist = histogram.Clone()\n newhist.Reset()\n for bin in range(1,nbins+1):\n ibinY = histogram.GetBinContent(bin)\n newhist.SetBinContent(bin,ibinY/integral)\n return newhist", "def normalize_hist(hist) -> [float]:\n hsum = np.sum(hist)\n newhist = []\n for hi in hist:\n newhist.append(hi/hsum)\n\n return newhist", "def normalize_probability(p_unnormalized):\n p_normalized=p_unnormalized/p_unnormalized.sum(axis=0)\n return p_normalized", "def OF1_CalculateNormalizedHistogram(image):\n\n raw = OF1_CalculateRawHistogram(image)\n norm = np.zeros(256, np.float_)\n\n for i in range(256):\n norm[i] = raw[i] / image.size\n\n return norm", "def norm_hist(self, hist_lower, hist_upper, lower, upper, value) :\n norm = 1.\n if upper>lower :\n norm = hist_lower.Integral()+(hist_upper.Integral()-hist_lower.Integral())/abs(upper-lower)*(value-lower)\n return norm", "def normalized(self):\n total = self.total()\n result = Histogram()\n for value, count in self.items():\n try:\n result[value] = count / float(total)\n except UnorderableElements:\n result = Histogram.from_dict(dict(result), key=hash)\n result[value] = count / float(total)\n return result", "def test_histogram_weighted_normalised(self):\n bin_edges, hist, unc, band = hist_w_unc(\n self.input, weights=self.weights, bins=self.n_bins, normed=True\n )\n\n np.testing.assert_array_almost_equal(self.bin_edges, bin_edges)\n np.testing.assert_array_almost_equal(self.hist_weighted_normed, hist)\n np.testing.assert_array_almost_equal(self.unc_weighted_normed, unc)\n np.testing.assert_array_almost_equal(self.band_weighted_normed, band)", "def normalize(distrib):\n if isinstance(distrib, dict):\n total_prob = sum(distrib.values())\n if total_prob <= 0.:\n # TODO: check bug > is this a sufficient condition for the error cases?\n # TODO: check bug > Is just returning distrib enough?\n InferenceUtils.log.warning(\"all assignments in the distribution have a zero probability, cannot be normalised\")\n return distrib\n\n for key, value in distrib.items():\n distrib[key] = distrib[key] / total_prob\n return distrib", "def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def normalize(x):\r\n return x/norm(x)", "def normalize(image, xbar, sigma):\n image = image.transpose(2, 0, 1) # Switch to channel-first\n mean, std = np.array(xbar), np.array(sigma)\n image = (image - mean[:, None, None]) / std[:, None, None]\n return image.transpose(1, 2, 0)", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)", "def normalize(X, mu, sigma):\n return (X - mu) / sigma", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def hist_normalize_linear(data, new_min, new_max):\n data_min = np.ma.min(data)\n data_max = 
np.ma.max(data)\n scaled = (data - data_min) * ((new_max - new_min) / (data_max - data_min))\n scaled.mask = data.mask\n return scaled", "def normalize(dist):\n return np.array(dist, 'double') / np.sum(dist)", "def normalize(counts):\n counts_sum0 = np.sum(counts, axis=0, keepdims=True)\n counts_sum1 = np.sum(counts, axis=1, keepdims=True)\n counts_sum = np.sum(counts)\n\n # Get residuals\n theta = 100\n mu = counts_sum1 @ counts_sum0 / counts_sum\n z = (counts - mu) / np.sqrt(mu + mu ** 2 / theta)\n\n # Clip to sqrt(n)\n n = counts.shape[0]\n z[z > np.sqrt(n)] = np.sqrt(n)\n z[z < -np.sqrt(n)] = -np.sqrt(n)\n\n return z", "def normalize(self):\n self._data /= self.norm()", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize(probabilities):\n for person in probabilities:\n\n # normalize the \"gene\"\n geneSum = probabilities[person][\"gene\"][0] + probabilities[person][\"gene\"][1] + probabilities[person][\"gene\"][2]\n for i in range(3):\n probabilities[person][\"gene\"][i] /= geneSum\n\n # normalize the \"trait\"\n traitSum = probabilities[person][\"trait\"][True] + probabilities[person][\"trait\"][False]\n probabilities[person][\"trait\"][True] /= traitSum\n probabilities[person][\"trait\"][False] /= traitSum", "def normalize(vals):\n min_val = torch.min(vals)\n max_val = torch.max(vals)\n return (vals - min_val) / (max_val - min_val)", "def normal_upper_bound(probability: float, mu: float = 0, sigma: float = 1) -> float:\n return inverse_normal_cdf(probability, mu, sigma)", "def normalize(normal_map, norm_thres=0.5):\n norm = np.linalg.norm(normal_map, axis=-1)\n valid = norm > norm_thres\n normal_map[valid] = normalize_vec(normal_map[valid], axis=1)\n return normal_map", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def convertHermiteToNormal(self,x):\n return self.sigma*x+self.untruncatedMean()", "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def normal_upper_bound(probability: float,\n mu: float = 0,\n sigma: float = 1) -> float:\n return inverse_normal_cdf(probability, mu, sigma)", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normal_upper_bound(probability, mu=0, sigma=1):\r\n return ds_probability.inverse_normal_cdf(probability, mu, sigma)", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def stdProbabilityNorm(self):\n B = factorial(self.alpha-1)*factorial(self.beta-1)/factorial(self.alpha+self.beta-1)\n norm = 1.0/(2**(self.alpha+self.beta-1)*B)\n return norm", "def stdProbabilityNorm(self):\n return 0.5", "def normal_upper_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(probability, mu, sigma)", "def normal_upper_bound(probability, mu=0, 
sigma=1):\n return inverse_normal_cdf(probability, mu, sigma)", "def normal_upper_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(probability, mu, sigma)", "def normal_upper_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(probability, mu, sigma)", "def normal_upper_bound(probability, mu=0, sigma=1):\n return inverse_normal_cdf(probability, mu, sigma)", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def normalize(dist):\n\t\n\tif isinstance(dist, dict):\n\t\t# Make sure our keys/values line up in their lists\n\t\tkeys = dist.keys()\n\t\tvals = [dist[k] for k in keys]\n\t\tnormalize(vals)\n\t\tfor k,v in zip(keys,vals):\n\t\t\tdist[k] = v\n\t\treturn\n\tfdist = [float(d) for d in dist]\n\ts = sum(fdist)\n\tif s == 0:\n\t\treturn\n\tfdist = [d/s for d in fdist]\n\tfor i,d in enumerate(fdist):\n\t\tdist[i] = d", "def to_norm(data):\n print('The dtgeostats.utils.to_norm function is under construction - use with caution...')\n mu = np.mean(data)\n sd = np.std(data)\n z = (data - mu) / sd\n bins = len(z)\n\n # Get cumulative probability and normal-score values\n counts, bin_edges = np.histogram(z, bins=bins, normed=True)\n cprob = np.cumsum(counts)/sum(counts)*0.99 # = f[:, 1] or inv[:, 0]\n nscore_value = (bin_edges[:-1] + bin_edges[1:]) / 2 # = f[:, 0] or inv[:, 1]\n\n # Apply to data\n z = st.norm(0, 1).ppf(cprob)\n z = np.where(z == np.inf, np.nan, z)\n z = np.where(np.isnan(z), np.nanmax(z), z)\n return z, cprob, nscore_value", "def normalize(init_probs):\n total_prob = sum(init_probs)\n if total_prob > 0. + InferenceUtils._eps:\n for idx in range(len(init_probs)):\n init_probs[idx] = init_probs[idx] / total_prob\n\n # TODO: check refactor > do we have to return distrib with new instance?\n return init_probs", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x /= x.sum()\n return x", "def normalization(sample):\n sample = sample + 100\n # 2^20 = 1048576\n return np.log2(sample * 1048576/np.sum(sample))", "def normalize(freqs):\n square_sum = 0\n for _,v in freqs.items():\n square_sum += v**2\n rss = math.sqrt(square_sum)\n for k in freqs:\n freqs[k] /= rss\n return freqs", "def normaliseTraces(ds, verbose=False):\n normalisation = measureUnfoldedLevel(ds)\n if verbose:\n points = getIndexedTraces(ds)\n pyplot.figure(figsize=(9, 5.6))\n pyplot.hist2d(points[:,0], points[:,1], \n bins=(70*2, 50*2),\n range = [[0, 900], [-0.45, 0.05]],\n cmax = 100000/2 # clip max\n );\n pyplot.plot([0,700], [normalisation]*2, \"-r\")\n \n return ds.trace.copy() / np.abs(normalisation)", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # random normal too slow\n #out_random = np.random.normal(0, 1, size = volume.shape)\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = 
np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def normalize(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )\n\n ret_df = df.copy()\n t = ret_df[comm_keys]\n ret_df[comm_keys] = (t - t.mean()) / t.std()\n\n return ret_df", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def _normalize(M):\r\n\r\n minVal = np.min(M)\r\n maxVal = np.max(M)\r\n\r\n Mn = M - minVal;\r\n\r\n if maxVal == minVal:\r\n return np.zeros(M.shape);\r\n else:\r\n return Mn / (maxVal-minVal)", "def cs4243_histnorm(image, grey_level=256):\n res_image = image.copy()\n ##your code here ###\n min_pixel = np.amin(res_image)\n max_pixel = np.amax(res_image)\n res_image = (res_image - min_pixel) / (max_pixel - min_pixel) * (grey_level-1)\n ####\n return res_image", "def entropy(x, bins, normalize=False, xy_probabilities=False):\n # calculate probabilities if xy_probabilities == False\n if xy_probabilities:\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n \n # add a small number to all probabilities if zero occurs\n if x.any(0):\n p = x + 1e-15\n else:\n p = x\n else:\n # get the bins\n bins = np.histogram_bin_edges(x, bins)\n\n # calculate the empirical probabilities\n count = np.histogram(x, bins=bins)[0]\n\n # if counts should be None, raise an error\n if np.sum(count) == 0:\n raise ValueError('The histogram cannot be empty. Adjust the bins to ' +\n 'fit the data')\n # calculate the probabilities\n p = (count / np.sum(count)) + 1e-15\n\n\n # calculate the Shannon Entropy\n if normalize:\n # get number of bins\n nbins = len(p)\n # maximal entropy: uniform distribution\n normalizer = np.log2(nbins) \n\n return - p.dot(np.log2(p)) / normalizer\n else:\n return - p.dot(np.log2(p))", "def _normalize(self, value_dict):\n median = np.median([value_dict[i] for i in list(value_dict.keys())])\n n = len(value_dict.keys())\n if median < 1.0 / float(n):\n divisor = 1.0 / float(n)\n else:\n divisor = median\n return_dict = {}\n for i in list(value_dict.keys()):\n return_dict[i] = float(value_dict[i]) / float(divisor)\n return return_dict", "def reduced_normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x[mean] = 0.\n x /= x.sum()\n return x", "def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x", "def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x", "def freqnorm(freq):\n t = 0.0\n for i in freq:\n t += freq[i]\n for i in freq:\n freq[i] /= t\n return freq", "def normal_distr(x, mu, sigma, s=1):\n \n return s * 1/(sigma * torch.sqrt(torch.tensor(2 * np.pi))) * torch.exp((-1/2) * ((x - mu) / sigma) ** 2)", "def sum_normed (self):\n norm = self.sum\n return Hist2D (\n self.xbins, self.ybins,\n self.values / norm,\n self.errors / norm)", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def _quantile_normalization(arr, mode=\"mean\"):\n n = 
len(arr)\n perc = percentileofscore\n arr_ = arr.copy()[~np.isnan(arr)]\n out = np.zeros(n)\n for i in range(n):\n if not np.isnan(arr[i]):\n out[i] = norm.ppf(perc(arr_, arr[i], mode) / 100.)\n else:\n out[i] = np.nan\n return out", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def normalizer(sampler, vmin, vmax, scaling='linear',\n bias=0.5, contrast=1):\n def result(x, y):\n raw = sampler(x, y)\n r = normalize(raw, vmin, vmax, bias, contrast, scaling)\n return r\n return result", "def _normalize(self, inp):\n \n return inp/inp.sum()", "def normalized_hist(data1, data2, ax, color1, color2, bin_number=50):\n D1Hist, D1bins = np.histogram(data1, bins=bin_number, density=True)\n nD1Hist, nD1bins = np.histogram(data2, bins=bin_number, density=True)\n center = (D1bins[:-1] + D1bins[1:])/2\n width = 0.7 * (D1bins[1] - D1bins[0])\n medianD1 = np.median(data1)\n medianD2 = np.median(data2)\n\n ax.bar(center, D1Hist, width=width, align='center', label='D1', alpha=0.5, color=color1)\n ax.bar(center, nD1Hist, width=width, align='center', label='nD1', alpha=0.5, color=color2)\n ax.legend()\n ymin, ymax = ax.get_ybound()\n ax.vlines(medianD1, ymin, ymax, color=color1)\n ax.vlines(medianD2, ymin, ymax, color=color2)\n return ax", "def normalise(x, dim=1):\n norm = torch.sqrt( torch.pow(x,2.).sum(dim) )\n if dim>0:\n x /= norm.unsqueeze(dim)\n return x", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize(arr, stats=False):\n arr = np.array(arr)\n mean = arr.mean()\n std = arr.std()\n normed = (arr - mean) / std\n if not stats:\n return normed\n return normed, mean, std", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...", "def normalize(self, factor):", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def vis_normalize(a, s=0.1):\n return s * (a - a.mean()) / (max(a.std(), 1e-4)) + 0.5", "def norm(predictors, normalize=None):\n \n if normalize == \"std\":\n predictors_norm = (predictors - predictors.mean()) / predictors.std()\n elif normalize == \"minmax\":\n predictors_norm = (predictors - predictors.min()) / (predictors.max() - predictors.min())\n\n return predictors_norm", "def normal_lower_bound(probability: float, mu: float = 0, sigma: float = 1) -> float:\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def normalize(attributions):\n # keepdims for division broadcasting\n total = np.abs(attributions).sum(axis=1, keepdims=True)\n\n return np.abs(attributions) / total", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalise(da):\n return (da - da.min()) / (da.max() - da.min())", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def test_histogram_weighted_not_normalised(self):\n bin_edges, hist, unc, band = hist_w_unc(\n self.input, weights=self.weights, bins=self.n_bins, normed=False\n )\n\n np.testing.assert_array_almost_equal(self.bin_edges, bin_edges)\n np.testing.assert_array_almost_equal(self.hist_weighted, hist)\n np.testing.assert_array_almost_equal(self.unc_weighted, unc)\n np.testing.assert_array_almost_equal(self.band_weighted, band)", "def 
log_normalize(log_prob, axis):\n log_sum = logsumexp(log_prob, axis=axis)\n \n if not isinstance(log_sum, np.ndarray):\n log_sum = np.array([log_sum])\n if log_prob.shape[0] == log_sum.shape[0]:\n # column normalize \n return (log_prob.transpose() - log_sum).transpose()\n else:\n # row normalize\n return log_prob - log_sum", "def _smooth_distribution(p, eps=0.0001):\n is_zeros = (p == 0).astype(np.float32)\n is_nonzeros = (p != 0).astype(np.float32)\n n_zeros = is_zeros.sum()\n n_nonzeros = p.size - n_zeros\n if not n_nonzeros:\n raise ValueError('The discrete probability distribution is malformed. All entries are 0.')\n eps1 = eps * float(n_zeros) / float(n_nonzeros)\n assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)\n hist = p.astype(np.float32)\n hist += eps * is_zeros + (-eps1) * is_nonzeros\n assert (hist <= 0).sum() == 0\n return hist", "def un_normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n mean = torch.FloatTensor(mean).view(1,3,1,1)\n std = torch.FloatTensor(std).view(1,3,1,1)\n \n image = tensor.cpu().detach()\n image = image*std+mean\n image = image.numpy()\n \n image = np.transpose(image, (0,2,3,1))\n \n #print(np.max(image))\n #print(np.min(image))\n return image", "def effectsize_normal(self, prob=None):\n if prob is None:\n prob = self.prob1\n return stats.norm.ppf(prob) * np.sqrt(2)", "def normal_lower_bound(probability: float,\n mu: float = 0,\n sigma: float = 1) -> float:\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def percentile_normalization(data: np.ndarray, percentile: int = 1) -> np.ndarray:\n\n min_percentile = np.percentile(data, percentile)\n max_percentile = np.percentile(data, 100 - percentile)\n\n # limit maximum intensity of data by max_percentile\n data[data >= max_percentile] = max_percentile\n\n # limit minimum intensity of data by min_percentile\n data[data <= min_percentile] = min_percentile\n\n return data", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def norm(self):" ]
[ "0.7903847", "0.773003", "0.76129586", "0.73363006", "0.724139", "0.6925099", "0.6923839", "0.6893623", "0.6813233", "0.6747994", "0.6707329", "0.6559963", "0.6552609", "0.65408355", "0.65334505", "0.65331537", "0.6515405", "0.6507021", "0.6436819", "0.6389514", "0.6374892", "0.6368749", "0.6353334", "0.6347107", "0.6337352", "0.6331567", "0.6329658", "0.6329658", "0.6323587", "0.6307619", "0.6293717", "0.62853926", "0.62818074", "0.6275611", "0.62497735", "0.622897", "0.6225197", "0.6220885", "0.6220885", "0.6220885", "0.6220885", "0.6220885", "0.61961997", "0.61668783", "0.6165872", "0.61489433", "0.6147824", "0.6147824", "0.613233", "0.6117983", "0.6111177", "0.60903347", "0.6085307", "0.60782534", "0.6077356", "0.6072542", "0.6068619", "0.60573494", "0.6051811", "0.60492", "0.6047017", "0.60433143", "0.6040236", "0.6034481", "0.6034481", "0.60274583", "0.6025287", "0.60140395", "0.6010335", "0.60077447", "0.60020506", "0.5999237", "0.599112", "0.5989684", "0.5986892", "0.59868", "0.5983899", "0.5983563", "0.59779614", "0.59763503", "0.5970131", "0.59637815", "0.59605956", "0.5953193", "0.5949466", "0.59332937", "0.5923996", "0.5910387", "0.59022135", "0.59007037", "0.5897135", "0.5886923", "0.5882373", "0.5878954", "0.58784086", "0.5869006", "0.5864357", "0.58631575", "0.58602", "0.585854" ]
0.62111866
42
Predicts the label of the given entry. If it contains None elements (missing values), the return value is a probability distribution over possible outcomes (given by a dict).
def predict(self,entry):
    if self.type == 'v':
        return self.value
    v = entry[self.feature]
    if v is None:
        #multiple childrens' predictions
        counts = defaultdict(int)
        labels = self.predict_all(entry,counts)
        if len(counts) == 1:
            return counts.keys()[0]
        #return a probability distribution
        return normalize(counts)
        #maximum likelihood
        #return argmax(counts)
    if self.type == 's':
        c = None
        try:
            c = self.children[v]
        except KeyError:
            #print "Unseen value for feature",self.feature,": ",v
            best = None
            bestDist = float('inf')
            for (val,c) in self.children.iteritems():
                if abs(val - v) < bestDist:
                    bestDist = abs(val - v)
                    best = c
            c = best
        return c.predict(entry)
    elif self.type == 'i':
        if v <= self.value:
            return self.children[0].predict(entry)
        else:
            return self.children[1].predict(entry)
    raise RuntimeError("Invalid DecisionTreeNode type?")
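A hedged sketch of how a caller might handle the dual return type described in the query above; the `out` value is an assumed stand-in for a node.predict(entry) result on an entry with a missing feature and is not produced by the dataset code:

# When the entry has a None feature, predict() returns a dict distribution;
# otherwise it returns a single label. A caller can collapse the dict to the
# maximum-likelihood label when a hard prediction is needed.
out = {'yes': 0.75, 'no': 0.25}    # assumed stand-in for node.predict(entry)
if isinstance(out, dict):
    label = max(out, key=out.get)  # -> 'yes'
else:
    label = out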
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observation[i]\n\t\t\t\tpossiblity *= self.gaussians_calc(value, mean, std)\n\t\t\tif p_max < possiblity:\n\t\t\t\tp_max = possiblity\n\t\t\t\tpredict = label\n\n\t\treturn predict", "def predict(self, key):\n return self.counts.get(key, 1.0)", "def predict(self,entry):\n assert self.root is not None,\"Decision tree is not initialized\"\n return self.root.predict(entry)", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def get_label(prob_label, target):\n return target if random.random() <= prob_label else 1 - target", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def predict_probability_model(*args):\n final_data = None\n any_null = validate_none(args)\n if any_null:\n final_data = transform_fields(args[-3:])\n final_data = list(args[0:5]) + final_data\n predicted = test_model(final_data)\n converts, styles = user_converts(predicted)\n\n return [f'{predicted} %', converts] + styles", "def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def predict(self, input_data: dict)-> str:\n if self.probabilities is None or self.target_probabilities is None:\n raise ValueError('You need to fit the data first!!')\n\n # This will store target:probability for given dataset.\n all_probs = {} # a dict.\n\n # iterating all the target classes to find probab.. 
of it's occurence.\n\n for uniq_target_name in set(self.dataset[self.target_name]):\n probability = 1\n for feat_name in input_data:\n probability *= self.probabilities[feat_name][(input_data[feat_name], uniq_target_name)]\n probability *= self.target_probabilities[uniq_target_name]\n\n all_probs[probability] = uniq_target_name\n return all_probs[max(all_probs)]", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def predict(wav, labels, graph, input_name, output_name, how_many_labels):\n pred_lab, pred_prob=label_wav(wav, labels, graph, input_name, output_name, how_many_labels)\n return pred_lab, pred_prob", "def probability_of_default(model, prediction_features):\n return model.predict_proba(prediction_features)[:, 1]", "def compute_propability(word, label, dict):\n return dict[label][word] / sum(dict[label].values())", "def predict(sepal_length, sepal_width, petal_length, petal_width):\n data = np.array(\n [sepal_length, sepal_width, petal_length, petal_width]).reshape(1, -1)\n pred = champion.predict(data)[0]\n\n prediction = {\n 'label': str(pred),\n 'sepal_length': sepal_length,\n 'sepal_width': sepal_width,\n 'petal_length': petal_length,\n 'petal_width': petal_width\n }\n return prediction", "def predict(self, datum):\r\n probs = {}\r\n for class_ in set(self.train_classes):\r\n probs[class_] = self.distribution.class_prob[class_] * reduce(lambda x,y:x*y, [self.distribution.prob(feat_ind_feat[0],feat_ind_feat[1],class_) for feat_ind_feat in enumerate(datum)])\r\n return max(probs, key=lambda x:probs[x])", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predict_one(self):\n return (self.contexted.calc_p(\"\", self.seen + \"1\") /\n float(self.contexted.calc_p(\"\", self.seen)))", "def predict_next(self, seq):\n context = tuple(seq[-2:]) # last two words\n pc = self.probas[context] # conditional distribution\n words, probs = zip(*pc.items()) # convert to list\n return np.random.choice(words, p=probs)", "def predict(self, inputs):\n if self.use_logistic:\n return self.predict_labels_logistic(self.w, inputs)\n return predict_labels(self.w, inputs)", "def predict_prob(self, _input_data):\n yhat_probs = (self.merged_model).predict(_input_data, verbose=0)\n\n return yhat_probs[:, 0]", "def predict(probs):\n return np.argmax(probs, axis=0)", "def prediction(name=None, message=''):", "def predict_and_eval(arg_pair: EviPair):\n global NN_CORRECT_PRED, SAME_PRED\n\n user_choice = get_user_input(arg_pair)\n x_input, _ = x_and_y_from_evi_pair(arg_pair)\n nn_prediction = model.predict(x_input)[0][0]\n\n # Mapping from probabilities to\n\n pred_class = 2 if nn_prediction > 
0.5 else 1\n if pred_class == arg_pair.label:\n NN_CORRECT_PRED += 1\n if user_choice == pred_class:\n SAME_PRED += 1\n\n return pred_class", "def predict(self, input_data):\n if not self.predict_as_probability_:\n return self.ensemble_model_.predict(input_data)\n else:\n return self.ensemble_model_.predict_proba(input_data)", "def predict_all(self,entry,counts):\n if self.type == 'v':\n counts[self.value] += 1\n return\n v = entry[self.feature]\n if v is None:\n for val,c in self.children.iteritems():\n c.predict_all(entry,counts)\n return\n if self.type == 's':\n c = None\n try:\n c = self.children[v]\n except KeyError:\n #print \"Unseen value for feature\",self.feature,\": \",v\n best = None\n bestDist = float('inf')\n for (val,c) in self.children.iteritems():\n if abs(val - v) < bestDist:\n bestDist = abs(val - v)\n best = c\n c = best\n c.predict_all(entry,counts)\n elif self.type == 'i':\n if v <= self.value:\n self.children[0].predict_all(entry,counts)\n else:\n self.children[1].predict_all(entry,counts)\n return", "def predict(probs):\n # Your code here.\n return np.argmax(probs, axis=1)", "def predict_labels(self, repo_owner, repo_name, issue_num):\n logging.info(f'Predicting labels for the issue #{issue_num} from {repo_owner}/{repo_name}')\n # get probabilities of labels for an issue\n label_probabilities, issue_embedding = self.predict_issue_probability(repo_owner, repo_name, issue_num)\n\n # get label info from local file\n label_columns = self.load_label_columns()\n label_names = label_columns['labels']\n label_thresholds = label_columns['probability_thresholds']\n\n # check thresholds to get labels that need to be predicted\n predictions = {'labels': [], 'probabilities': []}\n for i in range(len(label_probabilities)):\n # if the threshold of any label is None, just ignore it\n # because the label does not meet both of precision & recall thresholds\n if label_thresholds[i] and label_probabilities[i] >= label_thresholds[i]:\n predictions['labels'].append(label_names[i])\n predictions['probabilities'].append(label_probabilities[i])\n return predictions, issue_embedding", "def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product", "def predict(self, s):\n return max([(self.probability_s(s, c), c) for c in self.likelihood_probability.keys()])[1]", "def predict_picture_labels(picture):\n image_url = picture.image.url\n response = requests.post(PREDICT_ENDPOINT, json={\n 'image_url': image_url,\n })\n data = response.json()\n # Get the best matching labels\n right_labels = data['right_probabilities']\n right_label_id, right_label_prob = max(right_labels.items(), key=lambda i: i[1])\n left_labels = data['left_probabilities']\n left_label_id, left_label_prob = max(left_labels.items(), key=lambda i: i[1])\n # Save the labels\n picture.recognized_left_label_id = left_label_id\n picture.recognized_left_probability = left_label_prob\n picture.recognized_right_label_id = right_label_id\n picture.recognized_right_probability = right_label_prob\n picture.save()\n return picture", "def predict_single(self, line):\n # print(line)\n prob_list = {}\n for claz in self.class_list:\n prob_list[claz] = 1\n\n # for each cat column\n for col in self.cat_cols:\n val = line[col]\n for claz in 
self.class_list:\n prob_list[claz] *= self.prob_hub[col][claz][val]\n\n # for each num column\n for col in self.num_cols:\n val = line[col]\n # for each class\n for claz in self.class_list:\n mean, std = self.prob_hub[col][claz]\n prob_list[claz] *= calculate_prob(val, mean, std)\n\n return max(prob_list.items(), key=operator.itemgetter(1))[0]", "def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction", "def predict(self, observation):\n\n loss, prediction_probs = self.session.run(\n [self.loss, self.predictions],\n feed_dict={self.input_placeholder: observation,\n self.labels_placeholder: np.zeros(len(observation)),\n self.actions_placeholder: np.zeros((len(observation), self.num_actions))\n })\n\n return prediction_probs", "def predict():\n try:\n\n with open('configs.yaml', 'r') as f:\n configs = yaml.safe_load(f)\n\n if 'api' in configs and 'model_path' in configs['api']:\n model_path = configs['api']['model_path']\n if 'api' in configs and 'scaler_path' in configs['api']:\n scaler_path = configs['api']['scaler_path']\n\n model = Model(model_path=model_path, scaler_path=scaler_path)\n features_name = [\n 'GP', 'MIN', 'PTS', 'FGM',\n 'FGA', 'FG%', '3P Made', '3PA',\n '3P%', 'FTM', 'FTA', 'FT%', 'OREB',\n 'DREB', 'REB', 'AST', 'STL', 'BLK', 'TOV'\n ]\n\n x = []\n for f in features_name:\n value = request.args.get(f)\n if value: x.append(value)\n else : x.append(0)\n\n x_minmax = model.scaler.transform([x])\n y_pred = model.model.predict(x_minmax)[0]\n return \"Prediction: \" + str(y_pred) + \", so this player \" + {0:'is not', 1:'is'}[y_pred] + \" worth investing in NBA\"\n except ValueError as e:\n raise InvalidUsage(str(e), status_code=500)\n except yaml.YAMLError as e:\n raise InvalidUsage(str(e), status_code=500)\n except Exception as e:\n raise InvalidUsage(str(e), status_code=500)", "def predict(self, observation, *args, **kwargs):\n if self.env is not None and np.random.rand() <= self.epsilon:\n action = random.randrange(self.action_size)\n else:\n act_values = self.policy.predict(observation)\n action = np.argmax(act_values[0])\n return action, None", "def predict(self, X):\r\n return 1 if self.predict_prob(X) > 0.5 else 0", "def convertclasstoemotion(pred):\n \n label_conversion = {'0': 'neutral',\n '1': 'calm',\n '2': 'happy',\n '3': 'sad',\n '4': 'angry',\n '5': 'fearful',\n '6': 'disgust',\n '7': 'surprised'}\n\n for key, value in label_conversion.items():\n if int(key) == pred:\n label = value\n return label", "def a_value(y_true, y_pred_prob, zero_label=0, one_label=1):\n \n idx = np.isin(y_true, [zero_label, one_label])\n labels = y_true[idx]\n prob = y_pred_prob[idx, zero_label]\n sorted_ranks = labels[np.argsort(prob)]\n \n n0, n1, sum_ranks = 0, 0, 0\n n0 = np.count_nonzero(sorted_ranks==zero_label)\n n1 = np.count_nonzero(sorted_ranks==one_label)\n sum_ranks = np.sum(np.where(sorted_ranks==zero_label)) + n0\n\n return (sum_ranks - (n0*(n0+1)/2.0)) / float(n0 * n1) # Eqn 3", "def predict(self, row):\n label_vote = dict()\n for i in range(len(self.forest)):\n result = self.forest[i].predict(row)\n label = max(result, key=result.get)\n \n if label_vote.get(label, None) is None:\n label_vote[label] = 0\n\n label_vote[label] += 1\n \n return max(label_vote, key=result.get)", "def get_word_probability(self, label, term):\n\n if 'sod' 
in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def predict():\r\n variance=request.args.get('variance')\r\n skewness=request.args.get('skewness')\r\n curtosis=request.args.get('curtosis')\r\n entropy=request.args.get('entropy')\r\n scaling=scaler.transform([[variance,skewness,curtosis,entropy]])\r\n prediction=classifier.predict(scaling)\r\n if prediction[0] == 0:\r\n return 'It is an Authentic Note'\r\n else:\r\n return \"It is a fake note\"", "def conditional_prob(self, label, datapoint):\r\n\r\n # REPLACE THE COMMAND BELOW WITH YOUR CODE\r\n feat_vec = self.x[datapoint]\r\n\r\n if label == 1:\r\n return self.conditional_prob_1(feat_vec)\r\n\r\n return 1 - self.conditional_prob_1(feat_vec)", "def predict(self, data):\t\t\n\t\tpredictions = {}\n\t\tfor tree in self.trees:\n\t\t\tprediction, prob = tree.predict(data)\n\t\t\tif prediction in predictions:\n\t\t\t\tpredictions[prediction] += prob\n\t\t\telse:\n\t\t\t\tpredictions[prediction] = prob\n\t\treturn max(predictions, key=predictions.get)", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def predict_one(self, rep, costs=0.0):\n scores = rep.dot(self.coef_.T) + costs\n # Manage the difference between scipy and numpy 1d matrices:\n scores = scores.reshape(self.n_classes_)\n # Set of highest scoring label indices (in case of ties):\n candidates = np.argwhere(scores==np.max(scores)).flatten()\n return random.choice(candidates)", "def PredictiveDist(self, label='pred'):\n # TODO: fill this in\n lam = 1\n pred = thinkbayes2.MakePoissonPmf(lam, 15)\n return pred", "def predict(self, phrases):\n Z = self.pipeline.transform(phrases)\n labels = self.classifier.predict(Z)\n if self.duplicates:\n for i, phrase in enumerate(phrases):\n label = self.dupes.get(phrase)\n if label is not None:\n labels[i] = label\n return labels", "def bl_predict(self, n_samples, data=None):\n\n if data is None:\n data = self.datas[self.train_idx]\n\n y_train = data.gen_labels()\n bl = DummyClassifier()\n bl.fit(np.random.rand(len(y_train), 1), y_train)\n\n return self._predict_proba(bl, np.random.rand(n_samples, 1))", "def predict_example(x, tree):\r\n\r\n # INSERT YOUR CODE HERE. 
NOTE: THIS IS A RECURSIVE FUNCTION.\r\n \r\n for branching_value, subtree in tree.items():\r\n attr_index = branching_value[0]\r\n attr_value = branching_value[1]\r\n split_decision = branching_value[2]\r\n\r\n if split_decision == (x[attr_index] == attr_value):\r\n if type(subtree) is dict:\r\n label = predict_example(x, subtree)\r\n else:\r\n label = subtree\r\n\r\n return label\r\n \r\n raise Exception('Function not yet implemented!')\r\n \r\n \"\"\"try:\r\n len(tree.keys())\r\n\r\n except Exception:\r\n return tree\r\n\r\n keys = tree.keys()\r\n item = list(keys)[0]\r\n\r\n if x[item[0]] == item[1]:\r\n return predict_example(x, tree[(item[0], item[1], True)])\r\n else:\r\n return predict_example(x, tree[(item[0], item[1], False)])\"\"\"", "def predict_example(ex: lit_types.JsonDict) -> lit_types.JsonDict:\n # Logit values for ['unknown', 'elephant', 'ant', 'whale'].\n logits = np.zeros((len(ANIMALS),))\n for db_rec in self._dataset.examples:\n animal_index = ANIMALS.index(db_rec['animal'])\n for field_name in self._dataset.spec():\n if ex[field_name] is None or db_rec[field_name] is None:\n continue\n if field_name == 'animal':\n continue\n field_spec_value = self._dataset.spec()[field_name]\n if (isinstance(field_spec_value, lit_types.CategoryLabel) or\n isinstance(field_spec_value, lit_types.Boolean)) and (\n ex[field_name] == db_rec[field_name]):\n logits[animal_index] += 1\n if isinstance(field_spec_value, lit_types.Scalar):\n logits[animal_index] += 1.0 - abs(ex[field_name] -\n db_rec[field_name])\n return scipy_special.softmax(logits)", "def label_predict(self, sentence):\n index_words = FileUtils.index_sentence(sentence, self.word_to_index)\n chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)\n result = np.zeros(Settings.class_num)\n if Settings.cuda:\n self.model.cuda()\n \n for chunk in chunks:\n with torch.no_grad():\n chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)\n if Settings.cuda:\n chunk = chunk.cuda()\n \n predict = self.model(chunk)\n if Settings.cuda:\n predict = predict.cpu()\n predict = predict.numpy()[0]\n result += predict\n result /= len(chunks)\n\n target_index = np.argmax(result) + 1\n label = self.index_to_label.get(str(target_index))\n score = np.max(result)\n return label, score", "def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])", "def predict_proba(self):\n ...", "def p_word_given_label(vocab, training_data, label):\n\n smooth = 1 # smoothing factor\n word_prob = {}\n # TODO: add your code here\n total_word = 0\n\n word_prob[None] = 0\n\n\n for dic in training_data:\n\n for index0, i0 in enumerate(dic['bow']):\n if (list(dic['bow'])[index0] in word_prob):\n continue;\n word_prob[list(dic['bow'])[index0]] = 0\n #word_prob[None] = 0\n if(dic[\"label\"] == label):\n for index, i in enumerate(dic[\"bow\"]):\n if(list(dic['bow'])[index] in vocab):\n if(list(dic['bow'])[index] in word_prob):\n\n word_prob[list(dic['bow'])[index]] += dic[\"bow\"][i]\n else:\n word_prob[list(dic['bow'])[index]] = dic[\"bow\"][i]\n else:\n if(None in word_prob):\n word_prob[None] += 
dic[\"bow\"][i]\n else:\n word_prob[None] = 0\n\n total_word += dic[\"bow\"][i]\n #word_prob [None] = 5\n\n for h in word_prob:\n word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1)))\n\n\n return word_prob", "def predict(self, title):\n \n return self.knn_model.predict(self.target_lang_topics[title])", "def _predict_and_return_argmax_label(self, example):\n model_out = self._model.predict([example])\n softmax = list(model_out)[0]['preds']\n argmax = np.argmax(softmax)\n return self._model.output_spec()['preds'].vocab[argmax]", "def predict(self, ex):\r\n # Eval mode\r\n self.network.eval()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n loss = self.criterion(score, label)\r\n probs = f.softmax(score, 1).data.cpu().numpy().tolist()\r\n predictions = np.argmax(score.data.cpu().numpy(), axis=1).tolist()\r\n\r\n return {\r\n 'loss': loss,\r\n 'probs': probs,\r\n 'predictions': predictions,\r\n }", "def posterior(self, model, sentence, label):\r\n\r\n if model == \"Simple\":\r\n cost = sum(\r\n [\r\n (\r\n (math.log(self.emission_probability[label[i]][sentence[i]]))\r\n + (math.log(self.posterior_probability[label[i]]))\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (math.log(1 / float(10 ** 10)))\r\n + (math.log(self.posterior_probability[label[i]]))\r\n )\r\n for i in range(len(sentence))\r\n ]\r\n )\r\n return cost\r\n elif model == \"Complex\":\r\n post_array = []\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * self.initial_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10)) * self.initial_probability[label[i]]\r\n )\r\n elif i == 1:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n )\r\n else:\r\n post_array.append(\r\n self.emission_probability[label[i]][sentence[i]]\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 2]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n if sentence[i] in 
self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n * (\r\n self.transition_probability[label[i - 1]][label[i]]\r\n * self.posterior_probability[label[i - 1]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * (\r\n self.transition_probability[label[i - 2]][label[i]]\r\n * self.posterior_probability[label[i - 2]]\r\n / self.posterior_probability[label[i]]\r\n )\r\n * self.posterior_probability[label[i]]\r\n )\r\n post_array = [math.log(p) for p in post_array]\r\n cost = sum(post_array)\r\n return cost\r\n\r\n elif model == \"HMM\":\r\n post_array = []\r\n for i in range(len(sentence)):\r\n if i == 0:\r\n post_array.append(\r\n (\r\n self.initial_probability[label[i]]\r\n * self.emission_probability[label[i]][sentence[i]]\r\n )\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (self.initial_probability[label[i]] * (1 / float(10 ** 8)))\r\n )\r\n else:\r\n emi = (\r\n (self.emission_probability[label[i]][sentence[i]])\r\n if sentence[i] in self.emission_probability[label[i]]\r\n else (1 / float(10 ** 10))\r\n )\r\n\r\n min_val = post_array[i - 1] * (\r\n (self.transition_probability[label[i - 1]][label[i]])\r\n )\r\n\r\n post_array.append(emi * min_val)\r\n\r\n post_array = [math.log(p) for p in post_array]\r\n\r\n cost = sum(post_array)\r\n\r\n return cost\r\n else:\r\n print(\"Unknown algorithm!\")", "def predict ( self, X: np.ndarray ):\n \n return self.predict_probability ( X = X )\n # End predict()", "def _predict(self, treenode, X):\n if treenode.is_leaf:\n return treenode.leaf_score\n elif pd.isnull(X[1][treenode.feature]):\n if treenode.nan_direction == 0:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)\n elif X[1][treenode.feature] < treenode.threshold:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. 
To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob", "def predict(self,X):\n if (int(self.classifier.predict(self.scaler.transform(X)))==-1):\n return \"popular\"\n else:\n return \"unpopular\"", "def predict(self, predPoints=None):", "def __do_predict(self, request, features):\n dmp_predictor.DmpPredictor().predict(request, features)\n\n return defines.ReturnCode.SUCC", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict_flair(sentence):\n if sentence == \"\":\n return 0\n text = flair.data.Sentence(sentence)\n # stacked_embeddings.embed(text)\n flair_sentiment.predict(text)\n value = text.labels[0].to_dict()['value'] \n if value == 'POSITIVE':\n result = text.to_dict()['labels'][0]['confidence']\n else:\n result = -(text.to_dict()['labels'][0]['confidence'])\n return round(result, 3)", "def predict():\n import trace\n trace.predict()", "def predict(X, est_dict):\n \n def sign(arr):\n for i,a in enumerate(arr):\n if a >= 0:\n arr[i] = 1\n else:\n arr[i] = 0\n return arr\n \n def change_labels(arr, l1, l2):\n for i,a in enumerate(arr):\n if a == l1:\n arr[i] = l2\n return arr \n \n total = 0 \n for (tree, alpha) in est_dict.values():\n total += alpha * change_labels(tree.predict(X), 0, -1)\n \n return sign(total)", "def predict(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]", "def predict(self, item, **kwargs):\n ## Encode the task information\n syl = ccobra.syllogistic_generalized.GeneralizedSyllogism(item)\n task_enc = syl.encoded_task\n enc_choices = [syl.encode_response(x) for x in item.choices]\n \n pred = self.get_answer(task_enc)\n \n return syl.decode_response(pred)", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict_proba(self):\n if self.rank_prob is None:\n raise ValueError('No results available. 
Did you already call predict(...)?')\n\n return np.array([sum(map(lambda x: x[1], result)) / len(result) for result in self.rank_prob])", "def predict_response_variable(self, **kwargs):\n pass", "def predict(self, seq):\n raise Exception(\"You cannot predict with a base predictor.\")", "def predict():\n # initialize the data dictionary that will be returned from the\n # view\n data = {\"success\": False}\n\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n # read the image in PIL formats\n img = flask.request.files[\"image\"].read()\n img = Image.open(io.BytesIO(img))\n\n # preprocess the image and prepare it for classification\n img = predictor.prepare_image(img, target_size=(299, 299), http_request=True)\n\n # classify the input image and then initialize the list\n # of predictions to return to the client\n predictions = predictor.model.predict(img)\n\n dog_label = predictor.decode_prediction(np.argmax(predictions, axis=-1)[0])\n print(dog_label)\n result = {\"label\" : str(dog_label), \"probability\" : float(np.max(predictions[0]))}\n data[\"predictions\"] = result\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n return flask.jsonify(data)", "def predict(self, example):\n return self.decisionTree.traverse_tree(example)", "def predict_(self, x):\n\t\tif len(x) < 1 or len(self.thetas) < 1 or x is None or self.thetas is None:\n\t\t\treturn None\n\t\treturn self.sigmoid_(np.matmul(self.add_intercept(x), self.thetas))", "def predict_bn(cp, prior0, prior1, data, attr):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # loops through test data and calculates a posterior probability for\n # each class\n attrs = attr['attr'].drop(attr.index[-1]).tolist()\n preds = []\n correct = 0\n for index, row in data.iterrows():\n actual_class = row['class']\n pp0 = 1.0\n pp1 = 1.0\n i = 0\n for a in attrs:\n attr_val = row[a]\n sub = cp[cp['attr']==a]\n sub = sub[sub['var']==attr_val]\n pp0 = pp0 * sub.get_value(i, class0) \n pp1 = pp1 * sub.get_value(i, class1) \n i = i + 1\n pp0 = (pp0 * prior0) \n pp1 = (pp1 * prior1) \n # prediction comparison\n predict = np.log(pp0) - np.log(pp1)\n if predict > 0:\n predicted_class = class0\n post_prob = pp0 / (pp0 + pp1)\n else:\n predicted_class = class1\n post_prob = pp1 / (pp0 + pp1)\n line = [predicted_class, actual_class, \"{:.12f}\".format(post_prob)]\n preds.append(line)\n if actual_class == predicted_class:\n correct = correct + 1\n \n return preds, correct", "async def predict(property: Property):\n prediction = model.predict(property.to_df())\n price = np.exp(prediction[0]) \n return '{}$ per night is an optimal price.'.format(round(price))", "def predict(self, obs):\n pass", "def predict(self, sample, **kwargs):\r\n return self.model.predict(sample, **kwargs)", "def predict(self, input):\n a = np.dot(np.transpose(self.final_weights), input)\n prob = self.sigmoid(a)\n return 1 if prob > 0.5 else 0", "async def dummy_predict(item: Item):\n predictions = ['HomeDepot', 'DunderMifflin', 'hometheater', 'EnterTheGungeon',\n 'cinematography', 'Tinder', 'LearnJapanese',\n 'futarp', 'OnePieceTC', 'Firefighting', 'fleshlight', 'lotr',\n 'knifeclub', 'sociopath', 'bleach', 'SCCM', 'GhostRecon',\n 'Ayahuasca', 'codes', 'preppers', 'grammar', 'NewSkaters',\n 'Truckers', 
'southpark', 'Dreams', 'JUSTNOMIL',\n 'EternalCardGame', 'evangelion', 'mercedes_benz', 'Cuckold',\n 'writing', 'afinil', 'synology', 'thinkpad', 'MDMA', 'sailing',\n 'cfs', 'siacoin', 'ASUS', 'OccupationalTherapy', 'biology',\n 'thelastofus', 'lonely', 'swrpg', 'acting', 'transformers',\n 'vergecurrency', 'Beekeeping']\n\n recs = {} # store in dict\n\n n_results = 5 # fix to 5 results\n\n recommendations = random.sample(predictions, n_results)\n return {'subreddits': recommendations}", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def filter_specified_labels(self, repo_owner, repo_name, predictions):\n label_names = []\n label_probabilities = []\n # handle the yaml file\n yaml = get_yaml(owner=repo_owner, repo=repo_name)\n # user may set the labels they want to predict\n if yaml and 'predicted-labels' in yaml:\n for name, proba in zip(predictions['labels'], predictions['probabilities']):\n if name in yaml['predicted-labels']:\n label_names.append(name)\n label_probabilities.append(proba)\n else:\n logging.warning(f'YAML file does not contain `predicted-labels`, '\n 'bot will predict all labels with enough confidence')\n # if user do not set `predicted-labels`,\n # predict all labels with enough confidence\n label_names = predictions['labels']\n label_probabilities = predictions['probabilities']\n return label_names, label_probabilities", "def predict_model():\n data = request.json\n\n if data:\n predictor.pred_dict[\"model\"] = data[\"model\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def predict(self, doc):\n \n prob_positive = self._predict_doc(doc, 1)\n prob_negative = self._predict_doc(doc, 0)\n\n if prob_positive > prob_negative:\n return 1\n return 0", "def prior(training_data, label_list):\n\n smooth = 1 # smoothing factor\n logprob = {}\n # TODO: add your code here\n numfile1 = 0\n numfile2 = 0\n for dic in training_data:\n if(dic[\"label\"] == label_list[0]):\n numfile1 += 1\n elif(dic[\"label\"] == label_list[1]):\n numfile2 += 1\n numtotal = numfile1 + numfile2\n\n prob1 = (numfile1+smooth)/(numtotal+2)\n prob2 = (numfile2 + smooth) / (numtotal + 2)\n\n logprob[label_list[0]] = math.log(prob1)\n logprob[label_list[1]] = math.log(prob2)\n\n\n return logprob", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def predict(self, obs):\n pred_q = self.model(obs)\n return pred_q", "def predict(self, sample, **kwargs):\n return self.model.predict(sample, **kwargs)", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels", "def _predict_and_return_score(self, example):\n model_out = self._model.predict([example])\n return list(model_out)[0]['score']", "def make_null_labeler():\n\n def labeler(data):\n return {**data, 'outcome': tf.zeros([1]), 'y0': tf.zeros([1]), 'y1': tf.zeros([1]), 'treatment': tf.zeros([1])}\n\n return labeler", "def fit_predict(self, X, y=None, sample_weight=None):\r\n return self.fit(X, sample_weight=sample_weight).labels_" ]
[ "0.6872355", "0.6781801", "0.66207117", "0.64889395", "0.6419287", "0.64013904", "0.63310605", "0.6304575", "0.62747353", "0.62652564", "0.6265103", "0.62375915", "0.61843824", "0.617628", "0.6124116", "0.605304", "0.60273445", "0.60081005", "0.5998837", "0.59917265", "0.59798676", "0.59644777", "0.5956716", "0.59515446", "0.5939579", "0.5937159", "0.59086853", "0.5906853", "0.5904412", "0.5904008", "0.58952415", "0.58781856", "0.5871602", "0.5860203", "0.5834783", "0.5812002", "0.58013475", "0.5799595", "0.57960385", "0.57929176", "0.57856214", "0.5777539", "0.577635", "0.576662", "0.575411", "0.5725929", "0.57253957", "0.5691212", "0.568875", "0.5656403", "0.56478065", "0.5644764", "0.56411386", "0.5639527", "0.56267637", "0.56173664", "0.56167275", "0.561651", "0.5614642", "0.56145746", "0.5613956", "0.56119305", "0.56095695", "0.56037164", "0.55995363", "0.55891484", "0.5583206", "0.557578", "0.5570643", "0.5557655", "0.55568373", "0.5556582", "0.5549621", "0.55490303", "0.5544487", "0.5528345", "0.5524398", "0.5524325", "0.55136245", "0.55126226", "0.5509508", "0.55089146", "0.55078197", "0.5507338", "0.5505303", "0.5501215", "0.5498544", "0.54982376", "0.5495108", "0.54900163", "0.5475952", "0.54744464", "0.5470432", "0.54681456", "0.5459822", "0.54580605", "0.54534006", "0.5452031", "0.5451645", "0.54506683" ]
0.65727895
3
Looks up the leaf node corresponding to the given entry. Does not handle missing values.
def lookup(self,entry): if self.type == 'v': return self v = entry[self.feature] assert v != None if self.type == 's': c = None try: c = self.children[v] except KeyError: #print "Unseen value for feature",self.feature,": ",v best = None bestDist = float('inf') for (val,c) in self.children.iteritems(): if abs(val - v) < bestDist: bestDist = abs(val - v) best = c c = best return c.lookup(entry) elif self.type == 'i': if v <= self.value: return self.children[0].lookup(entry) else: return self.children[1].lookup(entry) raise RuntimeError("Invalid DecisionTreeNode type?")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_parent_node(self, entry, key):\r\n parent = entry\r\n keys = key.split(\".\")\r\n for k in keys:\r\n try:\r\n parent = parent[k]\r\n except:\r\n raise Exception(\"key \\\"\" + key + \"\\\" was not found in HAR file\")\r\n return parent", "def get(self, entry: ConfigEntry) -> any:\n value = self.root\n if value is None:\n return None\n\n for key in entry.key_path:\n if self.ignore_case_in_keys:\n key = key.lower()\n value = value.get(key)\n if value is None:\n return entry.value\n\n return value", "def lookupVal(self, val):\n pybtlib.lookupVal.restype = ctypes.c_int\n pybtlib.lookupVal.argtypes = [ctypes.POINTER(Tree), ctypes.c_int]\n return pybtlib.lookupVal(ctypes.byref(self), val)", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");", "def check_leaf(leaf_value, dic, entry_list, messages, current_elem):\n value = traverse_dict(dic, entry_list)\n default_value = leaf_value['default']\n required_type = type(default_value)\n required = leaf_value['required']\n # messages.append(\"Checking leaf \" + str(entry_list))\n if required and value is None:\n add_message(\n messages, current_elem, \"The required value in \" + str(entry_list) + \" cannot be found!\"\n )\n if value is not None and not isinstance(value, required_type):\n add_message(\n messages,\n current_elem,\n \"The required value in \"\n + str(entry_list)\n + \" doesn't match expected type \"\n + str(required_type),\n )", "def get_entry(self, entry: str) -> Optional[Union['Directory', NormalFile, VirusFile, Entry]]:\n for e in self.get_entries():\n if e.get_name() == entry:\n return e", "def _extract_leaf(leaf):\n try:\n return re.match(r'leaf-(\\d+)', leaf).group(1)\n except:\n return None", "def find_path(t, entry):\n if t.entry == entry:\n return [t.entry]\n else:\n branches = [find_path(branch, entry) for branch in t.branches]\n for branch in branches:\n if branch:\n return [t.entry] + branch\n return False", "def lookup(self, key):\n k = self.get_position(key)\n\n if self.keys[k] == key:\n return node.values[k]\n\n # Lookup in the child node.\n if self.refs[k+1] == None:\n return None\n return self.refs[k+1].lookup(key)", "def search(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n current_node = self._root\n while current_node:\n if val == current_node._data:\n return current_node\n if val > current_node._data:\n current_node = current_node._rkid\n else:\n current_node = current_node._lkid\n return", "def _lookup(self, data):\n parent, current = None, self.root\n while current:\n if current < data: # data should be in right\n parent, current = current, current.right\n elif current > data: # data should be in left\n parent, current = current, current.left\n else: # equals\n return parent, current\n return parent, current", "def get_leaf(self, descr):\n matches = [x for x in self.leaves if x.descr == descr]\n if matches == []:\n raise RuntimeError(f\"Did not find any leaves matching 
'{descr}'\")\n if len(matches) > 1:\n raise RuntimeError(f\"Found multiple matching leaves: {matches}\")\n return matches[0]", "def lookup(self, key):\n # check that this tree actually has a root node\n debug.printMsg(\"Call made to Lookup\")\n debug.printMsg(\"checking if we have a BST\")\n if self.root:\n debug.printMsg(\"Calling Recursive Lookup\")\n (result, err) = self.recursiveLookup(key, self.root)\n # if we did not find anything\n if err: \n debug.printMsg(\"Oops, we couldn't find anything\")\n return None\n else: \n # we found a result\n debug.printMsg(\"we found: \")\n return result\n else:\n debug.printMsg(\"Oops, the BST seems to not exist\")\n # root doesnt exist\n return None", "def _lookup(self, key):\n key_hash = self.first_hash(key)\n entry = self.table[key_hash]\n if entry.key is None or entry is key:\n return entry\n free = None\n if entry.key is dummy:\n free = entry\n elif compare(entry.hash, key_hash) and key == entry.key:\n return entry\n\n i = key_hash\n while True:\n i += self.second_hash(key)\n i = i % self.size\n entry = self.table[i]\n if entry.key is None:\n return entry if free is None else free\n if entry.key is key or \\\n (compare(entry.hash, key_hash) and key == entry.key):\n return entry\n elif entry.key is dummy and free is None:\n free = dummy\n\n assert False, \"not reached\"", "def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el", "def lookup(self, val):\n if val < self.val:\n if self.left is None:\n return None, None\n return self.left.lookup(val)\n elif val > self.val:\n if self.right is None:\n return None, None\n return self.right.lookup(val)\n else:\n return self", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def leaf(self, value, depth, available):\n method_name = 'leaf_' + value.__class__.__name__\n method = getattr(self, method_name, self.generic_leaf)\n return method(value, depth, available)", "def get_entry(self, entry_name):\n if entry_name in self.entries: # Don't invoke constructor if not needed\n return self.entries[entry_name]\n return self.entries.setdefault(entry_name, PathElement(self.file_name, self.namespaces))", "def labelRoot(lut, label):\n result = lut[label]\n if lut[result] != result:\n result = labelRoot(lut, result)\n lut[label] = result\n return result", "def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value", "def get_node(self, key: str) -> Optional[Node]:", "def find_leaf(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n \n return cur_node", "def _get_leaf(leaf, d, pattern):\n xleaf = d.rsplit('/', 1)[-1].strip()\n check_pattern = re.match('\\*(\\.[a-zA-Z0-9]+)$', pattern)\n if check_pattern:\n xten = check_pattern.groups()[0]\n if xleaf[-len(xten):] == xten:\n xleaf = xleaf[:-len(xten)].strip()\n if xleaf.find(ROOT_LEAF_PREFIX) == 0:\n return leaf\n elif leaf.strip():\n return '{0}.{1}'.format(leaf, xleaf)\n else:\n return xleaf", "def lookup(self, key):\n return self.root.lookup(key)", "def 
predict(self,entry):\n assert self.root is not None,\"Decision tree is not initialized\"\n return self.root.predict(entry)", "def _find_node(self, item):\n # Start with the root node\n node = self.root\n # Loop until we descend past the closest leaf node\n while node is not None:\n # TODO: Check if the given item matches the node's data\n if ...:\n # Return the found node\n return node\n # TODO: Check if the given item is less than the node's data\n elif ...:\n # TODO: Descend to the node's left child\n node = ...\n # TODO: Check if the given item is greater than the node's data\n elif ...:\n # TODO: Descend to the node's right child\n node = ...\n # Not found\n return None", "def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)", "def recursiveLookup(self, key, curr):\n # basically repeat insert\n debug.printMsg(\"Entered recursiveLookup\")\n # if we found a match break\n debug.printMsg('Checking base condition: ' + key + ' = ' + curr.key)\n if key == curr.key:\n debug.printMsg(\"Success, found\")\n return (curr, None)\n # if the key is larger than curr\n elif key > curr.key:\n debug.printMsg(\"Nope, now checking if we should go right\")\n debug.printMsg(\"yep\")\n debug.printMsg(\"Check if we still have room to search\")\n if curr.hasRightChild():\n debug.printMsg(\"Moving further right\")\n # move onto the next node along the search path\n return self.recursiveLookup(key, curr.right)\n else:\n debug.printMsg(\"Nope, ran out of search path. bummer\")\n # hit the end and there was no match\n return (None, True)\n else:\n debug.printMsg(\"Nope, we're going left\") \n debug.printMsg(\"Check if we still have room to search\") \n if curr.hasLeftChild():\n debug.printMsg(\"Moving further left\")\n return self.recursiveLookup(key, curr.left)\n\n else:\n debug.printMsg(\"Shit balls, we ran out of search path\")\n return (None, True)", "def tree_to_leaf(self,\n x_row):\n node = self.tree[0]\n while True:\n if node.is_leaf:\n return node\n val = x_row[node.label]\n if np.isnan(val):\n node = self.tree[node.id_null]\n elif val <= node.cutoff:\n node = self.tree[node.id_lower]\n elif val >= node.cutoff:\n node = self.tree[node.id_higher]\n else:\n raise NameError", "def get_node(self, key: str) -> Node:", "def lookup(self, lkup):\n if self.data > lkup:\n if self.left:\n self.left.lookup(lkup)\n else:\n print(\"{} not found\".format(lkup))\n elif self.data < lkup:\n if self.right:\n self.right.lookup(lkup)\n else:\n print(\"{} not found\".format(lkup))\n else:\n print(\"{} found\".format(self.data))", "def _func_null_leaf(self, arr: list, search: str) -> list:\n return [a.get(search) for a in arr]", "def get_root(g,r,n): # g: graph; r: range; n: node address\n if 'head' in g.nodes[n]:\n if g.nodes[n]['head'] not in r:\n return n\n else:\n return get_root(g,r,g.nodes[n]['head'])\n return n", "def _find(self, val, cur_node):\n if val == cur_node.data:\n return cur_node\n elif val > cur_node.data:\n if not cur_node.right:\n return None\n return self._find(val, cur_node.right)\n elif val < cur_node.data:\n if not cur_node.left:\n return None\n return self._find(val, cur_node.left)", "def search(self, key):\r\n (node, index) = self.root, self.root.search(key)\r\n while not node.contains_key_at(key, index) and not node.is_leaf():\r\n node = node.children[index]\r\n index = node.search(key)\r\n\r\n return (node, index) if node.contains_key_at(key, index) else None", "def remove_entry(self, entry: Union[int, str, 
Entry]) -> Optional[Entry]:\n if isinstance(entry, Entry):\n target = entry\n elif isinstance(entry, int):\n target = self.__entries[entry]\n else:\n target = None\n for e in self.__entries:\n if e.get_name() == entry:\n target = e\n if target is None:\n return None\n if isinstance(target, Directory) and target.is_populated():\n return None\n self.__entries.remove(target)\n return target", "def get_node_from_clade(tree, split_to_node_map, clade):\n leaves = get_leaf_set(tree)\n\n # Check if the clade is the whole tree!\n if leaves == clade:\n return split_to_node_map[0]\n\n # Check if the clade contains leaves not in the tree itself\n if len(leaves.intersection(clade)) < len(clade):\n return None\n\n # Encode labels as split (integer) and return the node or none\n split = tree.taxon_namespace.taxa_bitmask(labels=clade)\n if split in split_to_node_map:\n return split_to_node_map[split]\n else:\n return None", "def _get(self, k, currNode):\n if not currNode:\n return\n if k < currNode.key:\n return self._get(k, currNode.leftChild)\n elif k > currNode.key:\n return self._get(k, currNode.rightChild)\n elif k == currNode.key:\n return currNode", "def add_entry(self, key, value, depth):\n current = self.entries.get(key, None)\n if current is None or current.depth > depth:\n self.entries[key] = NodeEntry(key, value, depth)\n elif current.depth == depth:\n raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))", "def letter(leaf):\n return root(branches(leaf)[0])", "def search(self, key: int, possible_parent=False) -> TreeNode:\n node = prev_node = self.root\n while node:\n if key > node.val:\n prev_node = node\n node = node.right\n elif key == node.val:\n return node\n else:\n prev_node = node\n node = node.left\n if possible_parent:\n return prev_node\n return None", "def find_value_for_nested_key(mapping, key_of_interest, tree=[]):\n original_mapping = mapping\n logger.debug(\"Looking for key %s\", key_of_interest)\n logging.debug(\"Looking in %s\", mapping)\n logger.debug(\"Using tree %s\", tree)\n if tree:\n for leaf in tree:\n mapping = mapping[leaf]\n else:\n tree = [None]\n for leaf in reversed(tree):\n logging.debug(\"Looking in bottommost leaf %s\", leaf)\n for key, value in six.iteritems(mapping):\n if key == key_of_interest:\n return value\n if leaf:\n find_value_in_nested_key(original_mapping, key_of_interest, tree[:-1])\n warnings.warn(\"Couldn't find value for key %s\" % key_of_interest)\n # raise KeyError(\"Couldn't find value for key %s\", key_of_interest)", "def find(self, val):\n\n\t\tif not self.root:\n\t\t\treturn None\n\n\t\tQ = [self.root]\n\t\twhile Q:\n\t\t\tnode = Q.pop(0)\n\n\t\t\tif node.val == val:\n\t\t\t\treturn node\n\n\t\t\tif node.left:\n\t\t\t\tQ.append(node.left)\n\n\t\t\tif node.right:\n\t\t\t\tQ.append(node.right)\n\n\t\treturn None", "def find(self, value):\n # initialize node as root\n node = self.root\n\n # find value\n while node != None:\n\n # value found: return node\n if node.value == value:\n return node\n\n # value is smaller than node: search in left sub tree\n elif node.value > value:\n node = node.left\n\n # value is bigger than node: search in right sub tree\n else:\n node = node.right\n\n # value not found: return None\n return None", "def lookup(self, data, parent=None):\n if data < self.data:\n if self.left is None:\n return None, None\n return self.left.lookup(data, self)\n elif data > self.data:\n if self.right is None:\n return None, None\n return self.right.lookup(data, self)\n else:\n return self, 
parent", "def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None", "def __find_node(self, element) -> _AVLTreeNode or None:\n\n curr_node = self.__root\n while curr_node is not None:\n\n if self.__key(element) < self.__key(curr_node.data):\n curr_node = curr_node.left\n elif self.__key(curr_node.data) < self.__key(element):\n curr_node = curr_node.right\n else:\n return curr_node\n\n return None", "def label(tree):\n return tree[0]", "def ChooseLeaf(self, node):\n\n if self.level == node.level + 1:\n # if current node level is higher than the node we want to insert, we find the good point.\n return self\n else:\n # Or iter its child nodes, to find the node with min area.\n increment = [(i, space_increase(self.leaves[i].MBR, node.MBR)) for i in range(len(self.leaves))]\n res = min(increment, key=lambda x: x[1])\n return self.leaves[res[0]].ChooseLeaf(node)", "def _get_slt_entry(self, entry):\n # Slt ptr is the second word in the SLT region (first is fingerprint)\n # Note how we deliberately don't use any debug information here (e.g.\n # finding the location of $_audio_slt_table).\n if Arch.addr_per_word == 4:\n sltptr_addr = Arch.pRegions['SLT'][0] + Arch.addr_per_word\n slt_entry_addr = self.get_data_pm(sltptr_addr) # index 0 of slt\n else:\n sltptr_addr = Arch.dRegions['SLT'][0] + Arch.addr_per_word\n slt_entry_addr = self.get_data(sltptr_addr) # index 0 of slt\n # Run through the slt looking for the entry we want, if we can't find\n # the one we're looking for maybe it's not in the list\n while entry > self.get_data(slt_entry_addr):\n slt_entry_addr += 2 * Arch.addr_per_word\n\n if entry == self.get_data(slt_entry_addr):\n return self.get_data(slt_entry_addr + Arch.addr_per_word)\n\n return None", "def search(self, prefix: str) -> TrieNode:\n leaf = self.root\n for level in range(len(prefix)):\n letter = prefix[level]\n\n if letter not in leaf.children:\n return self.get_node()\n leaf = leaf.children[letter]\n\n if leaf is not None:\n return leaf\n return self.get_node()", "def FindLeaf(self, node):\n result = []\n # If current node is not leaf node, just iter to find all the node with MBR\n if self.level != 1:\n for leaf in self.leaves:\n if contain(leaf.MBR, node.MBR):\n result.append(leaf.FindLeaf(node))\n for x in result:\n if x != None:\n return x\n # If current node is leaf, just iter this node to check whether index is same, and return\n else:\n for leaf in self.leaves:\n if leaf.index == node.index:\n return self", "def search(root, key):\n if root is None:\n return None\n else:\n if root.key == key:\n return root.value\n elif root.right is None and root.left is None:\n return None\n elif key >= root.key:\n return search(root.right, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right\n elif key < root.key:\n return search(root.left, key)\n # No need to return root.right.value, since this should be\n # returned by root.key as root is replaced by root.right", "def get_leaf_node(self, current_word):\n node = self.wordlist.find(current_word)\n\n if node is None:\n # current word is not in the Trie\n return None\n elif node.value != TRIE_BRANCH:\n # current word is already a leaf\n return current_word\n \n # descend down a random branch down the trie\n # until we hit a leaf\n while node.children:\n next_letter = random.choice(list(node.children.keys()))\n current_word += next_letter\n node = 
node.children.get(next_letter)\n \n return current_word", "def search(self, key):\n if self.key == key:\n if self.val is not None:\n return self.val\n else:\n return self.key\n\n \"\"\"If the key of the node is smaller than the root node's key, traverse the left subtree\"\"\"\n if self.key < key:\n self.left.search(key)\n\n \"\"\"If the key of the node is greater than the root node's key, traverse the right subtree \"\"\"\n if self.key > key:\n self.right.search(key)\n\n \"\"\"If tree is empty, return None\"\"\"\n return None", "def _get_node(self, key):\n\n index = self._hash_function(key) % self.capacity # Get the index by hashing the key\n node = self._buckets[index].contains(key) # Get the node with the key (if it exists)\n return node", "def search(self, val):\n currentNode = self.rootNode\n while True:\n if currentNode is None:\n print(\"Number not found.\")\n return None\n elif currentNode.val == val:\n print(\"Number found.\")\n return currentNode\n elif currentNode.val < val:\n currentNode = currentNode.right\n else:\n currentNode = currentNode.left", "def search(self, key):\n x = self.root\n\n while x is not self.nil:\n if key == x.key:\n break\n\n if key < x.key:\n x = x.left\n else:\n x = x.right\n return x", "def search(self, key):\n if self.root is None:\n return None\n return self.root.search(key)", "def __getitem__(self, key):\n\n if type(key) != self.type:\n raise TypeError\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n print(\"FIRST_CHAR\", first_char)\n print(\"self.children\", self.children)\n raise KeyError\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n raise KeyError\n\n return node.value\n else:\n return self.children[first_char][others]", "def huffman_leaf(letter, weight):\n return tree(weight, [tree(letter)])", "def node_lookup(self, path, for_update=False):\n\n q = \"select node from nodes where path = ?\"\n self.execute(q, (path,))\n r = self.fetchone()\n if r is not None:\n return r[0]\n return None", "def _find_one_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping,\n ) -> Union[dict, None]:\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n return item\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def binary_search(node, value):\n aux_node = None\n while node is not None and node.value != value:\n if value < node.value:\n aux_node = node.left\n node = aux_node\n else:\n aux_node = node.right\n node = aux_node\n return node if node.value == value else None", "def _get(self, root: TreeNode, key: TreeNode) -> TreeNode:\n # Always do the edge-case check, which could raise an error, FIRST!!\n if root is None: # BC2 - not found\n return None\n # BST-order traverse: examine root first, then recur left or recur right depending on key comparison\n if root.key == key: # BC1 - found\n return root\n\n result_left_subtree = None\n result_right_subtree = None\n\n if key < root.key:\n result_left_subtree = self._get(root.left, key)\n elif key > root.key:\n result_right_subtree = self._get(root.right, key)\n\n if result_left_subtree is not None:\n return result_left_subtree\n elif result_right_subtree is not None:\n return result_right_subtree\n else:\n return None", "def 
predict(self,entry):\n if self.type == 'v':\n return self.value\n v = entry[self.feature]\n if v is None:\n #multiple childrens' predictions\n counts = defaultdict(int)\n labels = self.predict_all(entry,counts)\n if len(counts) == 1:\n return counts.keys()[0]\n #return a probability distribution\n return normalize(counts)\n #maximum likelihood\n #return argmax(counts)\n if self.type == 's':\n c = None\n try:\n c = self.children[v]\n except KeyError:\n #print \"Unseen value for feature\",self.feature,\": \",v\n best = None\n bestDist = float('inf')\n for (val,c) in self.children.iteritems():\n if abs(val - v) < bestDist:\n bestDist = abs(val - v)\n best = c\n c = best\n return c.predict(entry)\n elif self.type == 'i':\n if v <= self.value:\n return self.children[0].predict(entry)\n else:\n return self.children[1].predict(entry)\n raise RuntimeError(\"Invalid DecisionTreeNode type?\")", "def lookup(self, c):\n p = self\n for start in range(len(c)):\n if not p.tail.has_key(c[start]):\n return None\n p = p.tail[c[start]]\n\n return p.result", "def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)", "def __find_key_in_level(node, key):\n for child in node.children:\n if child.key == key:\n return child\n\n return False", "def subNode(self, name):\n for nd in self.kids:\n if nd.name == name:\n return nd\n raise LookupError( 'name not found \"' + name + '\"' )", "def get(self, key):\n # Your code here\n\n idx = self.hash_index(key)\n\n # check if the index is in range\n if idx >= 0 and idx < self.capacity:\n curr_node = self.hash_table[idx]\n\n # check if any node at index exists\n if curr_node is None:\n return None\n\n # if there's already something at this index\n while curr_node is not None:\n \n # check to see if there is an entry at this index whose key matches the provided key\n while curr_node.key is not key:\n curr_node = curr_node.next\n \n # if we never found an entry with a matching key, return None\n if curr_node.key is not key or curr_node is None:\n return None\n else:\n return curr_node.value\n \n \n # otherwise return None if the index is not in range\n else:\n return None", "def find_node(self, node):\r\n for tree_node in self.traverse():\r\n if tree_node.content == node:\r\n return tree_node\r\n raise LookupError(\"Given node does not exist on the tree\")", "def find(self, prefix):\n node = self.root\n for char in prefix:\n if char not in node.keys():\n return None\n node = node[char]\n\n return node", "def _resolve_entry(self, path):\n upath = pycompat.fsdecode(path)\n ent = None\n if path in self._pending_changes:\n val = self._pending_changes[path]\n if val is None:\n raise KeyError\n return val\n t = self._tree\n comps = upath.split('/')\n te = self._tree\n for comp in comps[:-1]:\n te = te[comp]\n t = self._git_repo[te.id]\n ent = t[comps[-1]]\n if ent.filemode == pygit2.GIT_FILEMODE_BLOB:\n flags = b''\n elif ent.filemode == pygit2.GIT_FILEMODE_BLOB_EXECUTABLE:\n flags = b'x'\n elif ent.filemode == pygit2.GIT_FILEMODE_LINK:\n flags = b'l'\n else:\n raise ValueError('unsupported mode %s' % oct(ent.filemode))\n return ent.id.raw, flags", "def get_from_subtree(subtree, key):\n temp_subtree = subtree\n while temp_subtree is not None:\n if key == temp_subtree.key:\n return temp_subtree.value\n elif key < temp_subtree.key:\n temp_subtree = temp_subtree.left\n elif key > temp_subtree.key:\n temp_subtree = temp_subtree.right\n return None", "def generic_leaf(self, value, depth, available):\n return repr(value), 
False", "def __getitem__(self, key):\n result = self.tree[key]\n if result is not None:\n \"\"\"This needs to be deep-copied in order not to change the elements in the map via the reference, but\n return the value as in SetlX.\n The index 2 from key implies stands for the value as key-value-pairs are represented as lists of length 2\"\"\"\n return copy.deepcopy(result.key[2])", "def find_or_create(self, h, **kwargs):\n curr = self.root\n h_len, root_history_len = len(h), len(self.root.h)\n\n for step in range(root_history_len, h_len):\n curr = curr.get_child(h[step])\n if curr is None:\n return self.add(h, **kwargs)\n return curr", "def get_type_of_entry(dictionary, s, entry):\n return entry", "def search(self, val):\n if not self.root:\n return None\n else:\n return self._find(val, self.root)", "def _insert(self, key: int) -> TreeNode:\n node = self.root\n while True:\n # Check if a key is greater than node.\n if key > node.val:\n if not node.right:\n # node.right is a leaf\n node.right = TreeNode(val=key)\n node.right.parent = node\n return node\n node = node.right\n elif key < node.val:\n if not node.left:\n # node.left is a leaf\n node.left = TreeNode(val=key)\n node.left.parent = node\n return node\n node = node.left\n else:\n # print(f\"{key}: already in a Tree.\")\n return", "def __getitem__(self, node):\n j = self._hash_function(node)\n bucket = self._T[j]\n if bucket is None:\n raise KeyError(node)\n return bucket[node]", "def get_leaf_from_node(self, node: Node, idx: int) -> Node:\n num_dev = len(self.get_s1n_if_exist(node).ds)\n if idx >= num_dev:\n log.error(\n \"given index is greater than number of devices under the node\")\n\n return self.get_leaf_from_s1n(self.get_s1n_if_exist(node), idx)", "def first_leaf(tree):\n if 'contents' in tree:\n x = tree['contents'][0]\n return first_leaf(x)\n else:\n return tree", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def set_leaf_node(self, leaf_value):\n\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. 
' + \\\n 'If you meant to change type of node {}, '.format(node_key) + \\\n 'delete it first and then add an empty node with ' + \\\n 'the same key.')\n\n # check if leaf_value is a list-like object\n try:\n _ = iter(leaf_value)\n is_list = True\n except TypeError:\n is_list = False\n\n try:\n if is_list:\n leaf_value = [float(i) for i in leaf_value]\n else:\n leaf_value = float(leaf_value)\n except TypeError:\n raise TreeliteError('leaf_value parameter should be either a ' + \\\n 'single float or a list of floats')\n\n try:\n if is_list:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafVectorNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n c_array(ctypes.c_double, leaf_value),\n ctypes.c_size_t(len(leaf_value))))\n else:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n ctypes.c_double(leaf_value)))\n self.empty = False\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a leaf node')", "def get_root(tree, node):\n if node not in tree.parents:\n tree.parents[node] = node\n tree.population[node] = 1\n return node\n\n root = node\n while(root != tree.parents[root]):\n root = tree.parents[root]\n return root", "async def leaf_it(d):\n async for _parent, _key, leaf in _walk({}, None, d):\n yield leaf", "def successor(self, key: int) -> TreeNode:\n tree_node = self.search(key, possible_parent=True)\n if tree_node:\n if tree_node.right and tree_node.val <= key:\n right_subtree = tree_node.right\n while right_subtree.left:\n right_subtree = right_subtree.left\n return right_subtree\n else:\n while tree_node:\n if tree_node.val > key:\n return tree_node\n tree_node = tree_node.parent\n return", "def __getitem__(self, k):\n if self.is_empty():\n raise KeyError('key Error:' + repr(k))\n else:\n p = self._subtree_search(self.root(), k)\n self._rebalance_access(p)\n #this might be an unsuccessful search, so deal with this...\n if k!=p.key():\n raise KeyError('key error:'+repr(k))\n return p.value()", "def find_paths(t, entry):\n paths = []\n if t.label == entry:\n return [[entry]]\n for b in t.branches:\n for p in find_paths(b, entry):\n paths.append([t.label] + p)\n return paths", "def _unpack(self, entry):\n return entry._value", "def getExactBranch(root, tag):\n tags = tag.split(':')\n if tags[0] == 'stats':\n tags = tags[1:]\n n = root\n for t in tags:\n newChildren = []\n for child in n.children:\n if child.nodeName == t or t == '*':\n newChildren.append(child)\n n.children = newChildren\n if n.children:\n n = n.children[0]\n if tags[-1] != '*':\n n.children = [] # prune off non-specified children tags", "def next_leaf(node):\n return len(node[1][0][1]) == 0", "def _recursiveSearch(self, data, node):\n\t\tif (not node):\n\t\t\treturn None\n\t\tif (node.value() == data):\n\t\t\treturn node\n\t\telif (data < node.value()):\n\t\t\treturn self._recursiveSearch(data, node.lchild())\n\t\telse:\n\t\t\treturn self._recursiveSearch(data, node.rchild())", "def get_child(self, val):\n if val in self._children:\n return self._children[val]", "def search(self, val):\n current = self.head\n # import pdb; pdb.set_trace()\n while current is not None:\n if current.data == val:\n return current\n current = current.next_node\n return None", "def _bddnode(root, lo, hi):\n\t# print(\"_bddnode\")\n\tif lo is hi:\n\t\tnode = lo\n\telse:\n\t\tkey = (root, lo, hi)\n\t\ttry:\n\t\t\tnode = _NODES[key]\n\t\texcept KeyError:\n\t\t\tnode = _NODES[key] = 
BDDNode(*key)\n\treturn node", "def _node_search(self, mapping: WizardDataMappingBaseEnum, root_node: str, target_dict: dict) -> tuple:\n keys = mapping.get_registration_field_reference(root_node).split(\".\")\n max_depth: int = len(keys) - 1\n\n return self._recursive_search(target_dict, keys, max_depth)" ]
[ "0.6321155", "0.62434244", "0.61522406", "0.6018858", "0.601176", "0.6007655", "0.59877974", "0.5881265", "0.5834447", "0.5798371", "0.57872856", "0.5778088", "0.57707995", "0.5761493", "0.57495886", "0.57285845", "0.57001275", "0.56570214", "0.56385493", "0.5637989", "0.560824", "0.560087", "0.5568936", "0.5553265", "0.55228597", "0.55168426", "0.55156344", "0.55140805", "0.55074525", "0.5483108", "0.5475628", "0.5441894", "0.5437002", "0.54320097", "0.54204243", "0.5413965", "0.5411312", "0.53963476", "0.53818935", "0.53811103", "0.5367797", "0.5362208", "0.53599566", "0.53597325", "0.5356773", "0.5333614", "0.53240836", "0.531572", "0.53144646", "0.53101176", "0.5299443", "0.5284668", "0.5273065", "0.5265759", "0.5263569", "0.5251193", "0.5249541", "0.5244442", "0.5226547", "0.5213636", "0.52029926", "0.5198382", "0.51929307", "0.5191464", "0.5182656", "0.5181722", "0.51805055", "0.5178836", "0.51726556", "0.51722586", "0.5165044", "0.5158512", "0.5152653", "0.5145649", "0.5139008", "0.5134315", "0.5133067", "0.51259834", "0.51239246", "0.5122238", "0.5117354", "0.5116053", "0.5113659", "0.51101327", "0.5108235", "0.5105493", "0.50991464", "0.509765", "0.5092955", "0.5085199", "0.5080388", "0.5077869", "0.5062199", "0.50609446", "0.5059636", "0.5056553", "0.50558716", "0.5049679", "0.50481623", "0.50465137" ]
0.6669498
0
Given an indexed database db, a list of labels (one for each id), and a list of ids to test, sets this node to the best label.
def pick_best_label(self,db,labels,ids): self.type = 'v' if len(labels) > 0: self.value = vote([labels[id] for id in ids]) else: self.value = None return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n 
else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print 
\"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def identify_examples(self,db,labels,node):\n path = []\n while node.parent != None:\n nkey = None\n for (k,c) in node.parent().children.iteritems():\n if c is node:\n nkey = k\n break\n assert nkey != None\n path.append((node.parent(),nkey))\n node = node.parent()\n path = path[::-1]\n nids = len(labels)\n ids = []\n for id in xrange(nids):\n valid = True\n for n,ckey in path:\n f = n.feature\n val = featureMatrix[f,id]\n if val is None:\n #it's a None value, just continue on\n continue\n else:\n key = None\n if n.type == 'i':\n key = (0 if val <= n.value else 1)\n else:\n key = val\n if key != ckey:\n valid = False\n break\n if valid:\n ids.append(id)\n return ids", "def set_index(self, list):\n for key in list:\n self.find_label_by_id(key).index = True", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n (entry))\n return", "def _compute_relevance_map(self, labels):\n\n ds_labels = np.zeros(self.ds_size)\n ds_relevance_map = 0\n for i in np.unique(labels):\n if i != 0:\n # 2.1- Compute the coarse label image\n y, x, z = np.where(labels == i)\n ds_labels[np.int32(y * self.full_to_ds_ratio[0]),\n np.int32(x * self.full_to_ds_ratio[1]), z] = i\n # 2.2- Compute the energy map\n M = np.ones_like(ds_labels)\n M[ds_labels == i] = 0\n distance_map = distance_transform_edt(M)\n ds_relevance_map += distance_map\n\n # 2.3- Normalize the energy map and compute the ROI\n ds_relevance_map = ds_relevance_map / ds_relevance_map.max()\n return ds_labels, ds_relevance_map", "def project(database, frequent_nodes, minsup, freq_labels, length, H, L, L_hat, n_graphs, n_pos, n_neg, pos_index, class_index, neg_index, graph_id_to_list_id, mapper, labels, model, constraints):\n\t# Declaring globals for recursive pattern mining\n\tglobal __subgraph_count\n\tglobal __positive_index\n\tglobal 
__n_pos\n\tglobal __n_graphs\n\tglobal __dataset\n\tglobal __pattern_set\n\tglobal __cl_constraints\n\tglobal __ml_constraints\n\tglobal __negative_index\n\tglobal __graph_id_to_list_id\n\tglobal __min_threshold\n\tglobal __min_index\n\n\t__graph_id_to_list_id = graph_id_to_list_id\n\t__ml_constraints = [c for c in constraints[0] if c[0] < n_graphs and c[1] < n_graphs]\n\t__cl_constraints = [c for c in constraints[1] if c[0] < n_graphs and c[1] < n_graphs]\n\t__positive_index = pos_index\n\t__negative_index = neg_index\n\t__n_pos = n_pos\n\t__n_graphs = n_graphs\n\t__H = H\n\t__L = L\n\t__L_hat = L_hat\n\t__dataset = []\n\t__pattern_set = []\n\t__subgraph_count = 0\n\t__min_threshold = sys.maxint\n\t__min_index = 0\n\tdfs_codes = []\n\tprojection_map = {}\n\tfeature_selection_model = None\n\n\tif model == \"top-k\":\n\t\tfeature_selection_model = TopKModel()\n\telif model == \"greedy\":\n\t\tfeature_selection_model = GreedyModel(__n_graphs, __positive_index)\n\telif model == \"gMGFL\":\n\t\tfeature_selection_model = GMGFLModel(__L, __L_hat)\n\telif model == \"gMLC\":\n\t\tfeature_selection_model = GMLCModel(__L, __H)\n\telse:\n\t\tlogging.log(logging.ERROR, \"Model %s not recognized\" %(model))\n\t\texit(0)\n\n\t# TODO: evaluate\n\t\"\"\"\n\tOnly constraints for current binary split\n\tfor con in __ml_constraints:\n\t\tif not labels[con[0]][class_index] == 1 and not labels[con[1]][class_index] == 1:\n\t\t\t__ml_constraints.remove((con[0], con[1]))\n\n\tfor con in __cl_constraints:\n\t\tif not labels[con[0]][class_index] == 1 and not labels[con[1]][class_index] == 1:\n\t\t\t__cl_constraints.remove((con[0], con[1]))\n\t\"\"\"\n\n\t# clean constraints from not applicable ones\n\tfor i, con in enumerate(__ml_constraints):\n\t\tif con[0] >= n_graphs or con[1] >= n_graphs:\n\t\t\t__ml_constraints.remove(con)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tlist_id1 = __graph_id_to_list_id[con[0]]\n\t\t\tlist_id2 = __graph_id_to_list_id[con[1]]\n\t\t\t__ml_constraints[i] = (list_id1, list_id2)\n\t\texcept KeyError:\n\t\t\t__ml_constraints.remove(con)\n\n\tfor i, con in enumerate(__cl_constraints):\n\t\tif con[0] >= n_graphs or con[1] >= n_graphs:\n\t\t\t__cl_constraints.remove(con)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tlist_id1 = __graph_id_to_list_id[con[0]]\n\t\t\tlist_id2 = __graph_id_to_list_id[con[1]]\n\t\t\t__cl_constraints[i] = (list_id1, list_id2)\n\t\texcept KeyError:\n\t\t\t__cl_constraints.remove(con)\n\n\t# TODO: Is this needed?\n\tfor l in frequent_nodes:\n\t\t__subgraph_count += 1\t\t\n\n\tfor g in database:\n\t\tfor n in g.nodes:\n\t\t\tedges = get_forward_init(n, g)\n\t\t\tif len(edges) > 0:\n\t\t\t\t for e in edges:\n\t\t\t\t\tnf = g.nodes[e.fromn]\n\t\t\t\t\tnt = g.nodes[e.to]\n\t\t\t\t\tdfsc = dfs_code(0,1,nf.label,e.label,nt.label)\n\t\t\t\t\tpdfs = pre_dfs(g.id,e,None)\n\t\t\t\t\t# because this is a root --> append the predecesspr dfs code (graph id, edge, None)\n\t\t\t\t\tif dfsc in projection_map:\n\t\t\t\t\t\tprojection_map[dfsc].append(pdfs)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprojection_map[dfsc] = [pdfs,]\n\n\t# Start Subgraph Mining\n\tthreshold = 0 \t# initial threshold for first length 1 subgraph\n\tfor pm in reversed(sorted(projection_map, key=dfs_code_compare)):\t# sorted by highest fromnode label (order is important)\n\t\tif len(projection_map[pm]) < minsup: # number of graphs, this initial pattern occurs (root patterns)\n\t\t\tcontinue\n\t\tdfs_codes.append(dfs_code(0,1,pm[2],pm[3],pm[4]))\t# initial pattern for this projection is always local 0, 1)\n\t\tdfs_codes = mine_subgraph(database, 
projection_map[pm],\n\t\t\t\t\t\t\tdfs_codes, minsup, length, mapper, feature_selection_model)\n\t\tdfs_codes.pop()\t# dfs_codes is a list of all projections for this initial pattern\n\treturn __dataset, __pattern_set", "def select_node_by_label(conn, label):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes WHERE label=?\", (label,))", "def update_database(JobData, Labels):\n\n DB = boto3.resource('dynamodb')\n DBTable = DB.Table(os.environ['DB_TABLE_NAME'])\n\t\n DBData = {}\n DBData['VideoID'] = JobData['JobId']\n DBData['VideoName'] = JobData['Video']['S3ObjectName']\n DBData['S3Bucket'] = JobData['Video']['S3Bucket']\n DBData['Labels'] = []\n\n print('Total number of labels detected was {}'.format(len(Labels)))\n\t\n\t# Now want to create a list of unique labels, number of occurrences, time of occurrences and average confidence\n for Label in Labels:\n if len(DBData['Labels']) == 0: # Populate the first item\n add_new_label(DBData['Labels'], Label)\n else:\n FoundMatch = False\n for UniqueLabel in DBData['Labels']:\n if Label['Label']['Name'] == UniqueLabel['LabelName']:\n update_label(UniqueLabel, Label)\n FoundMatch = True\n break\n # If we haven't found a match, need to add another unique label\n if not FoundMatch: add_new_label(DBData['Labels'], Label)\n\n # Now put this into the database. DynamoDB doesn't support Python float format so fix this\n DBData = FixFloats(DBData)\n DBTable.put_item(Item = DBData)\n\n return", "def choose_ltv(self, label):\n tids = self.node_tids[label]\n vals = self.node_vals[label]\n losses = [self.tid_losses_dct[tid] for tid in tids]\n\n # -- try to return the value corresponding to one of the\n # trials that was previously chosen\n tid_set = set(tids)\n for tid in self.best_tids:\n if tid in tid_set:\n idx = tids.index(tid)\n rval = losses[idx], tid, vals[idx]\n break\n else:\n # -- choose a new best idx\n ltvs = sorted(zip(losses, tids, vals))\n best_idx = int(self.rng.geometric(1.0 / self.avg_best_idx)) - 1\n best_idx = min(best_idx, len(ltvs) - 1)\n assert best_idx >= 0\n best_loss, best_tid, best_val = ltvs[best_idx]\n self.best_tids.append(best_tid)\n rval = best_loss, best_tid, best_val\n return rval", "def update_labels(self, nidxs, y):\n\n y = np.array(y, dtype=bool)\n for n, yi in zip(nidxs, y):\n self.node_labels[n] = [self.labels[i] for i, j in enumerate(yi) if j]\n\n return self", "def load_idx_to_label(dataset_name):\n if dataset_name == 'imagenet':\n path = 'https://gist.githubusercontent.com/yrevar/'\n path += '6135f1bd8dcf2e0cc683/raw/'\n path += 'd133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'\n path += '/imagenet1000_clsid_to_human.pkl'\n idx_to_label = pickle.load(urllib.request.urlopen(path))\n \n elif dataset_name == 'indoor_scenes':\n label_to_idx = {'airport_inside': 0,\n 'bar': 1,\n 'bedroom': 2,\n 'casino': 3,\n 'inside_subway': 4,\n 'kitchen': 5,\n 'livingroom': 6,\n 'restaurant': 7,\n 'subway': 8,\n 'warehouse': 9}\n idx_to_label = {idx: label for label, idx in label_to_idx.items()}\n \n elif dataset_name == 'pubfig10':\n celebs = ['Aaron-Eckhart', 'Adriana-Lima',\n 'Angela-Merkel', 'Beyonce-Knowles', \n 'Brad-Pitt', 'Clive-Owen', \n 'Drew-Barrymore', 'Milla-Jovovich', \n 'Quincy-Jones', 'Shahrukh-Khan']\n idx_to_label = { i: celebs[i] for i in range(len(celebs)) }\n\n elif dataset_name == 'pubfig83':\n celebs = ['adam-sandler', 'alex-baldwin', 'angelina-jolie', 'anna-kournikova', 'ashton-kutcher', 'avril-lavigne',\n 'barack-obama', 'ben-affleck', 'beyonce-knowles', 'brad-pitt', 'cameron-diaz', 'cate-blanchett', 
'charlize-theron',\n 'christina-ricci', 'claudia-schiffer', 'clive-owen', 'colin-farell', 'colin-powell', 'cristiano-ronaldo', 'daniel-craig',\n 'daniel-radcliffe', 'david-beckham', 'david-duchovny', 'denise-richards', 'drew-barrymore', 'dustin-hoffman', 'ehud-olmert',\n 'eva-mendes', 'faith-hill', 'george-clooney', 'gordon-brown', 'gwyneth-paltrow', 'halle-berry', 'harrison-ford',\n 'hugh-jackman', 'hugh-laurie', 'jack-nicholson', 'jennifer-aniston', 'jennifer-lopez', 'jennifer-lovehewitt',\n 'jessica-alba', 'jessica-simpson', 'joaquin-phoenix', 'john-travolta', 'julia-roberts', 'jula-stiles', 'kate-moss',\n 'kate-winslet', 'katherine-heigl', 'keira-knightley', 'kiefer-sutherland', 'leonardo-dicaprio', 'lindsay-lohan', 'mariah-carey',\n 'martha-stewart', 'matt-damon', 'meg-ryan', 'meryl-streep', 'michael-bloomberg', 'mickey-rourke', 'miley-cyrus',\n 'morgan-freeman', 'nicole-kidman', 'nicole-richie', 'orlando-bloom', 'reese-witherspoon', 'renee-zellweger', 'ricky-martin',\n 'robert-gates', 'sania-mirza', 'scarlett-johansson', 'shahrukh-khan', 'shakira', 'sharon-stone', 'silvio-berlusconi',\n 'stephen-colbert', 'steve-carell', 'tom-cruise', 'uma-thurman', 'victoria-beckham', 'viggo-mortensen', 'will-smith', 'zac-efron']\n idx_to_label = { i: celebs[i] for i in range(len(celebs)) }\n\n elif dataset_name == 'vggface2':\n path = \"../utils/vggface2_80_to_complete.pkl\"\n with open(path, 'rb') as file:\n idx_to_label = pickle.load(file)\n\n else:\n raise NotImplementedError\n \n return idx_to_label", "def test_label_anonymizing(self):\n class User(Base):\n @property\n def prop_score(self):\n return sum([tag.prop_score for tag in self.tags])\n\n class Tag(Base):\n @property\n def prop_score(self):\n return self.score1 * self.score2\n \n for labeled, labelname in [(True, 'score'), (True, None), (False, None)]:\n clear_mappers()\n \n tag_score = (tags_table.c.score1 * tags_table.c.score2)\n user_score = select([func.sum(tags_table.c.score1 *\n tags_table.c.score2)],\n tags_table.c.user_id == users_table.c.id)\n \n if labeled:\n tag_score = tag_score.label(labelname)\n user_score = user_score.label(labelname)\n else:\n user_score = user_score.as_scalar()\n \n mapper(Tag, tags_table, properties={\n 'query_score': column_property(tag_score),\n })\n\n\n mapper(User, users_table, properties={\n 'tags': relation(Tag, backref='user', lazy=False), \n 'query_score': column_property(user_score),\n })\n\n session = create_session()\n session.save(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)]))\n session.save(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)]))\n session.flush()\n session.clear()\n\n def go():\n for user in session.query(User).all():\n self.assertEquals(user.query_score, user.prop_score)\n self.assert_sql_count(testing.db, go, 1)\n\n\n # fails for non labeled (fixed in 0.5):\n if labeled:\n def go():\n u = session.query(User).filter_by(name='joe').one()\n self.assertEquals(u.query_score, u.prop_score)\n self.assert_sql_count(testing.db, go, 1)\n else:\n u = session.query(User).filter_by(name='joe').one()\n self.assertEquals(u.query_score, u.prop_score)\n \n for t in (tags_table, users_table):\n t.delete().execute()", "def test_dbscan_feature():\n # Parameters chosen specifically for this task.\n # Different eps to other test, because distance is not normalised.\n eps = 0.8\n min_samples = 10\n metric = 'euclidean'\n # Compute DBSCAN\n # parameters chosen for task\n core_samples, labels = 
dbscan(X, metric=metric,\n eps=eps, min_samples=min_samples)\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=metric)\n labels = db.fit(X, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)", "def parse_first_database(db, percentage_ids, alignment_lengths):\n #@@@ Try blast parser object\n results = MinimalBlastParser9(db)\n\n #@@@ cogent.util.transform.cartesian_product\n options = [(p,a) for p in percentage_ids for a in alignment_lengths]\n\n best_hits = {}\n for total_queries, (metadata, hits) in enumerate(results):\n fields = [i.strip() for i in metadata['FIELDS'].split(',')]\n name = metadata['QUERY']\n percentage_id = fields.index('% identity')\n bit_score = fields.index('bit score')\n alg_length = fields.index('alignment length')\n evalue = fields.index('e-value')\n subject_id = fields.index('Subject id')\n\n if not hits: \n continue\n\n best_hits[name] = []\n for p,a in options:\n # best bit score\n bbs = 0\n result = None\n\n for h in hits:\n h[percentage_id] = float(h[percentage_id])\n h[alg_length] = float(h[alg_length])\n h[bit_score] = float(h[bit_score])\n\n if h[percentage_id]>=p and h[alg_length]>=a and h[bit_score]>bbs:\n result = { 'a': { 'subject_id': h[subject_id],\n 'percentage_id': h[percentage_id],\n 'bit_score': h[bit_score],\n 'alg_length': int(h[alg_length]),\n 'evalue': float(h[evalue]) },\n 'b': { 'subject_id': None, \n 'bit_score': -1 } }\n bbs = h[bit_score]\n best_hits[name].append(result)\n\n return total_queries+1, best_hits", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n\n pool = Subset(train_data, subsample_idx)\n labelled = Subset(train_data, labelled_idx)\n # get ranking of datapoints\n idx_to_add = self.score(model, pool, labelled)\n\n # choose top scoring datapoints to label\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n\n return new_labelled_idx, new_unlabelled_idx", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n \n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. 
Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get ranking of unlabelled datapoints from batchBALD\n ranking = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = ranking[:num_query] # take in order given\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def read_labelmap_vidor(labelmap_file):\n\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n\n with open('idx_to_pred.pkl', 'rb') as f:\n idx_to_pred = pickle.load(f)\n\n # with PathManager.open(labelmap_file, \"r\") as f:\n # import pdb; pdb.set_trace()\n # for line in f:\n # if line.startswith(\" name:\"):\n # name = line.split('\"')[1]\n # elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n # class_id = int(line.strip().split(\" \")[-1])\n # labelmap.append({\"id\": class_id, \"name\": name})\n # class_ids.add(class_id)\n # return labelmap, class_ids\n\n \"\"\"\n (Pdb) categories\n [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}, {'id': 4, 'name': 'dance'}, {'id': 5, 'name': 'fall down'}, {'id': 6, 'name': 'get up'}, {'id': 7, 'name': 'jump/leap'}, {'id': 8, 'name': 'lie/sleep'}, {'id': 9, 'name': 'martial art'}, {'id': 10, 'name': 'run/jog'}, {'id': 11, 'name': 'sit'}, {'id': 12, 'name': 'stand'}, {'id': 13, 'name': 'swim'}, {'id': 14, 'name': 'walk'}, {'id': 15, 'name': 'answer phone'}, {'id': 17, 'name': 'carry/hold (an object)'}, {'id': 20, 'name': 'climb (e.g., a mountain)'}, {'id': 22, 'name': 'close (e.g., a door, a box)'}, {'id': 24, 'name': 'cut'}, {'id': 26, 'name': 'dress/put on clothing'}, {'id': 27, 'name': 'drink'}, {'id': 28, 'name': 'drive (e.g., a car, a truck)'}, {'id': 29, 'name': 'eat'}, {'id': 30, 'name': 'enter'}, {'id': 34, 'name': 'hit (an object)'}, {'id': 36, 'name': 'lift/pick up'}, {'id': 37, 'name': 'listen (e.g., to music)'}, {'id': 38, 'name': 'open (e.g., a window, a car door)'}, {'id': 41, 'name': 'play musical instrument'}, {'id': 43, 'name': 'point to (an object)'}, {'id': 45, 'name': 'pull (an object)'}, {'id': 46, 'name': 'push (an object)'}, {'id': 47, 'name': 'put down'}, {'id': 48, 'name': 'read'}, {'id': 49, 'name': 'ride (e.g., a bike, a car, a horse)'}, {'id': 51, 'name': 'sail boat'}, {'id': 52, 'name': 'shoot'}, {'id': 54, 'name': 'smoke'}, {'id': 56, 'name': 'take a photo'}, {'id': 57, 'name': 'text on/look at a cellphone'}, {'id': 58, 'name': 'throw'}, {'id': 59, 'name': 'touch (an object)'}, {'id': 60, 'name': 'turn (e.g., a screwdriver)'}, {'id': 61, 'name': 'watch (e.g., TV)'}, {'id': 62, 'name': 'work on a computer'}, {'id': 63, 'name': 'write'}, {'id': 64, 'name': 'fight/hit (a person)'}, {'id': 65, 'name': 'give/serve (an object) to (a person)'}, {'id': 66, 'name': 'grab (a person)'}, {'id': 67, 'name': 'hand clap'}, {'id': 68, 'name': 'hand shake'}, {'id': 69, 'name': 'hand wave'}, {'id': 70, 'name': 'hug (a person)'}, {'id': 72, 'name': 'kiss (a person)'}, {'id': 73, 'name': 'lift (a person)'}, {'id': 74, 'name': 'listen to (a person)'}, {'id': 76, 'name': 'push (another person)'}, {'id': 77, 'name': 'sing to (e.g., self, a person, a group)'}, {'id': 78, 'name': 'take (an object) from (a person)'}, {'id': 79, 'name': 'talk to (e.g., self, a person, a group)'}, {'id': 80, 'name': 'watch (a person)'}]\n (Pdb) class_whitelist\n {1, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20, 22, 24, 26, 27, 28, 29, 30, 34, 36, 37, 38, 41, 43, 45, 46, 47, 48, 49, 51, 52, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 79, 80}\n \"\"\"", "def label_index2node(label_index, labels):\n hi_pairs, med_pairs = labels\n if label_index < len(hi_pairs):\n return hi_pairs[label_index][0]\n else:\n error_msg = \"there is no node with label \"+str(label_index)\n assert label_index-len(hi_pairs) < len(med_pairs), error_msg\n return med_pairs[label_index-len(hi_pairs)][0]", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n # TODO get some metrics on the scores/plot?\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def parse_second_database(db, best_hits, percentage_ids_other,\n alignment_lengths_other):\n results = MinimalBlastParser9(db)\n\n #@@@ create function to return results\n for metadata, hits in results:\n fields = [i.strip() for i in metadata['FIELDS'].split(',')]\n name = metadata['QUERY']\n percentage_id = fields.index('% identity')\n bit_score = fields.index('bit score')\n alg_length = fields.index('alignment length')\n evalue = fields.index('e-value')\n subject_id = fields.index('Subject id')\n\n if name in best_hits:\n for i,(p,a) in enumerate([(p,a) for p in percentage_ids_other \\\n for a in alignment_lengths_other]):\n if not best_hits[name][i]:\n continue\n\n # best bit score\n bbs = 0\n result = None\n for h in hits:\n h[percentage_id] = float(h[percentage_id])\n h[alg_length] = float(h[alg_length])\n h[bit_score] = float(h[bit_score]) \n if h[percentage_id]>=p and h[alg_length]>=a and h[bit_score]>bbs:\n result = { 'subject_id': h[subject_id],\n 'percentage_id': h[percentage_id],\n 'bit_score': h[bit_score],\n 'alg_length': int(h[alg_length]),\n 'evalue': float(h[evalue]) }\n bbs = h[bit_score]\n if result:\n best_hits[name][i]['b'] = result", "def find_label_by_id(self, _id):\n search = True\n i = 0\n while search:\n if i == len(self.labels):\n break;\n\n if self.labels[i].id == _id:\n return self.labels[i]\n search = False\n #print self.labels[i].id\n i += 1\n if search:\n return None", "def set_label(self, labels_set=None):\n for pos in labels_set:\n self._q_bnn_circ.x(self.outputs[int(pos)])", "def put(self, id):\n context = request.environ.get('context')\n resp = dbapi.netdevices_labels_update(context, id, request.json)\n response = {\"labels\": list(resp.labels)}\n return response, 200, None", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if 
self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def _determine_index(self, id):\n\n return bisect.bisect_left(self._max_node_ids, id)", "def one_hot_encoding((uri, label), all_labels):\n labels = [0]*NUM_LABELS\n for i, l in enumerate(all_labels):\n if label == l:\n labels[i] = 1\n yield uri, labels", "def rdf_update_labels(rdf, node):\n final_list = []\n for i in node.get_labels():\n # print(i)\n final_list += rdf_get_branch(rdf, i)\n for i in final_list:\n node.add_label(i)", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n # if this is the first step, then just return the seed set\n self.num_steps += 1\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def update_labels(self, frame_num, labels):\n assert self.labels_exist(frame_num)\n assert not self._is_frozen[frame_num]\n assert self._frame_labels[frame_num].shape == self._frame_labels[frame_num].shape\n self._frame_labels[frame_num] = labels\n if labels.numel() > 0:\n self._highest_instance_id = max(self._highest_instance_id, labels.max().item())\n return self._highest_instance_id", "def node_assignment_bipartite(edge_index: nb.int64[:,:],\n edge_label: nb.int64[:],\n primaries: nb.int64[:],\n n: nb.int64) -> nb.int64[:]:\n group_ids = np.arange(n, dtype=np.int64)\n others = [i for i in range(n) if i not in primaries]\n for i in others:\n inds = edge_index[:,1] == i\n if np.sum(inds) == 0:\n continue\n indmax = np.argmax(edge_label[inds])\n group_ids[i] = edge_index[inds,0][indmax]\n\n return group_ids", "def update_db(bus_id=def_id):\n # Instantiate the SQL class for the business data we will be pulling\n sql = Sql(bus_id)\n\n # Check if we have previously analyzed the requested business\n # If not, pull the raw data and processing the data\n if sql.check() is not False:\n print \"Already in database!\"\n return\n\n # Get business data (name, country, etc) from Yelp API\n # Limited to 25,000 Yelp API calls per day\n # There are over 4 million reviews and over 140,000 businesses in database\n while True:\n try:\n bus_info = Yelp(bus_id)\n break\n except ValueError:\n pass\n except 
YelpAPI.YelpAPIError:\n return\n\n # Grab review text from SQL database\n sql.pull_reviews()\n\n # Use our trained XGBoost Classifier and TFIDF vectorizer\n # to determine whether each review is \"Favorable\" or \"Unfavorable\"\n model.predict(sql.reviews)\n\n # Conduct sentiment analysis and evaluate word counts in order to\n # \"penalize\" the weighting of reviews that don't fit the threshold\n nlp = Nlp(sql.reviews, sql.stars, model.preds,\n bus_info.name, bus_info.country, bus_info.city)\n\n # Assign variables from all the objects attributes we created\n # and then input them into a tuple.\n # The tuple is used to populate the SQL database for faster lookup of\n # our analysis at a later time\n # The tuple is also used to populate our dictionary which will be\n # used for variables that will be rendered on our website\n name = nlp.name\n city = nlp.city\n country = nlp.country\n old_rating = int(100 * nlp.stars_avg / 5)\n new_rating = int(nlp.new_rating * 100)\n rev_count = len(sql.reviews)\n count_5 = sql.stars.count(5)\n count_4 = sql.stars.count(4)\n count_3 = sql.stars.count(3)\n count_2 = sql.stars.count(2)\n count_1 = sql.stars.count(1)\n fav_count = (model.preds == 1).sum()\n unfav_count = (model.preds == 0).sum()\n avg_wts = int(100*sum(nlp.avg_wts) / len(nlp.avg_wts))\n bus_tup = (bus_id, name, city, country, old_rating, new_rating,\n rev_count, count_5, count_4, count_3, count_2, count_1,\n fav_count, unfav_count, avg_wts)\n sql.insert(bus_tup)\n print bus_tup", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def feature_subset(self,node,db,labels,ids):\n return None", "def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4):\n\n self._nodes = dict() # dict with courseid keys, CourseNode vals\n self._max_suggestions = max_suggestions\n self._max_courses = max_courses\n self._cache_mult = cache_mult\n \n db = database\n\n # Get dict mapping courses to unitary weights\n unitary_dict = db.get_unitary_dict(session)\n\n # Get dict mapping courses to adjacent courses and weights\n edge_dict = db.get_edges_dict(session)\n\n # Create CourseNodes\n for courseid in unitary_dict:\n courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(), popularity=unitary_dict[courseid])\n self._nodes[courseid] = courseNode\n\n\n # Create course edge dict for each CourseNode\n for courseid in edge_dict:\n node = self._nodes[courseid] # get node of interest\n adj_courses = edge_dict[courseid] # get inner dict {otherid: edge_weight}\n for otherid in adj_courses:\n other_node = self._nodes[otherid]\n node.addEdge(other_node, adj_courses[otherid])", "def create_tree(data_set, labels):\n labels = copy.copy(labels)\n class_list = [ eg[-1] for eg in data_set]\n # if all classes are same\n if class_list.count(class_list[0]) == len(class_list):\n return class_list[0]\n # only have class feature\n if len(data_set[0]) == 1:\n return majority_cnt(class_list)\n best_feat = choose_best_feature(data_set)\n best_feat_cls = labels[best_feat]\n node = {best_feat_cls: {}}\n del(labels[best_feat])\n feat_values = [eg[best_feat] for eg in data_set]\n unique_values = set(feat_values)\n for value in unique_values:\n sub_cls = labels[:]\n sub_ds = splite_dataset(data_set, best_feat, value)\n node[best_feat_cls][value] = create_tree(sub_ds, sub_cls)\n\n return node", "def test_dbscan_similarity():\n # Parameters chosen specifically for this task.\n eps = 0.15\n min_samples = 10\n # Compute similarities\n D = 
distance.squareform(distance.pdist(X))\n D /= np.max(D)\n # Compute DBSCAN\n core_samples, labels = dbscan(D, metric=\"precomputed\",\n eps=eps, min_samples=min_samples)\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)\n\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=\"precomputed\")\n labels = db.fit(D, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)", "def boostedTrees(train, \n labels, \n test, \n column_names = None, \n target = 'target',\n max_iterations = 200, \n min_child_weight = 5, \n step_size = 0.2, \n max_depth = 10, \n class_weights = None, \n min_loss_reduction = 0.5,\n verbose = 0,\n outlier_frac=0.0,\n outlier_method='EE',\n rescale_pred=False):\n if outlier_frac > 0:\n train, labels = filter_data(train, labels, cut_outlier_frac = outlier_frac, method = outlier_method, use_caching=False) # remove ourliers\n if column_names is None:\n column_names = range(np.shape(train)[1])\n target = 'target'\n newTrain = np.vstack((train.T, labels)).T\n pdTrain = pd.DataFrame(newTrain, columns = np.append(column_names,target))\n trainFrame = gl.SFrame(pdTrain)\n del newTrain, pdTrain\n pdTest = pd.DataFrame(test, columns = column_names)\n testFrame = gl.SFrame(pdTest)\n del pdTest\n model = gl.boosted_trees_classifier.create(trainFrame, \n target=target, \n max_iterations=max_iterations, \n min_child_weight=min_child_weight,\n step_size = step_size,\n max_depth = max_depth,\n class_weights = class_weights,\n min_loss_reduction = min_loss_reduction,\n verbose = verbose)\n preds = model.predict_topk(testFrame, output_type='probability', k=9)\n preds['id'] = preds['id'].astype(int)\n #some hacky dataframe magic, creates Nx10 matrix (id in first column)\n preds = preds.unstack(['class', 'probability'], 'probs').unpack('probs', '').sort('id')\n\n newPreds = preds.to_dataframe().values\n newPreds = newPreds[:,1:] #remove the id column\n del preds, model\n \n assert np.shape(newPreds)[0] == np.shape(test)[0], \"conversion failed somewhere, size doesn't match\"\n \n if rescale_pred:\n newPreds = rescale_prior(newPreds, np.bincount(labels))\n return newPreds", "def encode_target(_id):\n con, cur = connect_to_postgres()\n cur.execute(\"\"\"SELECT _id, income_label FROM adult where _id = {}\"\"\".format(_id))\n this_id, income_label = cur.fetchone()\n assert this_id == _id\n greater_than_50k = (income_label == ' >50K')\n cur.execute(\"\"\"\n BEGIN;\n UPDATE adult\n SET target = {}\n WHERE _id = {};\n COMMIT;\n \"\"\".format(greater_than_50k, _id))\n con.close()", "def get_label_set(corpusID):\n existing_corpus = DBcorpus.query.get_or_404(corpusID)\n corpus_data = fix_corpus_format(CorpusSchema().dump(existing_corpus).data)\n\n results = []\n for label in labels_set(existing_corpus):\n results.append(LabelSchema().dump(label).data)\n\n return {\"corpus\": corpus_data, \"labels\": results }, 200", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def update_best_iteration_results(self, **kwargs):\n \n column_names = self.sql_model_instance.graphs.graph_column_names\n \n for key, value in kwargs.iteritems():\n \n if key in column_names:\n \n setattr(self.sql_model_instance, key, value)\n \n self.db.session.add(self.sql_model_instance)\n self.db.session.commit()", "def get_by_label(self, label, table, verbose=True):\n assert (self.connected)\n \n theId = -1\n GET_BY_LABEL_COMMAND = \"SELECT 
id,label FROM {0} WHERE samples.label = \\\"{1}\\\"\".format(table, label)\n \n \n self.cursor.execute(GET_BY_LABEL_COMMAND)\n \n for row in self.cursor:\n theId = row[0]\n break\n \n if verbose and theId != -1: \n print(\"Item with id {0} and label '{1}' retrieved.\".format(theId, label))\n elif verbose: \n print(\"No item in the table '{0}' with the label '{1}' was found.\".format(table, label))\n \n return int(theId)", "def _db_store(self, labels: Sequence[Tuple[int, np.ndarray]], table: str) -> None:\r\n # Labels are expected to be\r\n # [\r\n # (class, points),\r\n # (class, points)\r\n # .\r\n # .\r\n # .\r\n # ]\r\n # Where points are np.arrays\r\n # There should also always be one fish in the scene => len(labels) >= 1\r\n\r\n n_points = np.prod(labels[0][1].shape)\r\n\r\n gen = ((self.n, class_, *points.ravel().round(3)) for class_, points in labels)\r\n\r\n # First two \"?\" are for image id and class respectively, rest are for points\r\n sql_command = (\r\n f'INSERT INTO {table} VALUES {(\"?\",\"?\",*[\"?\" for i in range(n_points)])}'\r\n ).replace(\"'\", \"\")\r\n\r\n self.cursor.executemany(sql_command, gen)", "def run(self, path_db: Path):\n found_files = self._file_loader.load(path_db)\n for key, value in found_files.items():\n self._db_ranking.save(key, value)", "def example(data_dir=None):\n if data_dir is None:\n raise ValueError(\"data_dir is not set.\")\n\n # Load text data\n (x_train_text, y_train_id), (x_test_text, y_test_id), (label_to_id, id_to_label) = \\\n load_text_and_label_id_from_files(Path(data_dir), test_dataset_ratio=0.2, errors='ignore')\n\n num_files_train = len(x_train_text)\n num_labels_train = len(set(y_train_id))\n\n num_files_test = len(x_test_text)\n num_labels_test = len(set(y_test_id))\n\n log.info(\"Number of labels found in training dataset: %d\" % (num_labels_train))\n log.info(\"Number of documents found in training dataset: %d\" % (num_files_train))\n\n log.info(\"Number of labels found in test dataset: %d\" % (num_labels_test))\n log.info(\"Number of documents found in test dataset: %d\" % (num_files_test))\n\n tfid = TfidfVectorizer() # More weight for words appearing frequently within fewer docs instead of in many docs.\n\n x_train_vectors = tfid.fit_transform(x_train_text) # Convert training dataset to tfid matrix\n x_test_vectors = tfid.transform(x_test_text) # Convert test set to tfid matrix\n\n classifier = MultinomialNB(alpha=.01) # Naive Bayes classifier, per doc alpha accounts for features missing in training samples\n classifier.fit(x_train_vectors, y_train_id)\n\n y_hat_test = classifier.predict(x_test_vectors) # y_hat contains list of string labels\n\n # Test against test dataset\n total_size = y_hat_test.shape[0]\n y_test = y_test_id\n\n matched_indices = (y_hat_test == y_test)\n\n matched_array = y_test[matched_indices]\n matched_count = matched_array.shape[0]\n log.info(\n \"Matched: %d out of Total: %d (%f percent)\" % (matched_count, total_size, matched_count * 100 / total_size))", "def load_datasets(args, train_test_split=0):\n logger.info(\"Loading data...\")\n df_data_path = \"./data/df_data.pkl\"\n graph_path = \"./data/text_graph.pkl\"\n if not os.path.isfile(df_data_path) or not os.path.isfile(graph_path):\n logger.info(\"Building datasets and graph from raw data... 
Note this will take quite a while...\")\n generate_text_graph(args.train_data, args.infer_data, args.max_vocab_len)\n df_data = load_pickle(\"df_data.pkl\")\n G_dict = load_pickle(\"text_graph.pkl\")\n G = G_dict[\"graph\"]\n \n if train_test_split == 0:\n infer_idx_start = G_dict[\"infer_idx_start\"]\n del G_dict\n \n logger.info(\"Building adjacency and degree matrices...\")\n A = nx.to_numpy_matrix(G, weight=\"weight\"); A = A + np.eye(G.number_of_nodes())\n degrees = []\n for d in G.degree(weight=None):\n if d == 0:\n degrees.append(0)\n else:\n degrees.append(d[1]**(-0.5))\n degrees = np.diag(degrees)\n X = np.eye(G.number_of_nodes()) # Features are just identity matrix\n A_hat = degrees@A@degrees\n f = X # (n X n) X (n X n) x (n X n) X (n X n) input of net\n \n if train_test_split == 1:\n logger.info(\"Splitting labels for training and inferring...\")\n ### stratified test samples\n test_idxs = []\n for b_id in df_data[\"label\"].unique():\n dum = df_data[df_data[\"label\"] == b_id]\n if len(dum) >= 4:\n test_idxs.extend(list(np.random.choice(dum.index, size=round(args.test_ratio*len(dum)), replace=False)))\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n # select only certain labelled nodes for semi-supervised GCN\n selected = []\n for i in range(len(df_data)):\n if i not in test_idxs:\n selected.append(i)\n save_as_pickle(\"selected.pkl\", selected)\n else:\n logger.info(\"Preparing training labels...\")\n test_idxs = [i for i in range(infer_idx_start, len(df_data))]\n selected = [i for i in range(infer_idx_start)]\n save_as_pickle(\"selected.pkl\", selected)\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n \n f_selected = f[selected]; f_selected = torch.from_numpy(f_selected).float()\n f_not_selected = f[test_idxs]; f_not_selected = torch.from_numpy(f_not_selected).float()\n labels_selected = list(df_data.loc[selected]['label'])\n if train_test_split == 1: \n labels_not_selected = list(df_data.loc[test_idxs]['label'])\n else:\n labels_not_selected = []\n \n f = torch.from_numpy(f).float()\n save_as_pickle(\"labels_selected.pkl\", labels_selected)\n save_as_pickle(\"labels_not_selected.pkl\", labels_not_selected)\n logger.info(\"Split into %d train and %d test lebels.\" % (len(labels_selected), len(labels_not_selected)))\n return f, X, A_hat, selected, labels_selected, labels_not_selected, test_idxs", "def add_labels(self, frame_nums, labels):\n assert all([self._frame_labels[t] is None for t in frame_nums])\n for t, labels_t in zip(frame_nums, labels):\n self._frame_labels[t] = labels_t\n if labels_t.numel() > 0:\n self._highest_instance_id = max(self._highest_instance_id, labels_t.max().item())\n\n return self._highest_instance_id + 1", "def _select_samples_to_label(self, data, trainer_cls, session):\n # Select the candidate samples for self-labeling, and make predictions.\n # Remove the validation samples from the unlabeled data, if there, to avoid\n # self-labeling them.\n indices_unlabeled = data.get_indices_unlabeled()\n val_ind = set(data.get_indices_val())\n indices_unlabeled = np.asarray(\n [ind for ind in indices_unlabeled if ind not in val_ind])\n predictions = trainer_cls.predict(\n session, indices_unlabeled, is_train=False)\n\n # Select most confident nodes. 
Compute confidence and most confident label,\n # which will be used as the new label.\n predicted_label = np.argmax(predictions, axis=-1)\n confidence = predictions[np.arange(predicted_label.shape[0]),\n predicted_label]\n # Sort from most confident to least confident.\n indices_sorted = np.argsort(confidence)[::-1]\n indices_unlabeled = indices_unlabeled[indices_sorted]\n confidence = confidence[indices_sorted]\n predicted_label = predicted_label[indices_sorted]\n\n # Keep only samples that have at least min_confidence_new_label confidence.\n confident_indices = np.argwhere(\n confidence > self.min_confidence_new_label)[:, 0]\n if confident_indices.shape[0] == 0:\n logging.info(\n 'No unlabeled nodes with confidence > %.2f. '\n 'Skipping self-labeling...', self.min_confidence_new_label)\n selected_samples = np.zeros((0,), dtype=np.int64)\n selected_labels = np.zeros((0,), dtype=np.int64)\n return selected_samples, selected_labels\n\n if data.keep_label_proportions:\n # Pick the top num_samples_to_label most confident nodes, while making\n # sure the ratio of the labels are kept.\n # First keep only nodes which achieve the min required confidence.\n num_confident = len(confident_indices)\n nodes_with_min_conf = indices_unlabeled[:num_confident]\n labels_with_min_conf = predicted_label[:num_confident]\n # Out of these, select the desired number of samples per class,\n # according to class proportions.\n selected_samples = []\n selected_labels = []\n for label, prop in data.label_prop.items():\n num_samples_to_select = int(prop * self.num_samples_to_label)\n label_idxs = np.where(labels_with_min_conf == label)[0]\n if len(label_idxs) <= num_samples_to_select:\n # Select all available samples labeled with this label.\n selected_samples.append(nodes_with_min_conf[label_idxs])\n selected_labels.append(labels_with_min_conf[label_idxs])\n elif num_samples_to_select > 0:\n # Select the first ones, since they are sorted by confidence.\n selected_samples.append(\n nodes_with_min_conf[label_idxs][:num_samples_to_select])\n selected_labels.append(\n labels_with_min_conf[label_idxs][:num_samples_to_select])\n selected_samples = np.concatenate(selected_samples)\n selected_labels = np.concatenate(selected_labels)\n else:\n # Pick the top num_samples_to_label most confident nodes,\n # irrespective of their labels.\n idx = np.amax(confident_indices)\n max_idx = min(self.num_samples_to_label - 1, idx)\n selected_samples = indices_unlabeled[:max_idx + 1]\n selected_labels = predicted_label[:max_idx + 1]\n\n return selected_samples, selected_labels", "def __init__(self, list_IDs, labels, batch_size=32, shuffle=True):\n self.dim = len(labels[labels.index2word[0]])\n self.batch_size = batch_size\n self.labels = labels\n self.list_IDs = list_IDs\n self.shuffle = shuffle\n self.on_epoch_end()\n\n # set up the encoder with all the possible chars\n self.eosTag = '#' # because it is not contained in the corpus\n self.enc = OneHotEncoder(handle_unknown='ignore')\n i = 0\n chars = []\n for word in labels.index2word:\n i += 1\n if i == 900: # the first 900 word should contain all possible chars\n break\n else:\n for c in word:\n if [c] not in chars:\n chars.append([c])\n chars.append([self.eosTag])\n self.hot_enc_len = len(chars)\n self.enc.fit(chars)\n self.word_length = 10", "def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the 
candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? (0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self", "def load_imbd_dataset(path=\"imdb.pkl\", nb_words=None, skip_top=0,\n maxlen=None, test_split=0.2, seed=113,\n start_char=1, oov_char=2, index_from=3):\n from six.moves import cPickle\n import gzip\n # from ..utils.data_utils import get_file\n from six.moves import zip\n import numpy as np\n from six.moves import urllib\n\n url = 'https://s3.amazonaws.com/text-datasets/'\n def download_imbd(filename):\n if not os.path.exists(filename):\n print('Downloading ...')\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n return filename\n\n filename = download_imbd(path)\n # path = get_file(path, origin=\"https://s3.amazonaws.com/text-datasets/imdb.pkl\")\n\n if filename.endswith(\".gz\"):\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'rb')\n\n X, labels = cPickle.load(f)\n f.close()\n\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(labels)\n\n if start_char is not None:\n X = [[start_char] + [w + index_from for w in x] for x in X]\n elif index_from:\n X = [[w + index_from for w in x] for x in X]\n\n if maxlen:\n new_X = []\n new_labels = []\n for x, y in zip(X, labels):\n if len(x) < maxlen:\n new_X.append(x)\n new_labels.append(y)\n X = new_X\n labels = new_labels\n if not X:\n raise Exception('After filtering for sequences shorter than maxlen=' +\n str(maxlen) + ', no sequence was kept. 
'\n 'Increase maxlen.')\n if not nb_words:\n nb_words = max([max(x) for x in X])\n\n # by convention, use 2 as OOV word\n # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)\n if oov_char is not None:\n X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]\n else:\n nX = []\n for x in X:\n nx = []\n for w in x:\n if (w >= nb_words or w < skip_top):\n nx.append(w)\n nX.append(nx)\n X = nX\n\n X_train = np.array(X[:int(len(X) * (1 - test_split))])\n y_train = np.array(labels[:int(len(X) * (1 - test_split))])\n\n X_test = np.array(X[int(len(X) * (1 - test_split)):])\n y_test = np.array(labels[int(len(X) * (1 - test_split)):])\n\n return X_train, y_train, X_test, y_test", "def evaluate(predictions, ids, label_identifiers):\n\n labels = []\n #For every prediction\n for i in range(len(predictions)):\n sentence_predictions = predictions[i]\n id_sequence = ids[i]\n sequence_labels = []\n counter = 0\n #For every predicted token\n for j in range(len(id_sequence)):\n word_prediction = sentence_predictions[j]\n id = id_sequence[j]\n #Take only the lemmas that have to be disambiguated\n if not id == '0':\n #Extract the identifiers of the sensekeys associated to the lemma\n indexes = label_identifiers[i][counter]\n new_predictions = []\n #Check if the identifier is a number \n for elem in indexes:\n try:\n index = int(elem)\n new_predictions.append(predictions[i][j][index])\n except ValueError:\n #If is not, MFS was applied\n new_predictions.append(elem)\n #Do the argmax on the extracted prediction indexes\n argmax = np.argmax(new_predictions)\n label = label_identifiers[i][counter][argmax]\n sequence_labels.append(label)\n counter += 1\n labels.append(sequence_labels)\n\n return labels", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def index_labtests(cnxdict, engine, labtestlist):\n for currtbl in labtestlist:\n currtbl = currtbl.replace(' ', '_')\n mycnx = connect_to_mysql_db_prod('caisismysql')\n mycnx['sql'] = \"\"\"\n ALTER TABLE `caisis`.`{0}`\n CHANGE COLUMN `LabTestId` `LabTestId` BIGINT(20) NOT NULL ,\n ADD PRIMARY KEY (`LabTestId`);\n ALTER TABLE `caisis`.`{0}`\n ADD INDEX `patientid` (`PatientId` ASC);\n ALTER TABLE `caisis`.`{0}`\n ADD INDEX `labdate` (`LabDate` ASC);\n ALTER TABLE `caisis`.`{0}`\n ADD FULLTEXT INDEX `labtest` (`LabTest` ASC);\n \"\"\".format(currtbl)\n dosqlexecute(mycnx)", "def load_data_and_labels(filename, dataset_name,is_train):\n label_count={}\n parameter_file = \"./parameters.json\"\n params = json.loads(open(parameter_file).read())\n if dataset_name == 'ag_news' or dataset_name == 'dbpedia' or dataset_name == 'sogou_news' or dataset_name == 'amazon_review_full' or dataset_name == 'amazon_review_polarity' :\n df = pd.read_csv(filename, 
names=['label', 'title', 'text'], dtype={'title': object,'text': object})\n selected = ['label', 'title','text','too_short','to_drop']\n\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[2]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df[selected[2]].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df[selected[2]].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n \n elif dataset_name == 'yelp_review_full' or dataset_name == 'yelp_review_polarity':\n df = pd.read_csv(filename, names=['label','text'], dtype={'text': object})\n selected = ['label','text','too_short','to_drop']\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[1]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['text'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['text'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n elif dataset_name == 'yahoo_answers':\n df = pd.read_csv(filename, names=['label', 'title', 'content','answer'], dtype={'title': object,'answer': object,'content': object})\n selected = ['label', 'title','content','answer','too_short','to_drop'] \n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['temp'] = df[['content','answer']].apply(lambda x: ' '.join(str(v) for v in x), axis=1)\n df['too_short']= df['temp'].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: 
(shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['temp'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['temp'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n return x_raw, y_raw, df, labels", "def trainModelOnSubset(self, data, attributes, class_label, indices=None, startNode = None, max_depth=-1):\n\t\t\n\t\t#print (data)\n\t\t#print(\"_________---_________\")\n\t\t\n\t\tif startNode == None:\n\t\t\tstartNode = self.root\n\t\t\n\t\t#print(startNode.name)\n\t\t\n\t\t#print(\"Attributes: \" + str(attributes))\n\t\t\n\t\tnew_attributes=attributes[:]\n\t\t\n\t\tif indices == None:\n\t\t\tsubset = data\t\t\t\n\t\telse:\n\t\t\tsubset = data.loc[indices]\n\t\t\n\t\t\n\t\tif (len(new_attributes) > 0 and not subset.empty):\n\t\t\tbestAttribute_index = startNode.trainNode(data, new_attributes, class_label, indices)\n\t\t\tbestAttribute = new_attributes[bestAttribute_index]\n\t\t\tnew_attributes.pop(bestAttribute_index)\n\t\t\tvalues = attributes_full[bestAttribute]\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\tfor i in values:\n\t\t\t\tnode = Node(i)\n\t\t\t\tstartNode.add_child(node)\n\t\t\t\t#print(node.get_name())\n\t\t\t\t#print(bestAttribute)\n\t\t\t\tindex = list(subset.index[subset[bestAttribute] == i])\n\t\t\t\tself.trainModelOnSubset(subset, new_attributes, class_label, index, node)\n\t\t\t\n\t\telse:\n\t\t\tbestAttribute=class_label\n\t\t\tvalues = attributes_full[bestAttribute]\n\t\t\tsublist = data[bestAttribute].tolist()\n\t\t\ttop = 0\n\t\t\tname = \"test\"\n\t\t\tfor i in values:\n\t\t\t\tif sublist.count(i) > top:\n\t\t\t\t\tname = i\n\t\t\t\t\ttop = sublist.count(i)\n\t\t\tnode = Node(name)\n\t\t\tstartNode.add_child(node)", "def search_for_label(self, label_id: str):\n return search_for_label(self.api_key, label_id)", "def labels(self, labels):\n self._labels = labels", "def select(self, labels):\n indexs = []\n \n for i in range(len(labels)):\n indexs.append(self.column_labels.index(labels[i]))\n new_rows = []\n for x in self.rows:\n new_row = []\n for index in indexs:\n new_row.append(x[index])\n new_rows.append(new_row)\n\n\n\n new_Table = T88ble(new_rows, labels)\n\n return new_Table", "def import_labels():\n\n dict_labels = df.set_index('id').to_dict()['breed']\n unique_labels = sorted(list(set(dict_labels.values())))\n for index, label in dict_labels.items():\n dict_labels[index] = unique_labels.index(label)\n return dict_labels, unique_labels", "def predict(self, test_vectors):\n # Calculate the best matching label for each node\n if not self.ready_for_prediction:\n # totals = sum((node.labels for node in self.codebook), Counter())\n for node in self.codebook:\n # Remove unlabeled hits\n try:\n node.labels.pop(None)\n except KeyError:\n pass\n # # Take into account small clusters. 
A frequency approach\n # freq_counter = Counter({label: count / totals[label]\n # for label, count in node.labels.items()})\n # if len(freq_counter) > 0:\n # node.label = freq_counter.most_common(1)[0][0]\n # else:\n # node.label = ''\n # Or ignore small clusters and just aim for accuracy\n if len(node.labels) > 0:\n node.label = node.labels.most_common(1)[0][0]\n else:\n node.label = ''\n self.ready_for_prediction = True\n\n # Return the label of the best matching unit for the given test_vectors\n if isinstance(test_vectors, collections.Iterable):\n return [self.bmu(test_vector).label for test_vector in test_vectors]\n else:\n return self.bmu(test_vectors).label", "def update(self, labels, preds):\n #labels, preds = check_label_shapes(labels, preds, True)\n\n for label, pred_label in zip(labels, preds):\n if len(pred_label.shape) > 2:\n pred_label = mx.nd.reshape(pred_label, shape=[-1, pred_label.shape[-1]])\n label = mx.nd.reshape(pred_label, shape=[-1])\n\n # Using argpartition here instead of argsort is safe because\n # we do not care about the order of top k elements. It is\n # much faster, which is important since that computation is\n # single-threaded due to Python GIL.\n pred_label = np.argpartition(pred_label.asnumpy().astype('float32'), -self.top_k)\n label = label.asnumpy().astype('int32')\n check_label_shapes(label, pred_label)\n num_dims = len(pred_label.shape)\n mask = (label != self.ignore_label).astype(np.int32)\n num_samples = mask.sum()\n\n num_classes = pred_label.shape[1]\n top_k = min(num_classes, self.top_k)\n for j in range(top_k):\n num_correct = ((pred_label[:, num_classes - 1 - j].flat == label.flat) * mask).sum()\n self.sum_metric += num_correct\n self.global_sum_metric += num_correct\n\n self.num_inst += num_samples\n self.global_num_inst += num_samples", "def assign_labels(basename, data_folder=Path(\"/data\"), verbose=False):\n urls_path = data_folder / \"graphs\" / basename / (basename + \".urls\")\n assert urls_path.exists(), \"Urls file not found!\"\n # check if labels dict already existing\n labels_path = data_folder / \"models\" / basename / (\"labels.json\")\n if labels_path.exists():\n print(\"Labels json already existing.\")\n else:\n print(\"Building labels json..\")\n # count number of lines in file\n num_lines = sum(1 for line in urls_path.open())\n labels_array = [0] * num_lines\n with urls_path.open() as f:\n clusters_count = Counter()\n labels = dict()\n class_index = 0\n for pos, line in enumerate(tqdm(f, total=num_lines)):\n # extract the TLD\n complete_domain = tldextract.extract(line).suffix\n # we only need the country domain now\n domain = complete_domain.split(\".\")[-1]\n # if domain unseen add it to class indices\n if domain not in labels:\n class_index += 1\n labels[domain] = class_index\n # assign label and add it to array\n y = labels[domain]\n labels_array[pos] = y\n clusters_count[domain] += 1\n labels_data = dict()\n # labels_data['labels'] = labels # do we really need this?\n labels_data['labels'] = {int(v): k for k, v in labels.items()}\n labels_data['count'] = clusters_count\n labels_data['array'] = labels_array\n if verbose:\n print(\"Found following labels:\")\n print(labels)\n with open(labels_path, 'w', encoding='utf-8') as outfile:\n json.dump(labels_data, outfile, ensure_ascii=False, indent=4)\n return labels_path", "def mnist_noniid(dataset, num_users):\n # num_shards, num_imgs = 2*num_users, int(dataset.data.size()[0]/2/num_users) # choose two number from a set with num_shards, each client has 2*num_imgs images\n # 
idx_shard = [i for i in range(num_shards)]\n # dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n # idxs = np.arange(dataset.data.size()[0])\n # labels = dataset.train_labels.numpy()\n #\n # # sort labels\n # idxs_labels = np.vstack((idxs, labels))\n # idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]\n # idxs = idxs_labels[0,:]\n #\n # # divide and assign\n # for i in range(num_users):\n # rand_set = set(np.random.choice(idx_shard, 2, replace=False))\n # idx_shard = list(set(idx_shard) - rand_set)\n # for rand in rand_set:\n # dict_users[i] = np.concatenate((dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)\n # return dict_users\n\n label_list = dataset.targets.numpy()\n minLabel = min(label_list)\n numLabels = len(dataset.classes)\n\n dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n for i in range(0, len(label_list)):\n tmp_target_node = int((label_list[i] - minLabel) % num_users)\n if num_users > numLabels:\n tmpMinIndex = 0\n tmpMinVal = math.inf\n for n in range(0, num_users):\n if (n) % numLabels == tmp_target_node and len(dict_users[n]) < tmpMinVal:\n tmpMinVal = len(dict_users[n])\n tmpMinIndex = n\n tmp_target_node = tmpMinIndex\n dict_users[tmp_target_node] = np.concatenate((dict_users[tmp_target_node], [i]), axis=0)\n return dict_users", "def gen_labels(self, nidxs=None, condense_labels=False):\n\n if nidxs is None:\n nidxs = self.nidx_train\n\n y = []\n\n for r in nidxs:\n y.append(self.node_labels[r])\n\n if condense_labels:\n # This should be improved, since this will fail if there are labels with exactly the same number of samples\n # Current solution use a bit of noise to minimize conflicts/favors\n y = self.encode_labels(y)\n lab_weights = 1. - np.mean(y, axis=0)\n noise = np.random.normal(loc=0, scale=0.0001, size=np.shape(y))\n y_condensed = np.argmax(minmax_scale(y * lab_weights + noise, axis=1), axis=1)\n return y_condensed\n\n return self.encode_labels(y)", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def labels(self, labels):\n\n self._labels = labels", "def label_7m(predictor, zhibo7m):\n for item_ in zhibo7m.find():\n try:\n live_texts_ = item_[\"content\"][\"textFeed\"]\n except Exception as e:\n zhibo7m.delete_one({\"_id\": item_['_id']})\n print(\"delete error id: {}\".format(item_[\"_id\"]))\n print(e)\n for l_index_, l_item_ in enumerate(live_texts_):\n l_item_[\"p_label\"] = predictor.predict(l_item_[\"msg\"])[0]\n live_texts_[l_index_] = l_item_\n # print(l_item_)\n zhibo7m.update_one({\"_id\": item_['_id']}, {\"$set\": {\"textFeed\": live_texts_}})", "def __init__(self, data_dir: str, modes: List[str] = ['train', 'test', 'dev'], pad_label='O', label_ids_dict=None):\n self.data_dir = data_dir\n self.label_ids = None\n unique_labels = set()\n\n for mode in modes:\n all_labels = []\n label_file = os.path.join(data_dir, 'labels_' + mode + '.txt')\n if not os.path.exists(label_file):\n logging.info(f'Stats calculation for {mode} mode is skipped as {label_file} was not found.')\n continue\n\n with open(label_file, 'r') as f:\n for line in f:\n line = line.strip().split()\n all_labels.extend(line)\n unique_labels.update(line)\n\n if mode == 'train':\n label_ids = {pad_label: 0}\n if pad_label in unique_labels:\n unique_labels.remove(pad_label)\n for label in sorted(unique_labels):\n label_ids[label] = len(label_ids)\n\n 
self.pad_label = pad_label\n if label_ids_dict:\n if len(set(label_ids_dict) | set(label_ids)) != len(label_ids_dict):\n raise ValueError(\n f'Provided labels to ids map: {label_ids_dict} does not match the labels '\n f'in the data: {label_ids}'\n )\n self.label_ids = label_ids_dict if label_ids_dict else label_ids\n logging.info(f'Labels: {self.label_ids}')\n self.label_ids_filename = os.path.join(data_dir, 'label_ids.csv')\n out = open(self.label_ids_filename, 'w')\n labels, _ = zip(*sorted(self.label_ids.items(), key=lambda x: x[1]))\n out.write('\\n'.join(labels))\n logging.info(f'Labels mapping saved to : {out.name}')\n\n all_labels = [self.label_ids[label] for label in all_labels]\n logging.info(f'Three most popular labels in {mode} dataset:')\n total_labels, label_frequencies, max_id = get_label_stats(\n all_labels, os.path.join(data_dir, mode + '_label_stats.tsv')\n )\n\n logging.info(f'Total labels: {total_labels}')\n logging.info(f'Label frequencies - {label_frequencies}')\n\n if mode == 'train':\n class_weights_dict = get_freq_weights(label_frequencies)\n logging.info(f'Class Weights: {class_weights_dict}')\n self.class_weights = fill_class_weights(class_weights_dict, max_id)\n self.num_classes = max_id + 1", "def findmaxidx(datasets, target='atom_label'):\n\n if target == 'atom_label':\n return _findmaxidx(datasets, 0)\n elif target == 'wle_label':\n return _findmaxidx(datasets, 2)", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n \ttf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n \ttf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n\t\tparsed_items = p.findall(line)\n\t\tuid = parsed_items[0]\n\t\thuman_string = parsed_items[2]\n\t\tuid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n \tif line.startswith(' target_class:'):\n \t\ttarget_class = int(line.split(': ')[1])\n \tif line.startswith(' target_class_string:'):\n \t\ttarget_class_string = line.split(': ')[1]\n \t\tnode_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n\t\tif val not in uid_to_human:\n\t\t\ttf.logging.fatal('Failed to locate: %s', val)\n\t\tname = uid_to_human[val]\n\t\tnode_id_to_name[key] = name\n\n return node_id_to_name", "def __init__(\n self,\n label_to_idx: dict = None,\n ):\n super().__init__(lazy=False)\n self.token_indexers = {\"tokens\": SingleIdTokenIndexer()}\n self.label_to_idx = label_to_idx", "def reindex(self, label2index):\n df_reindex = self.df.copy()\n def get_index_for_label(label):\n return label2index[label]\n df_reindex[\"index\"] = df_reindex[\"index\"].apply(get_index_for_label)\n self.df_reindex = df_reindex", "def test_dbscan_callable():\n # Parameters chosen specifically for this task.\n # Different eps to other test, because distance is not normalised.\n eps = 0.8\n min_samples = 10\n # metric is the function reference, not the string key.\n metric = distance.euclidean\n # Compute DBSCAN\n # parameters chosen for task\n core_samples, labels = 
dbscan(X, metric=metric,\n eps=eps, min_samples=min_samples)\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=metric)\n labels = db.fit(X, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)", "def verify_labels_order(x_test_indices=None, graph_labels=None, y_test_labels=None):\n test = True\n for k, i in enumerate(x_test_indices):\n if graph_labels[i] != y_test_labels[k]:\n test = False\n break\n if not test:\n raise Exception('Label from graph does not match with testing data')", "def lookup_label_id(service, labels):\n if not labels:\n return\n\n labelids = {} # label name => label id\n results = service.users().labels().list(userId='me').execute()\n mylabs = results.get('labels', [])\n for lab in mylabs:\n if len(labelids) == len(labels):\n break\n if lab['name'] in labels:\n labelids[lab['name']] = lab['id']\n return labelids", "def put_label(id):\n label_id = dao.set_label(id=id,\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t\tsys.exit()\t\n\t\ttext = html2text(response.read())\n\t\tdb.generate_index(text,url)", "def _refinement_random_walker(\n self,\n ds_labels,\n ds_maskROI,\n ds_mask,\n target_label):\n\n ds_labels[(ds_maskROI == False) & ds_mask] = target_label\n ds_labels[(ds_maskROI == False) & (ds_mask == False)] = -1\n\n labels = zoom(\n ds_labels,\n zoom=np.float32(\n self.size) /\n self.ds_size,\n order=0)\n maskROI = zoom(\n ds_maskROI,\n zoom=np.float32(\n self.size) /\n self.ds_size,\n order=0).astype(\n np.bool)\n\n # Extract labelled and unlabelled vertices\n m_unlabeled = (labels == 0) & (maskROI)\n m_foreground = (labels == target_label)\n\n unlabeled = np.ravel_multi_index(np.where(m_unlabeled), self.size)\n labeled = np.ravel_multi_index(np.where(labels != 0), self.size)\n #labeled = np.ravel_multi_index(np.where((m_foreground) | (labels > 0)), self.size)\n\n # Preparing the right handside of the equation BT xs\n B = self.L[unlabeled][:, labeled]\n mask = (labels[labels != 0]).flatten() == target_label\n fs = sparse.csr_matrix(mask).transpose()\n rhs = B * fs\n\n # Preparing the left handside of the equation Lu\n Lu = self.L[unlabeled][:, unlabeled]\n\n # Solve the linear equation Lu xu = -BT xs\n if self._pyamg_found:\n ml = ruge_stuben_solver(Lu)\n M = ml.aspreconditioner(cycle='V')\n else:\n M = None\n xu = cg(Lu, -rhs.todense(), tol=1e-3, M=M, maxiter=120)[0]\n\n probability = np.zeros(self.size, dtype=np.float32)\n probability[m_unlabeled] = xu\n probability[m_foreground] = 1\n\n return probability", "def label_index(self, label_index):\n\n self._label_index = label_index", "def label_index(self, label_index):\n\n self._label_index = label_index", "def change_labels(labels, cluster_name, idx_to_change, target_labels):\n assert(type(idx_to_change) == list)\n assert(type(target_labels) == list)\n assert(len(idx_to_change) == len(target_labels))\n\n sub_list = labels[labels == cluster_name]\n\n for idx, target in zip(idx_to_change, target_labels):\n sub_list[idx] = target\n\n labels[labels == cluster_name] = sub_list\n\n return labels", "def load_set_by_labels(labels):\n return 
get_default_repo().get_set_by_labels(labels)", "def mark(key, graph, labels):\n resample_id = [rand_flip_graph(graph, edge) for edge in key]\n mod_graph = graph.copy()\n for i, (label_index1, label_index2) in enumerate(key):\n index1 = label_index2node(label_index1, labels)\n index2 = label_index2node(label_index2, labels)\n if resample_id[i]:\n mod_graph.add_edge(index1, index2)\n else:\n mod_graph.remove_edge(index1, index2)\n return (resample_id, mod_graph)", "def initialize(labels, init_data):\n ds = None\n init_data = {k: list(v) for k, v in init_data.items()}\n\n logger.info(\n \"Initializing dataset from metadata:\\n %s\",\n '\\n '.join(\"{}: {}\".format(k, len(init_data[k])) for k in sorted(init_data.keys())))\n\n first_data = list(init_data.values())[0]\n if not all(len(first_data) == len(data) for data in init_data.values()):\n logger.error(\"Cannot initialize dataset from metadata dictionary that has values of different lengths\")\n return\n\n ds = tf.data.Dataset.from_tensor_slices(init_data)\n label2int, _ = tf_utils.make_label2onehot(tf.constant(labels, tf.string))\n logger.info(\n \"Generated label2target lookup table from indexes of array:\\n %s\",\n '\\n '.join(\"{:s} {:d}\".format(l, label2int.lookup(tf.constant(l, tf.string))) for l in labels))\n\n append_labels_as_targets = lambda x: dict(x, target=label2int.lookup(x[\"label\"]))\n return ds.map(append_labels_as_targets, num_parallel_calls=TF_AUTOTUNE)", "def majority_vote(labels):\n\n conta = Counter(labels)\n\n winner, winner_count = conta.most_common(1)[0]\n\n num_winner = sum([1 for count in conta.values() if count == winner_count])\n\n if num_winner == 1:\n return winner\n else:\n return majority_vote(labels[:-1])", "def update_nodes(nodes, bb):\n \n for node in nodes:\n node.set(\"label\", update_bb_string(node.get_attributes()[\"label\"], bb))\n node.set_name(update_node_name(node.get_name(), bb))", "def set_idf(self):\n for word, word_info in kc_util.gen_db(self.dictionary_db.cursor()):\n word_info.idf = self.idf(word_info.number)\n self.dictionary_db.replace(word, pickle.dumps(word_info))", "def getLabels(df, eps=3, min_samples=100):\n #instantiate dbscan\n db = DBSCAN(eps=eps, \n min_samples=min_samples, \n metric='euclidean', \n n_jobs=-1\n )\n \n #fit and predict to data\n db.fit_predict(df[['x', 'y']])\n \n #Returns the sorted unique elements of an array\n labels_unique = np.unique(db.labels_)\n #drop the -1 labels which are unlabeled\n labels_unique = labels_unique[labels_unique != -1]\n \n \n return db.labels_, labels_unique", "def insert_new_label(self, label, index, nvals):\n if label in self.labels: return\n self.labels.append(label)\n self.parents.append(self.find_parent_label(label))\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0", "def mapping_leaves(leaves1, leaves2, label1, label2, nodes1, links1, nodes2, links2, mode, ED, nnlabel):\n \n # Varibles for labeled leaves\n nleaves1 = []\n nleaves2 = []\n nlabel = []\n \n # Variables for unlabeled leaves\n UKleaves1 = []\n UKleaves2 = []\n UKlabel1 = []\n UKlabel2 = []\n \n nleaves = np.zeros(len(leaves1))\n label = np.zeros(len(leaves1))\n UK1idx = []\n UK2idx = []\n for i in range(0, len(leaves1)):\n if label1[i] in label2 and label1[i] < MAX_NODES:\n nleaves1.append(leaves1[i])\n nlabel.append(label1[i])\n idx = label2.index(label1[i])\n nleaves[idx] = leaves1[i]\n label[idx] = nnlabel[i]\n else:\n UKleaves1.append(leaves1[i])\n UKlabel1.append(label1[i])\n UK1idx.append(i)\n if label2[i] in label1 and label2[i] < MAX_NODES:\n 
nleaves2.append(leaves2[i])\n else:\n UKleaves2.append(leaves2[i])\n UKlabel2.append(label2[i])\n UK2idx.append(i)\n if len(UK1idx)>0:\n # Calculated the distance matrix from unmatched leaves to matched leaves\n dist1 = get_tree_dist_between_leaves(UKleaves1, nleaves1, nodes1, links1, mode, ED)\n dist2 = get_tree_dist_between_leaves(UKleaves2, nleaves2, nodes2, links2, mode, ED)\n # Calculate resorting rule with minimum weight matching of distance matrices\n dict1 = map_nodes_leaves(dist2, dist1)\n for i in range(0, len(dict1)):\n # Update labels using resorting rule.\n nleaves[UK2idx[i]] = leaves1[UK1idx[dict1[i]]]\n label[UK2idx[i]] = nnlabel[UK1idx[dict1[i]]]\n return nleaves, label" ]
[ "0.68891954", "0.6300339", "0.58898443", "0.5635267", "0.5226151", "0.51870143", "0.51595086", "0.50911933", "0.50677323", "0.49870756", "0.4969618", "0.4949305", "0.4914531", "0.49075586", "0.48753136", "0.48677775", "0.48287308", "0.47792596", "0.47701105", "0.47633627", "0.47311106", "0.47189683", "0.47187024", "0.4717691", "0.471671", "0.47091928", "0.47000366", "0.46964186", "0.46963125", "0.46918386", "0.4680784", "0.4674889", "0.4674507", "0.4672619", "0.467138", "0.4659081", "0.4646061", "0.46394148", "0.46225655", "0.4616795", "0.46081495", "0.4607809", "0.4605811", "0.460156", "0.4601154", "0.4565888", "0.45417437", "0.45375532", "0.45305958", "0.45300862", "0.45286378", "0.45219836", "0.4515916", "0.45084906", "0.44847256", "0.44815618", "0.44731957", "0.4466071", "0.44612312", "0.44536784", "0.44498622", "0.44473705", "0.44394523", "0.44354674", "0.44350904", "0.44320795", "0.4430615", "0.44255987", "0.44242394", "0.44226983", "0.44141424", "0.4409512", "0.4409512", "0.4409512", "0.4409512", "0.4409512", "0.440665", "0.44029677", "0.44001248", "0.43730897", "0.4365153", "0.4356969", "0.43512374", "0.434824", "0.43470109", "0.4344965", "0.43396464", "0.43382332", "0.4335971", "0.4335971", "0.4335949", "0.43330112", "0.43293944", "0.43223926", "0.43219933", "0.43189245", "0.4317767", "0.43118972", "0.4311335", "0.43112928" ]
0.73168194
0
Given an index database db, a list of labels (one for each id), and a list of ids to train on, computes the optimal split value. It modifies this node to have the optimal split type and value, and then returns the quality of the split as computed by the split_cost function. If features != None, it is a list of available feature indices to use in this split, or a function of 0 arguments that can be called to get a list of features.
def pick_best_split(self,db,labels,ids,features=None): idlabels = [labels[id] for id in ids] if misclassification_error(idlabels) == 0: #base case: no misclassifications self.type = 'v' self.value = idlabels[0] return 0 best = None bestCost = 0 splitval = None discrete = True if features == None: if len(ids) < db.numFeatures(): #look at all present features in the training set features = db.getPresentFeatures(ids) #print len(features),"of",db.numFeatures(),"features selected" else: features = range(db.numFeatures()) elif callable(features): features = features() for i in features: if len(db.entryLists[i]) == 0: continue idiscrete = db.discreteFeature[i] if idiscrete: #count number of labels of a certain value splitter = defaultdict(lambda:defaultdict(int)) #count of labels for missing values nmissing = defaultdict(int) for id in ids: val = db[i,id] if val is None: #missing values go down to all splits nmissing[labels[id]] += 1 continue splitter[val][labels[id]] += 1 if len(splitter) > continuous_variable_threshold: #print "Determined to be a continuous variable" idiscrete = False break if idiscrete: if len(splitter) <= 1: #only a single value continue #count number of missing values in all splits cmax = 0 for k in splitter: for l,v in nmissing.iteritems(): splitter[k][l] += v cmax = max(cmax,sum(splitter[k].values())) #shrink by fraction of (# of ids - largest child)/(# of ids) scale = (1.0-float(cmax)/float(len(ids)))*len(splitter) #evaluate cost cost = split_cost(splitter.values())*scale #print "Split on",i,"information gain",-cost,splitter.values() else: #continuous, need to learn the best split vals = [] presentlabels = [] nonelabels = [] for id in ids: val = db[i,id] if val is None: nonelabels.append(labels[id]) continue vals.append(val) presentlabels.append(labels[id]) if len(vals) <= 1: print "No values for feature",i,"?" print vals continue #print "Considering continuous split on",i s,cost = best_split(vals,presentlabels,nonelabels) scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2 cost *= scale #print "Result",s,"Information gain",-cost if cost < bestCost: best = i bestCost = cost discrete = idiscrete if not idiscrete: splitval = s if best is None: self.type = 'v' if len(ids) > 0: self.value = vote(idlabels) return misclassification_error(idlabels) else: self.value = None return 0 else: self.feature = best #discrete or inequality split if discrete: self.type = 's' else: self.type = 'i' self.value = splitval return bestCost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = 
{0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def __find_best_split(self, x, y):\n data = np.transpose(np.vstack((np.transpose(x), y)))\n num_features = data.shape[1] - 1\n\n # initialise splitting rule components\n integer_splitting_rule = None\n feature_index_to_split = None\n max_info_gain = 0\n\n # iterate over all the features and find best splits within these\n for feature in range(num_features):\n info_gain, split_int = self.__find_best_split_in_feature(\n data[:, [feature, -1]])\n if info_gain is None:\n continue\n # update max info gain so far as it iterates over features\n if info_gain > max_info_gain:\n max_info_gain = info_gain\n feature_index_to_split = feature\n integer_splitting_rule = int(split_int)\n\n return feature_index_to_split, integer_splitting_rule", "def best_split(values,labels,nonelabels=None):\n assert len(values) >= 2\n assert len(values) == len(labels)\n N = len(values)\n ilist = sorted((v,l) for (v,l) in zip(values,labels))\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for v,l in ilist:\n rightcount[l] += 1\n bestindex = -1\n bestcost = split_cost([leftcount,rightcount])\n\n cost = bestcost\n #costs = [cost]\n #print \"Split costs:\"\n for i in xrange(len(ilist)):\n v,l = ilist[i]\n rightcount[l] -= 1\n leftcount[l] += 1\n if i+1 >= len(ilist) or v == ilist[i+1][0]:\n #no splits when v is equal to the next value\n continue\n cost = split_cost([leftcount,rightcount])\n #print \" \",v,leftcount.values(),rightcount.values(),cost\n #costs.append(cost)\n if cost < bestcost:\n bestcost = cost\n bestindex = i\n #raw_input()\n if bestindex < 0:\n #no split found... try splitting in half\n splitval = (ilist[0][0]+ilist[-1][0])*0.5\n else:\n splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5\n if nonelabels is None:\n return (splitval,bestcost)\n #reevaluate counts\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for l in nonelabels:\n leftcount[l] += 1\n rightcount[l] += 1\n for v,l in ilist:\n if v <= splitval:\n leftcount[l] += 1\n else:\n rightcount[l] += 1\n return splitval,split_cost([leftcount,rightcount])", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to 
low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. 
Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. 
gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def pick_best_label(self,db,labels,ids):\n self.type = 'v'\n if len(labels) > 0:\n self.value = vote([labels[id] for id in ids])\n else:\n self.value = None\n return", "def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, 
regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def feature_subset(self,node,db,labels,ids):\n return None", "def get_best_split_all(x, y) -> Tuple[int, float, float]:\n m = x.shape[1]\n col_best_gin = np.ones(shape=m)\n col_best_val = np.ones(shape=m)\n for c in range(m):\n best = 1\n best_x = 0\n for i in np.unique(x[:, c]):\n gini = Tree.split(x[:, c], y, i)\n if gini < best:\n best = gini\n best_x = i\n col_best_gin[c] = best\n 
col_best_val[c] = best_x\n\n # Select best feature to split on\n col_idx = np.argmin(col_best_gin)\n # Convert to bool index\n col_idx = np.array(range(x.shape[1])) == col_idx\n\n return col_idx, col_best_val[col_idx], col_best_gin[col_idx]", "def _find_split(self, X, y, n_features):\r\n splits_info = []\r\n\r\n # Select features to consider\r\n features = self._feature_selection.get_features(n_features, self._feature_prob)\r\n\r\n # Get candidate splits\r\n for feature_id in features:\r\n for split_value in compute_split_values(X[:, feature_id]):\r\n splits_info.append(\r\n compute_split_info(self._split_criterion, X, y, feature_id, split_value, self._min_samples_leaf))\r\n\r\n splits = []\r\n for split_info in splits_info:\r\n if split_info is not None:\r\n gain, feature_id, split_value = split_info\r\n split = Split(feature_id, value=split_value, gain=gain)\r\n splits.append(split)\r\n else:\r\n continue\r\n\r\n selected_split = self._split_chooser.get_split(splits)\r\n return selected_split", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, 
model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def __find_best_split_in_feature(self, feature_and_class):\n\n # sort the feature and class and use changes in the class to reduce\n # number of potential split info gain calculations\n sorted_data = feature_and_class[\n feature_and_class[:, 0].astype(np.int).argsort()]\n potential_splits = self.__find_integers_with_class_change(sorted_data)\n info_gains = self.__info_gain_from_splits(potential_splits,\n sorted_data)\n\n # returning nothing in no information gains are found\n if len(info_gains) == 0:\n return None, None\n\n index = info_gains.index(max(info_gains))\n return info_gains[index], potential_splits[index]", "def find_split(x, y):\n\n # Need the starting entropy so we can measure improvement...\n start_entropy = calculate_entropy(y)\n\n # Best thus far, initialised to a dud that will be replaced immediately...\n best = {'infogain': -np.inf}\n\n # Randomly allocate the splits to be traversed (without replacement)\n feature_total = x.shape[1]\n feature_subset_count = int(np.sqrt(feature_total))\n feature_subset = np.random.permutation(feature_total)[:feature_subset_count]\n\n # Loop every possible split of every feature...\n for feature_index in feature_subset:\n for split in np.unique(x[:, feature_index]):\n\n left_indices = []\n right_indices = []\n\n # Get index of rows where x[row_index,feature_index] <= split\n for row_index,row in enumerate(x):\n left_indices.append(row_index) if x[row_index,feature_index] <= split else right_indices.append(row_index)\n\n left_ys = y[left_indices]\n right_ys = y[right_indices]\n\n nleft = len(left_ys)\n nright = len(right_ys)\n ntotal = nleft + nright\n infogain = start_entropy - (nleft / ntotal) * calculate_entropy(left_ys) - (\n nright / ntotal) * calculate_entropy(right_ys)\n\n if infogain > best['infogain']:\n best = {'feature': feature_index,\n 'split': split,\n 'infogain': infogain,\n 'left_indices': left_indices,\n 'right_indices': right_indices}\n return best", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", "def getBestDBScanModel ( features):\r\n\r\n\tprint(\"DBScan model\")\r\n\tmodels = []\r\n\tfor nbSamples in range( 2, len(features)//4):\r\n\t\tnbSamples *= 2\r\n\t\tfor distance in range( 1, 26):\r\n\t\t\tdistance /= 50\r\n\t\t\tmodels.append( st.getFittedDBScanModel( features, distance, nbSamples))\r\n\tbestModel, bestScore = st.getBestFittedModel( models, features)\r\n\tif not bestModel:\r\n\t\tprint(\"Regected all models\")\r\n\t\treturn False, -1\r\n\tprint(\"Score:\", bestScore)\r\n\tprint(\"Number of clusters:\", st.getNbClusters(bestModel))\r\n\tprint(\"Max distence:\", bestModel.get_params()[\"eps\"])\r\n\tprint(\"Min number of samples\", bestModel.get_params()[\"min_samples\"])\r\n\treturn bestModel, bestScore", "def get_best_split(rows):\n best_gain = 0\n best_question = None\n current_impurity = 
get_gini(rows)\n n_features = len(rows[0])\n\n for col in range(n_features):\n\n for row in rows:\n question = Question(col, row[col])\n true_rows, false_rows = partition(rows, question)\n\n if len(true_rows) == 0 or len(false_rows) == 0:\n break\n\n question_gain = get_info_gain(true_rows, false_rows, current_impurity)\n\n if question_gain >= best_gain:\n best_gain = question_gain\n best_question = question\n\n print(best_gain)\n print(best_question)\n return best_gain, best_question", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def choose_best_feature(data_set):\n feature_size = len(data_set[0]) - 1\n base_entropy = calc_entropy(data_set)\n best_info_gain = 0.0; best_feature = -1\n for i in xrange(feature_size):\n feat_list = [eg[i] for eg in data_set]\n unique_values = set(feat_list)\n new_entropy = 0.0\n for value in unique_values:\n sub_ds = splite_dataset(data_set, i, value)\n prob = len(sub_ds) / float(len(data_set))\n new_entropy += prob * calc_entropy(sub_ds)\n info_gain = base_entropy - new_entropy\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n\n return best_feature", "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))", "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def compute_splits(self, G, nw_name='test', train_frac=0.51, split_alg='spanning_tree', owa=True, fe_ratio=1,\n split_id=0, verbose=False):\n # Compute train/test split\n if split_alg == 'random':\n tr_E, te_E = stt.rand_split_train_test(G, train_frac)\n train_E, test_E, G, mp = 
pp.relabel_nodes(tr_E, te_E, G.is_directed())\n elif split_alg == 'naive':\n train_E, test_E = stt.naive_split_train_test(G, train_frac)\n elif split_alg == 'spanning_tree':\n train_E, test_E = stt.split_train_test(G, train_frac)\n elif split_alg == 'fast':\n train_E, test_E = stt.quick_split(G, train_frac)\n train_E_false, test_E_false = stt.quick_nonedges(G, train_frac, fe_ratio)\n elif split_alg == 'timestamp':\n train_E, test_E, G = stt.timestamp_split(G, train_frac)\n train_E = set(zip(train_E[:, 0], train_E[:, 1]))\n test_E = set(zip(test_E[:, 0], test_E[:, 1]))\n else:\n raise ValueError('Split alg. {} unknown!'.format(split_alg))\n\n # Compute non-edges\n if split_alg != 'fast':\n num_fe_train = len(train_E) * fe_ratio\n num_fe_test = len(test_E) * fe_ratio\n if owa:\n train_E_false, test_E_false = stt.generate_false_edges_owa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n else:\n train_E_false, test_E_false = stt.generate_false_edges_cwa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n\n # Set class attributes to new values\n self.set_splits(train_E, train_E_false, test_E, test_E_false, directed=G.is_directed(), nw_name=nw_name,\n split_id=split_id, split_alg=split_alg, owa=owa, verbose=verbose)\n\n return train_E, train_E_false, test_E, test_E_false", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def find_split(self, X, y):\n choices = y.size\n if choices <= 1:\n return None, None\n\n # find the number of each option in the current node.\n options_parent = [np.sum(y == c) for c in range(self.num_outcomes)]\n\n # find the gini of current node.\n best_gini = 1.0 - sum((n / choices) ** 2 for n in options_parent)\n best_idx, best_split = None, None\n\n # loop through the features to get splits and options.\n for idx in range(self.num_features):\n splits, 
options = zip(*sorted(zip(X[:, idx], y)))\n\n num_left = [0] * self.num_outcomes\n num_right = options_parent.copy()\n for i in range(1, choices):\n c = options[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n gini_left = 1.0 - sum(\n (num_left[x] / i) ** 2 for x in range(self.num_outcomes)\n )\n gini_right = 1.0 - sum(\n (num_right[x] / i) ** 2 for x in range(self.num_outcomes)\n )\n\n gini = (i * gini_left + (choices - i) * gini_right) / choices\n\n if splits[i] == splits[i - 1]:\n continue\n\n if gini < best_gini:\n best_gini = gini\n best_idx = idx\n best_split = (splits[i] + splits[i - 1]) / 2\n\n return best_idx, best_split", "def load_data(dataset_str, random_split, split_sizes, random_split_seed, add_val, add_val_seed, p_val, active_learning):\n if dataset_str == 'polblogs':\n features, labels, adj = load_polblogs()\n features = sp.lil_matrix(features)\n n, d = features.shape\n idx_train = None\n idx_val = None\n idx_test = None\n else:\n features, labels, adj, n, idx_train, idx_val, idx_test = load_base_data(dataset_str)\n\n train_mask, val_mask, test_mask = get_training_masks(n, features, labels, random_split, split_sizes, random_split_seed,\n add_val, add_val_seed, p_val, idx_train, idx_val, idx_test)\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n if active_learning:\n t = adj.toarray()\n sg = list(nx.connected_component_subgraphs(nx.from_numpy_matrix(t)))\n vid_largest_graph = sg[np.argmax([nx.adjacency_matrix(g).shape[0] for g in sg])].nodes()\n adj = t[vid_largest_graph,:]; adj = adj[:, vid_largest_graph]\n return sp.csr_matrix(adj), sp.csr_matrix(features.toarray()[vid_largest_graph,:]), labels[vid_largest_graph]\n else:\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask", "def _split_dataset(self, X, y, label, index, value, sample_weights=None):\n # YOUR CODE HERE\n # Hint: Do not forget to remove the index-th feature from X.\n # begin answer\n ret1=[]\n ret2=[]\n featVec=X[:,index]\n X=X[:,[i for i in range(X.shape[1]) if i!=index ]]\n for i in range(len(featVec)):\n if featVec[i]>=value:\n ret1.append(i)\n else:\n ret2.append(i)\n sub1_X = X[ret1,:]\n sub1_y = y[ret1]\n label_1=label[ret1]\n sub1_sample_weights=sample_weights[ret1]\n sub2_X = X[ret2,:]\n sub2_y = y[ret2]\n label_2=label[ret2]\n sub2_sample_weights=sample_weights[ret2]\n # end answer\n return sub1_X, sub1_y, label_1, sub1_sample_weights, sub2_X, sub2_y, label_2, sub2_sample_weights", "def split(self,best):\n\t\treturn self.cu_for_split(best,False)", "def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in 
unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val", "def _forward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = ()\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f in k:\n continue\n candidate_features = tuple(sorted([*k, f]))\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def score_features(self, features, predictor, cv_fold, verbose=0):\n # First we optimise the hyper parameters:\n # data has 4 keys but only 2 (x_train and y_train) will be used for the optimization\n best_params = optimize_hyper_parameters(features, predictor, cv_fold, verbose)\n predictor.set_hyper_parameters(best_params)\n\n # Then we fit the predictor:\n predictor.fit(features)\n\n # Afterwards, we generate the prediction\n y_pred = predictor.predict(features)\n\n # Finally, we compute the metrics:\n metric_res = score_prediction(features['y_test'], y_pred)\n\n self.predictor = predictor\n\n return metric_res, best_params", "def compute_splits(self, G, nw_name='test', train_frac=0.51, split_alg='spanning_tree', split_id=0, verbose=False):\n # Compute train/test split\n if split_alg == 'random':\n tr_E, te_E = stt.rand_split_train_test(G, train_frac)\n train_E, test_E, G, mp = pp.relabel_nodes(tr_E, te_E, G.is_directed())\n elif split_alg == 'naive':\n train_E, test_E = stt.naive_split_train_test(G, train_frac)\n elif split_alg == 'spanning_tree':\n train_E, test_E = stt.split_train_test(G, train_frac)\n elif split_alg == 'fast':\n train_E, test_E = stt.quick_split(G, train_frac)\n elif split_alg == 'timestamp':\n train_E, test_E, _ = stt.timestamp_split(G, train_frac)\n else:\n raise ValueError('Split alg. 
{} unknown!'.format(split_alg))\n\n # Make sure the edges are numpy arrays\n train_E = np.array(list(train_E))\n test_E = np.array(list(test_E))\n\n # Get the labels of train and test\n a = nx.adjacency_matrix(G, nodelist=range(len(G.nodes)))\n tr_labels = np.ravel(a[train_E[:, 0], train_E[:, 1]])\n te_labels = np.ravel(a[test_E[:, 0], test_E[:, 1]])\n\n # Split train and test edges in those with positive and negative signs\n pos_tr_e = train_E[np.where(tr_labels == 1)[0], :]\n neg_tr_e = train_E[np.where(tr_labels == -1)[0], :]\n pos_te_e = test_E[np.where(te_labels == 1)[0], :]\n neg_te_e = test_E[np.where(te_labels == -1)[0], :]\n\n # Make a train graph with appropriate weights +1 / -1\n H = G.copy()\n H.remove_edges_from(test_E)\n\n # Set class attributes to new values\n self.set_splits(train_E=pos_tr_e, train_E_false=neg_tr_e, test_E=pos_te_e, test_E_false=neg_te_e,\n directed=G.is_directed(), nw_name=nw_name, TG=H, split_id=split_id,\n split_alg=split_alg, verbose=verbose)\n\n return pos_tr_e, neg_tr_e, pos_te_e, neg_te_e", "def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask", "def get_next_split ( self, feature_matrix: np.ndarray, target_array: np.ndarray, tree_split: TreeSplits):\n # If only 1 y value, make a leaf node\n if len ( set ( target_array ) ) == 1:\n tree_split.updateTreeValues (\n feature_column = None,\n feature_value = None,\n node_type = None,\n nodes = {},\n children = target_array,\n )\n return tree_split\n\n # Get the presplit entropy\n presplit_entropy = self.evaluate_function ( target_array )\n\n column_values = {}\n for k, v in self.map_column_node_type.items():\n # If there's only one value in feature matrix \"X\", set the split value to infinity\n if len ( set ( feature_matrix [ :, k ] ) ) == 1:\n value = np.inf\n split = None\n class_ratios = 1\n elif v == \"continuous\":\n # Get the best possible continuous split for the column\n 
split, value, class_ratios = self.get_optimal_continuous_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n else:\n # Get the split value for the discrete column\n value, class_ratios = self.get_optimal_discrete_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n split = None\n\n column_values [ k ] = ( split, value, class_ratios )\n\n # Get the column with the largest gain ratio\n col_idx_with_min_value = max (\n column_values,\n key = lambda x: ( presplit_entropy - column_values.get ( x ) [ 1 ] )\n / column_values.get ( x ) [ 2 ],\n )\n\n # If stopping criteria are met or all splits are infinite, terminate the process\n if (\n self.early_stopping_comparison (\n column_values.get ( col_idx_with_min_value ) [ 1 ], self.early_stopping_value\n )\n ) or not np.isfinite ( column_values.get ( col_idx_with_min_value ) [ 1 ] ):\n self.get_terminal_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n node = tree_split,\n feature_matrix = feature_matrix ,\n target_array = target_array,\n )\n return tree_split\n\n # If the best split is continuous, add a continuous node\n if self.map_column_node_type.get ( col_idx_with_min_value ) == \"continuous\":\n return self.get_continuous_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [col_idx_with_min_value ] [ 0 ],\n feature_matrix = feature_matrix,\n target_array = target_array,\n node = tree_split,\n )\n\n # Otherwise, add a discrete node.\n else:\n return self.get_discrete_node (\n feature_matrix = feature_matrix,\n target_array = target_array,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n feature_column = col_idx_with_min_value,\n node = tree_split,\n )\n # End get_next_split", "def getSplitFunc(self, splitType):\n if splitType.upper() == \"INFO GAIN\":\n return self.findBestColumnSplitByInfoGain\n elif splitType.upper() == \"GAIN RATIO\":\n return self.findBestColumnSplitByGainRatio\n elif splitType.upper() == \"GINI INDEX\":\n return self.findBestColumnSplitByGini\n return None", "def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n 
node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right", "def _bestFeat2split(dataSet, impurity_crit, min_impurity_decrease, min_samples_split):\n\t\tm, n = dataSet.shape\n\t\tbestFeatInd, bestVal = None, DecisionTree._make_leaf(dataSet, impurity_crit)\n\n\t\tif m < min_samples_split or len(set(dataSet[:,-1])) == 1:\n\t\t\treturn bestFeatInd, bestVal\n\n\t\timpurity = m * impurity_crit(dataSet)\n\t\tmin_impurity = np.inf\n\t\t\n\n\t\tfor feat_ind in range(n-1):\n\t\t\tif type(dataSet[:, feat_ind][0]) != str:\n\t\t\t\tuniqVal = set(dataSet[:, feat_ind])\n\t\t\telse:\n\t\t\t\tuniqVal = map(set, subsets(list(dataSet[:, feat_ind])))\n\t\t\tfor val in uniqVal:\n\t\t\t\tD1, D2 = DecisionTree._binarySplit(dataSet, feat_ind, val)\n\t\t\t\tif len(D1) < min_samples_split or len(D2) < min_samples_split:\n\t\t\t\t\tcontinue\n\t\t\t\tnew_impurity = len(D1)*impurity_crit(D1) + len(D2)*impurity_crit(D2)\n\t\t\t\tif impurity - new_impurity < min_impurity_decrease:\n\t\t\t\t\tcontinue\n\t\t\t\tif new_impurity < min_impurity:\n\t\t\t\t\tmin_impurity = new_impurity\n\t\t\t\t\tbestFeatInd = feat_ind; bestVal = val\n\t\treturn bestFeatInd, bestVal", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask", "def __init__(self,\r\n max_depth=None,\r\n min_samples_split=2,\r\n min_samples_leaf=1,\r\n split_criterion=None,\r\n feature_selection=None,\r\n feature_prob=None,\r\n min_gain_split=0,\r\n split_chooser=None):\r\n self._n_classes = None\r\n self._max_depth = None\r\n self._split_criterion = None\r\n self._split_chooser = None\r\n self._feature_selection = None\r\n self._min_samples_split = None\r\n self._min_samples_leaf = None\r\n self._min_gain_split = None\r\n self._feature_prob = 
None\r\n\r\n if max_depth is None or max_depth > 0:\r\n self._max_depth = max_depth\r\n else:\r\n raise(ValueError(\"The depth of the tree must be greater than 0.\"))\r\n\r\n if split_criterion is not None:\r\n self._split_criterion = split_criterion\r\n else:\r\n raise (ValueError(\"The split criterion can not be None.\"))\r\n\r\n if split_chooser is not None:\r\n self._split_chooser = split_chooser\r\n else:\r\n raise (ValueError(\"The split chooser can not be None.\"))\r\n\r\n if feature_selection is not None:\r\n self._feature_selection = feature_selection\r\n else:\r\n raise (ValueError(\"The feature selection can not be None.\"))\r\n\r\n if min_samples_split is not None and min_samples_split > 1:\r\n self._min_samples_split = min_samples_split\r\n else:\r\n raise(ValueError(\"The min_samples_split must be greater than 1.\"))\r\n\r\n if min_samples_leaf is not None and min_samples_leaf > 0:\r\n self._min_samples_leaf = min_samples_leaf\r\n else:\r\n raise(ValueError(\"The min_samples_leaf must be greater than 0.\"))\r\n\r\n if min_gain_split is not None and min_gain_split >= 0:\r\n self._min_gain_split = min_gain_split\r\n else:\r\n raise(ValueError(\"The min_gain_split must be greater or equal than 0.\"))\r\n\r\n if feature_prob is not None:\r\n self._feature_prob = feature_prob", "def test_train_split_per_value():\n shape = (1000, 1000, 3)\n\n input1 = np.random.randint(10, size=shape, dtype=int)\n input2 = np.random.randint(10, size=shape, dtype=int)\n\n patch1 = EOPatch()\n patch1[INPUT_MASK_FEATURE] = input1\n\n patch2 = EOPatch()\n patch2[INPUT_MASK_FEATURE] = input2\n\n bins = [0.2, 0.6]\n\n split_task = TrainTestSplitTask((*INPUT_MASK_FEATURE, NEW_FEATURE_NAME), bins, split_type='per_value')\n\n # seeds should get ignored when splitting 'per_value'\n patch1 = split_task(patch1, seed=1)\n patch2 = split_task(patch2, seed=1)\n\n otuput1 = patch1[NEW_MASK_FEATURE]\n otuput2 = patch2[NEW_MASK_FEATURE]\n\n unique = set(np.unique(input1)) | set(np.unique(input2))\n\n for uniq in unique:\n folds1 = otuput1[input1 == uniq]\n folds2 = otuput2[input2 == uniq]\n assert_array_equal(np.unique(folds1), np.unique(folds2))", "def choose_split(data,treshold):\n n_features = len(data[0]) - 1 # number of columns\n quest_gain = [] # keep track of the gains and questions\n\n for col in range(1,n_features): # for each feature\n values = set([row[col] for row in data]) # unique values in the column\n for val in values: # for each value\n question = Question(col, val)\n \n # try splitting the dataset\n true_rows, false_rows = partition(data, question)\n\n # Skip this split if it doesn't divide the dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(data, true_rows, false_rows)\n quest_gain.append(Question_gain(gain,question))\n\n possible_question = [] # possible questions to ask\n n_quest_gain = len(quest_gain)\n\n if n_quest_gain == 0:\n return float('Inf'), float('NaN') #\n\n for x in range(n_quest_gain):\n if (quest_gain[x].gain >= treshold):\n possible_question.append(Question_gain(quest_gain[x].gain,quest_gain[x].question))\n \n n_possible_question = len(possible_question)\n if n_possible_question == 0:\n return float('Inf'), float('NaN')\n\n if n_possible_question>=2:\n [i, j] = random.sample(range(0, n_possible_question), 2)\n else:\n i = j = random.randint(0,n_possible_question-1)\n\n if possible_question[i].gain>=possible_question[j].gain:\n return possible_question[i].gain, 
possible_question[i].question\n else:\n return possible_question[j].gain, possible_question[j].question", "def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr", "def computeSoftwareMLModels(df,data_label,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model,rank_features=False,compute_null=False,n_splits=10,n_repeats=10,n_jobs=1):\n software_list = df[data_label].unique()\n print('Running ML classifer on {} {}'.format(len(software_list),data_label))\n scores_concat_df = pd.DataFrame()\n feature_rank_concat_df = pd.DataFrame()\n external_scores_concat_df = pd.DataFrame()\n\n perf_pval_dict = {}\n for pipe in software_list:\n ml_df = df[df[data_label]==pipe]\n print('{} {}'.format(data_label, pipe))\n\n #cross_val_score\n scores_df, null_df, pvalue, feature_rank_df = getMLModelPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,model_type,ml_model,rank_features,compute_null,n_splits,n_repeats,n_jobs) \n scores_df[data_label] = np.tile(pipe,len(scores_df))\n scores_concat_df = scores_concat_df.append(scores_df)\n \n if compute_null:\n null_df[data_label] = np.tile('null',len(null_df))\n scores_concat_df = scores_concat_df.append(null_df)\n perf_pval_dict[pipe] = pvalue\n\n # RFECV\n if rank_features:\n feature_rank_df[data_label] = np.tile(pipe,len(feature_rank_df))\n feature_rank_concat_df = feature_rank_concat_df.append(feature_rank_df)\n\n # explicit CV for internal vs external perfomance\n if group_col:\n external_scores_df = getIndependentTestSetPerf(ml_df,roi_cols,covar_continuous_cols,covar_cat_cols,outcome_col,group_col,model_type,ml_model)\n external_scores_df[data_label] = np.tile(pipe,len(external_scores_df))\n external_scores_concat_df = external_scores_concat_df.append(external_scores_df) \n\n return scores_concat_df, perf_pval_dict, feature_rank_concat_df, external_scores_concat_df", "def sub_select_features(features, strategy):\n\n def extract_one_index(y_val):\n index_ones = []\n y_prev = 0\n start_stop = []\n if y_val[-1] == 1:\n y_val = y_val.tolist() + [0]\n for i, y in enumerate(y_val):\n if y_prev == 0 and y == 1:\n start_stop = [i]\n if y_prev == 1 and y == 0:\n start_stop.append(i)\n index_ones.append(start_stop)\n y_prev = y\n return index_ones\n\n def wrapper(start_stop, maxi):\n size = start_stop[1] - start_stop[0]\n bound = (size+1)//2\n return [max(0, start_stop[0]-bound), min(maxi, start_stop[1]+bound)]\n\n def deduce_index_to_keep(one_index, maxi):\n wrapped = [wrapper(start_stop, maxi) for start_stop in one_index]\n to_keep = [idx for idx in range(wrapped[0][0], wrapped[0][1])]\n for start_stop in wrapped[1:]:\n to_keep += [idx for idx in range(start_stop[0], start_stop[1]) if idx > to_keep[-1]]\n return to_keep\n\n if strategy == 0:\n new_features = features # We do nothing\n\n else:\n new_features = dict()\n for which in ['train', 'test']:\n one_id = 
extract_one_index(features['y_'+which])\n true_idx = deduce_index_to_keep(one_id, len(features['y_'+which]))\n try:\n new_features['x_'+which] = features['x_'+which][true_idx]\n new_features['y_'+which] = features['y_'+which][true_idx]\n except IndexError as e:\n print(which)\n print(features['x_'+which].shape)\n print(features['y_'+which].shape)\n print(one_id)\n raise e\n\n return new_features", "def fit(self, train_data, train_labels, val_data, val_labels):\r\n split = np.append(-np.ones(train_labels.shape, dtype=np.float32),\r\n np.zeros(val_labels.shape, dtype=np.float32))\r\n ps = PredefinedSplit(split)\r\n\r\n sh = train_data.shape\r\n train_data = np.append(train_data, val_data , axis=0)\r\n train_labels = np.append(train_labels , val_labels, axis=0)\r\n del val_data, val_labels\r\n \r\n model = RandomForestClassifier(n_jobs=self.n_jobs,\r\n **self.scikit_args) \r\n \r\n params = {'n_estimators':np.arange(1,1001,50)} \r\n #Coarse search \r\n gs = GridSearchCV(model, params, refit=False, n_jobs=self.n_jobs, \r\n verbose=self.verbose, cv=ps)\r\n gs.fit(train_data, train_labels)\r\n \r\n #Fine-Tune Search\r\n params = {'n_estimators':np.arange(gs.best_params_['n_estimators']-50,\r\n gs.best_params_['n_estimators']+50)} \r\n \r\n self.gs = GridSearchCV(model, params, refit=self.refit, n_jobs=self.n_jobs, \r\n verbose=self.verbose, cv=ps)\r\n self.gs.fit(train_data, train_labels)\r\n \r\n if not self.refit:\r\n model.set_params(n_estimators=gs.best_params_['n_estimators'])\r\n self.gs = model\r\n self.gs.fit(train_data[:sh[0]], train_labels[:sh[0]])", "def make_splits(input_pkl, test_split=0.1, val_split=0.1):\n if (test_split > 1) or (val_split > 1) or (test_split + val_split > 1) or (test_split <= 0) or (val_split <= 0):\n logging.warning('Check the input for make splits, quitting')\n exit()\n\n main_dict = load_pickle(input_pkl)\n data, labels = main_dict['data'], main_dict['labels']\n idx_arr = np.random.choice(len(data), len(data))\n data, labels = data[idx_arr], labels[idx_arr]\n print(len(data[0][-1]))\n # Find the split sizes\n val_split = int(len(data) * val_split)\n test_split = val_split + int(len(data) * test_split)\n\n # Make and save the splits\n save_pickle({'data': data[:val_split], 'labels': labels[:val_split]}, 'data/val.pkl')\n save_pickle({'data': data[val_split:test_split], 'labels': labels[val_split:test_split]}, 'data/test.pkl')\n save_pickle({'data': data[test_split:], 'labels': labels[test_split:]}, 'data/train.pkl')", "def split(self, X, y, feature_array):\n n, p = X.shape\n\n best_gain = 0\n best_split_point = 0\n best_feature_id = -1\n for feature_id in feature_array:\n cur_gain, cur_split_point = self.find_best_split(\n X[:, feature_id], y)\n if cur_gain > best_gain - self.eps:\n best_gain = cur_gain\n best_split_point = cur_split_point\n best_feature_id = feature_id\n\n assert(best_feature_id != -1)\n\n x = X[:, best_feature_id]\n left_index = x < best_split_point\n right_index = x >= best_split_point\n\n self.split_id = best_feature_id\n self.split_val = best_split_point\n\n return (left_index, right_index)", "def fit(self, data, targets):\n # update these three\n self.idx = 0\n self.val = None\n self.left = None\n self.right = None\n ### YOUR CODE HERE\n # i have added a slow and a fast version\n \n num_points, num_features = data.shape\n # print('num points, num_features', num_points, num_features)\n \n def feat_score(feat_idx):\n feat = data[:, feat_idx].copy()\n perm = np.argsort(feat)\n s_feat = feat[perm]\n s_targets = targets[perm]\n target_var = 
((s_targets - s_targets.mean())**2).sum()\n s_left, s_right = sum_squares(s_targets)\n def score(idx, _vals):\n ## slow version\n #left = _vals[0:idx]\n #right = _vals[idx:]\n #assert len(left) + len(right) == len(_vals), (len(left), len(right), len(_vals))\n #left_mean = np.mean(left)\n #right_mean = np.mean(right)\n #left_error = np.sum((left-left_mean)**2)\n #assert np.allclose(left_error, s_left[idx]) \n #right_error = np.sum((right-right_mean)**2)\n #assert np.allclose(right_error, s_right[idx])\n # return left_error+right_error\n # fast version\n return s_left[idx] + s_right[idx]\n # score for every split\n scores = np.array([score(x, s_targets) for x in range(0, num_points)])\n assert scores.min() <= target_var, target_var\n best_score_idx = np.argmin(scores)\n best_score = scores[best_score_idx]\n val = s_feat[best_score_idx]\n # print('best score', feat_idx, best_score, best_score_idx, val, s_feat[best_score_idx+1])\n \n return best_score, {'val': val, \n 'left': np.mean(s_targets[:best_score_idx]), \n 'right': np.mean(s_targets[best_score_idx:])\n } \n\n split_scores = []\n for f in range(0, num_features):\n total_score, _params = feat_score(f)\n split_scores.append(total_score)\n # print('score of {0} - {1}'.format(feat_names[f], total_score))\n # print('feature scores:', np.array(split_scores))\n best_feat = np.argmin(split_scores)\n best_score = split_scores[best_feat]\n # print('Best Feature idx: {0} - Best Cost: {1}'.format(best_feat, best_score))\n score_again, params = feat_score(best_feat)\n # print('double check score', score_again, best_score)\n self.idx = best_feat\n self.val = params['val']\n self.left = params['left']\n self.right = params['right']\n print(\"idx={}, val={}, left={}, right={}\".format(self.idx, self.val, self.left, self.right))\n assert not np.isnan(self.left)\n assert not np.isnan(self.right)\n ### END CODE", "def convert_examples_to_features(self, examples_paths, label_list, max_seq_length, tokenizer, set_type):\n \n if all([os.path.exists(path.replace('examples', 'features')) for path in examples_paths]):\n features_paths = examples_paths\n \n else:\n\n def f(example):\n labels_ids = torch.FloatTensor(example.label).unsqueeze(0).to(torch.int64)\n input_ids = torch.FloatTensor(example.text_a).unsqueeze(0).to(torch.int64)\n #attention_mask = torch.ones(input_ids.size()).to(torch.int64)\n attention_mask = torch.FloatTensor(example.text_b).unsqueeze(0).to(torch.int64)\n token_type_ids = torch.zeros(input_ids.size()).to(torch.int64)\n output_mask = (labels_ids != -100)\n return InputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label_ids=labels_ids,\n output_mask=output_mask)\n\n for index_split, examples_split in enumerate(examples_paths):\n split = self.load_object(examples_split)\n print(f\"Computing split {index_split+1} / {self.n_splits}... 
Split size: {len(split)}\")\n features = Parallel(n_jobs=-1)(delayed(f)(example) for example in tqdm(split))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_features_split-{index_split}.pkl'), features)\n\n features_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_features_split-{index_split}.pkl') for index_split in range(self.n_splits)]\n \n return features_paths", "def best_split(self):\r\n best_splits = [[0, None, None]]\r\n impurity, best_S, best_xj = 0, None, None\r\n \r\n for xj in self.x_names:\r\n for S in self.potential_splits(xj):\r\n ir = float(self.impurity_reduction(xj, S))\r\n if ir > impurity:\r\n impurity, best_S, best_xj = ir, S, xj\r\n best_splits.append([S, xj])\r\n else: \r\n pass\r\n \r\n return best_S, best_xj", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def __init__(self, max_features, 
min_samples_leaf, min_samples_split=2, \n max_depth=None, n_estimators=100, split='logrank',\n split_threshold_mode='exhaustive', random_state=47,\n n_jobs=9, oob_score=True, feature_importance=True):\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = int(min_samples_leaf)\n self.max_features = int(max_features)\n self.split_threshold_mode = split_threshold_mode\n self.n_jobs = n_jobs\n self.oob_score = oob_score\n self.feature_importance = feature_importance\n self.column_names = None\n self.oob_score_ = None\n self.feature_importances_ = None\n\n if random_state is None:\n self.random_state = np.random.RandomState()\n elif type(random_state) == int:\n self.random_state = np.random.RandomState(random_state)\n else:\n self.random_state = random_state\n\n if split == 'logrank':\n self.split_score_function = logrank\n else:\n raise NotImplementedError('Unsupported split criterion '\n + '\"{0}\"'.format(split))\n\n # # name of the time column used within the class. this can be anything\n # self._time_column = \"time\"\n # # name of the event column used within the class. this can be anything\n # self._event_column = \"event\"", "def get_split(self,X,y):\n \n BEST_COL = 0\n BEST_SPLIT =0\n BEST_IMPUR = 99\n for i,feature in enumerate(X.T):\n arg_sort=np.argsort(feature) #Sort the feature for optimizing the find of splitting points\n feature= feature[arg_sort]\n y_sort = y[arg_sort]\n splits = self.possible_splits(feature,y_sort) #Get \n\n impur,splits = self.test_split(feature,y_sort,splits) #Get impurity for splitting points\n best_idx = np.argmin(impur)\n best_impur = impur[best_idx]\n \n if best_impur==0.0: #Found perfect split, terminate\n return(i,splits[best_idx])\n elif best_impur<BEST_IMPUR:\n BEST_IMPUR=best_impur\n BEST_SPLIT=splits[best_idx]\n BEST_COL=i\n return (BEST_COL,BEST_SPLIT)", "def get_coreset(self, pool_features, labelled_features):\n new_batch = []\n self.update_distances(pool_features, labelled_features, reset_dist=True)\n for _ in range(self.num_query):\n # choose furthest point\n ind = np.argmax(self.min_distances)\n # New examples should not be in already selected since those points\n # should have min_distance of zero to a cluster center.\n assert ind not in new_batch\n # update distances with this point\n self.update_distances(pool_features, pool_features[ind, :].reshape(1,-1), reset_dist=False)\n new_batch.append(ind)\n print(f\"Maximum distance from cluster centers is {max(self.min_distances)}\")\n\n return new_batch", "def split(features, groundtruths, n_split):\n\n if n_split == 1:\n return features, groundtruths\n\n tags = list(set(groundtruths))\n new_index = {}\n for tag in tags:\n new_index[tag] = []\n for index, gt in enumerate(groundtruths):\n new_index[gt].append(index)\n new_feats = []\n new_gts = []\n for i in range(0, n_split):\n indexes = []\n for tag in tags:\n ref = len(new_index[tag])/n_split\n indexes.append(new_index[tag][ref*i:ref*(i+1)])\n \"\"\"\n ..todo:: manage multiple tags!\n \"\"\"\n indexes = indexes[0] + indexes[1]\n # print(features[:5])\n # print(len(indexes))\n # print(len(indexes[0]))\n # print(len(indexes[1]))\n # sys.exit()\n indexes.sort()\n new_gts.append([groundtruths[j] for j in indexes])\n new_feats.append([features[j] for j in indexes])\n return new_feats, new_gts", "def get_min_across_splits_continuous (\n feature_array: np.ndarray, target_array: np.ndarray, splits: np.ndarray, evaluate_function: Callable\n ):\n n = len ( splits )\n if n > 
500:\n # If many split points, use some threading\n with multiprocessing.Pool ( processes = 8 ) as p:\n # Get evaluation scores across all the splits\n post_split_evals = dict (\n zip (\n range ( len ( splits ) ),\n p.starmap (\n BaseTree.get_split_goodness_fit_continuous,\n zip ( [ feature_array] * n, [ target_array ] * n, splits, [ evaluate_function ] * n ),\n ),\n )\n )\n p.close()\n else:\n # If not too many split points, get scores across all splits\n post_split_evals = dict (\n zip (\n range ( len ( splits ) ),\n map (\n lambda x: BaseTree.get_split_goodness_fit_continuous ( * x ),\n zip ( [ feature_array ] * n, [ target_array ] * n, splits, [ evaluate_function ] * n ),\n ),\n )\n )\n # Get the minimum split based on gain ratio\n min_eval = min (\n post_split_evals,\n key = lambda x: pipe (\n post_split_evals.get ( x ),\n lambda results: results [ 0 ] / results [ 1 ], # entropy / intrinsic value\n ),\n )\n\n # Return the best split and the splits scores\n return ( splits [ min_eval ], * post_split_evals.get ( min_eval ) )\n # End get_min_across_splits_continuous()", "def best_cat_brute_force_split(self, ind, dep):\n split = Split(None, None, None, None, 0)\n all_dep = np.unique(dep.arr)\n for i, ind_var in enumerate(ind):\n ind_var = ind_var.deep_copy()\n unique = np.unique(ind_var.arr)\n\n freq = {}\n if dep.weights is None:\n for col in unique:\n counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)\n freq[col] = cl.defaultdict(int)\n freq[col].update(np.transpose(counts))\n else:\n for col in unique:\n counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)\n freq[col] = cl.defaultdict(int)\n for dep_v in all_dep:\n freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()\n\n if len(list(ind_var.possible_groupings())) == 0:\n split.invalid_reason = InvalidSplitReason.PURE_NODE\n\n choice, highest_p_join, split_chi, dof = None, None, None, None\n for comb in ind_var.all_combinations():\n freqs = [ sum( [ cl.Counter(freq[key]) for key in c ], cl.Counter()) for c in comb ]\n\n if sum([ (sum(x.values()) < self.min_child_node_size) for x in freqs ] ) > 0:\n continue\n keys = set(sum([ list(f.keys()) for f in freqs ], []))\n\n n_ij = np.array(\n [ [ col.get(k, 0) for k in keys ] for col in freqs ]\n )\n\n chi, p_split, dof = chisquare(n_ij, dep.weights is not None)\n\n if (choice is None or p_split < highest_p_join or (p_split == highest_p_join and chi > split_chi)) and p_split < self.alpha_merge:\n choice, highest_p_join, split_chi = comb, p_split, chi\n\n temp_split = Split(i, choice, split_chi, highest_p_join, dof, split_name=ind_var.name)\n better_split = (not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)) and choice is not None\n if better_split: split, temp_split = temp_split, split\n\n if split.valid() and choice is not None:\n chi_threshold = self.split_threshold * split.score\n\n if temp_split.valid() and temp_split.score >= chi_threshold:\n for sur in temp_split.surrogates:\n if sur.column_id != i and sur.score >= chi_threshold:\n split.surrogates.append(sur)\n\n temp_split.surrogates = []\n split.surrogates.append(temp_split)\n\n split.sub_split_values(ind[split.column_id].metadata)\n\n return split", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = 
np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = [True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def _score_fn(self, unused_context_features, group_features, mode, unused_params, unused_config):\n with tf.compat.v1.name_scope(\"input_layer\"):\n group_input = [\n tf.compat.v1.layers.flatten(group_features[name])\n for name in sorted(self.example_feature_columns())\n ]\n\n # if self.sparse_features:\n # self.sparse_emb_inputlist = [\n # tf.compat.v1.layers.flatten(group_features[name])\n # for name in self.sparse_features\n # ]\n\n self.group_input = group_input\n input_layer = tf.concat(self.group_input, 1)\n tf.compat.v1.summary.scalar(\"input_sparsity\",\n tf.nn.zero_fraction(input_layer))\n tf.compat.v1.summary.scalar(\"input_max\",\n tf.reduce_max(input_tensor=input_layer))\n tf.compat.v1.summary.scalar(\"input_min\",\n tf.reduce_min(input_tensor=input_layer))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n cur_layer = tf.compat.v1.layers.batch_normalization(\n input_layer, training=is_training)\n for i, layer_width in enumerate(int(d) for d in self.hidden_layer_dims):\n cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)\n cur_layer = tf.compat.v1.layers.batch_normalization(\n cur_layer, training=is_training)\n cur_layer = tf.nn.relu(cur_layer)\n tf.compat.v1.summary.scalar(\"fully_connected_{}_sparsity\".format(i),\n tf.nn.zero_fraction(cur_layer))\n\n cur_layer = tf.compat.v1.layers.dropout(\n cur_layer, rate=self.dropout_rate, training=is_training)\n logits = tf.compat.v1.layers.dense(cur_layer, units=self.group_size)\n self.logits = logits\n\n if 
self._use_multi_head():\n # Duplicate the logits for both heads.\n return {_PRIMARY_HEAD: logits, _SECONDARY_HEAD: logits}\n else:\n return logits", "def choose_split_value(attrs, classes):\n indices = np.argsort(attrs)\n classes = classes[indices]\n attrs = attrs[indices]\n max_gain = 0.0\n max_gain_value = None\n for i in range(len(attrs) - 1):\n if classes[i] != classes[i+1]:\n mean = (attrs[i] + attrs[i+1]) / 2.0\n gain = inform_gain(attrs, classes, mean)\n if gain > max_gain:\n max_gain = gain\n max_gain_value = mean\n return max_gain_value, max_gain", "def find_best_split(self, x, y):\n\n # check cornor case: all same x\n n = y.size\n\n if all(x == x[0]):\n return (0, amin(x) - self.eps)\n\n sort_index = argsort(x)\n x_sorted = x[sort_index]\n y_sorted = y[sort_index]\n\n # build potential split index array\n split_index_array = array([i for i in range(1, n)\n if x_sorted[i] != x_sorted[i - 1]\n and y_sorted[i] != y_sorted[i - 1]])\n\n # split_index_array = linspace(\n # 0, y.size, num=min(5, ceil(n / 5)), endpoint=False, dtype='int')\n # split_index_array = split_index_array[1:]\n\n best_split_index = 0\n best_gain = 0\n h_x = self.cur_entropy\n\n for split_index in split_index_array:\n left_entropy = self.entropy(y_sorted[:split_index])\n right_entropy = self.entropy(y_sorted[split_index:])\n h_xy = (split_index * left_entropy +\n (n - split_index) * right_entropy) / n\n cur_gain = h_x - h_xy\n\n if cur_gain > best_gain:\n best_gain = cur_gain\n best_split_index = split_index\n\n if best_split_index != 0:\n best_split_point = (x_sorted[best_split_index] +\n x_sorted[best_split_index - 1]) / 2\n else:\n best_split_point = x_sorted[best_split_index] - self.eps\n\n return (best_gain, best_split_point)", "def model(features, test_features, encoding='ohe', n_folds=5):\n\n # Extract the ids\n train_ids = features['SK_ID_CURR']\n test_ids = test_features['SK_ID_CURR']\n\n # Extract the labels for training\n labels = features['TARGET']\n\n # Remove the ids and target\n features = features.drop(columns=['SK_ID_CURR', 'TARGET'])\n test_features = test_features.drop(columns=['SK_ID_CURR'])\n\n # One Hot Encoding\n if encoding == 'ohe':\n features = pd.get_dummies(features)\n test_features = pd.get_dummies(test_features)\n\n # Align the dataframes by the columns\n features, test_features = features.align(test_features, join='inner', axis=1)\n\n # No categorical indices to record\n cat_indices = 'auto'\n\n # Integer label encoding\n elif encoding == 'le':\n\n # Create a label encoder\n label_encoder = LabelEncoder()\n\n # List for storing categorical indices\n cat_indices = []\n\n # Iterate through each column\n for i, col in enumerate(features):\n if features[col].dtype == 'object':\n # Map the categorical features to integers\n features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1,)))\n test_features[col] = label_encoder.transform(np.array(test_features[col].astype(str)).reshape((-1,)))\n\n # Record the categorical indices\n cat_indices.append(i)\n\n # Catch error if label encoding scheme is not valid\n else:\n raise ValueError(\"Encoding must be either 'ohe' or 'le'\")\n\n print('Training Data Shape: ', features.shape)\n print('Testing Data Shape: ', test_features.shape)\n\n # Extract feature names\n feature_names = list(features.columns)\n\n # Convert to np arrays\n features = np.array(features)\n test_features = np.array(test_features)\n\n # Create the kfold object\n k_fold = KFold(n_splits=n_folds, shuffle=True, random_state=50)\n\n # Empty array for 
feature importances\n feature_importance_values = np.zeros(len(feature_names))\n\n # Empty array for test predictions\n test_predictions = np.zeros(test_features.shape[0])\n\n # Empty array for out of fold validation predictions\n out_of_fold = np.zeros(features.shape[0])\n\n # Lists for recording validation and training scores\n valid_scores = []\n train_scores = []\n\n # Iterate through each fold\n for train_indices, valid_indices in k_fold.split(features):\n # Training data for the fold\n train_features, train_labels = features[train_indices], labels[train_indices]\n # Validation data for the fold\n valid_features, valid_labels = features[valid_indices], labels[valid_indices]\n\n # Create the model\n model = lgb.LGBMClassifier(n_estimators=10000, objective='binary',\n class_weight='balanced', learning_rate=0.05,\n reg_alpha=0.1, reg_lambda=0.1,\n subsample=0.8, n_jobs=-1, random_state=50)\n\n # Train the model\n model.fit(train_features, train_labels, eval_metric='auc',\n eval_set=[(valid_features, valid_labels), (train_features, train_labels)],\n eval_names=['valid', 'train'], categorical_feature=cat_indices,\n early_stopping_rounds=100, verbose=200)\n\n # Record the best iteration\n best_iteration = model.best_iteration_\n\n # Record the feature importances\n feature_importance_values += model.feature_importances_ / k_fold.n_splits\n\n # Make predictions\n test_predictions += model.predict_proba(test_features, num_iteration=best_iteration)[:, 1] / k_fold.n_splits\n\n # Record the out of fold predictions\n out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration=best_iteration)[:, 1]\n\n # Record the best score\n valid_score = model.best_score_['valid']['auc']\n train_score = model.best_score_['train']['auc']\n\n valid_scores.append(valid_score)\n train_scores.append(train_score)\n\n # Clean up memory\n gc.enable()\n del model, train_features, valid_features\n gc.collect()\n\n # Make the submission dataframe\n submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions})\n\n # Make the feature importance dataframe\n feature_importances = pd.DataFrame({'feature': feature_names, 'importance': feature_importance_values})\n\n # Overall validation score\n valid_auc = roc_auc_score(labels, out_of_fold)\n\n # Add the overall scores to the metrics\n valid_scores.append(valid_auc)\n train_scores.append(np.mean(train_scores))\n\n # Needed for creating dataframe of validation scores\n fold_names = list(range(n_folds))\n fold_names.append('overall')\n\n # Dataframe of validation scores\n metrics = pd.DataFrame({'fold': fold_names,\n 'train': train_scores,\n 'valid': valid_scores})\n\n return submission, feature_importances, metrics", "def get_samples(\n self,\n num_samples=0,\n idx=None,\n split='val',\n as_list=True,\n deterministic=False,\n as_tuple=False,\n simple_IDs=False):\n assert(idx is not None or num_samples > 0)\n\n if split == 'train':\n assert(self.mode in ['train_noval', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._trn_idx[0:num_samples]\n else:\n idx = np.random.choice(self._trn_idx, size=num_samples, replace=False)\n\n images, labels, IDs = self._get_train_samples(idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, labels, IDs\n else:\n return map(np.asarray, (images, labels, IDs))\n\n elif split == 'val':\n assert(self.mode in ['val', 'val_notrain', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._val_idx[0:num_samples]\n else:\n idx = np.random.choice(self._val_idx, 
size=num_samples, replace=True)\n\n images, gt_labels, IDs = self._get_val_samples(idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, gt_labels, IDs\n else:\n return map(np.asarray, (images, gt_labels, IDs))\n\n elif split == 'val_with_preds':\n assert(self.mode in ['val', 'val_notrain', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._val_idx[0:num_samples]\n else:\n idx = np.random.choice(self._val_idx, size=num_samples, replace=True)\n\n images, gt_labels, pred_labels, IDs = self._get_val_samples_with_preds(\n idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, gt_labels, pred_labels, IDs\n else:\n return map(np.asarray, (images, gt_labels, pred_labels, IDs))\n\n elif split == 'val_with_pred_paths':\n assert(self.mode in ['val', 'val_notrain', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._val_idx[0:num_samples]\n else:\n idx = np.random.choice(self._val_idx, size=num_samples, replace=True)\n\n images, gt_labels, pred_label_paths, IDs = self._get_val_samples_with_pred_paths(\n idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, gt_labels, pred_label_paths, IDs\n else:\n return map(np.asarray, (images, gt_labels, pred_label_paths, IDs))\n\n elif split == 'test':\n if idx is None:\n if deterministic:\n idx = self._tst_idx[0:num_samples]\n else:\n idx = np.random.choice(self._tst_idx, size=num_samples, replace=False)\n\n images, IDs = [], []\n for l in idx:\n if self.opts['in_memory']:\n image = self._images_test[l]\n else:\n image = self._load_sample(self._img_tst_path[l], preprocess=False, as_tuple=as_tuple)\n images.append(image)\n if simple_IDs is True:\n IDs.append(self._tst_IDs_simpl[l])\n else:\n IDs.append(self._tst_IDs[l])\n\n if as_list:\n return images, IDs\n else:\n return map(np.asarray, (images, IDs))\n\n elif split == 'test_with_preds':\n if idx is None:\n if deterministic:\n idx = self._tst_idx[0:num_samples]\n else:\n idx = np.random.choice(self._tst_idx, size=num_samples, replace=False)\n\n images, pred_labels, IDs = self._get_test_samples_with_preds(idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, pred_labels, IDs\n else:\n return map(np.asarray, (images, pred_labels, IDs))\n\n elif split == 'test_with_pred_paths':\n if idx is None:\n if deterministic:\n idx = self._tst_idx[0:num_samples]\n else:\n idx = np.random.choice(self._tst_idx, size=num_samples, replace=False)\n\n images, pred_label_paths, IDs = self._get_test_samples_with_pred_paths(\n idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, pred_label_paths, IDs\n else:\n return map(np.asarray, (images, pred_label_paths, IDs))\n\n else:\n return None, None", "def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in 
self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def calculate_data_split(self, n_train=2, n_vali=0, n_test=1, n_splits=1,\n num_percentiles=4, random_state=87, verbosity=1,\n manual_split=False,train_indices=None, test_indices=None,\n train_redshift_indices=[0], test_redshift_indices=[0],\n interpolate_over_redshift_only=False, **kwargs):\n \n n_extrema=kwargs.get('n_extrema', 2)\n ind_extrema=kwargs.get('ind_extrema', [0,-1])\n self.data_separation(n_extrema=n_extrema, ind_extrema=ind_extrema)\n\n too.condprint(\"number of wanted training vectors\", n_train, level=2, verbosity=verbosity)\n too.condprint(\"number of wanted test vectors\", n_test, level=1, verbosity=verbosity)\n if n_train+n_test > (self.size_fullspace):\n print(\"Warning n_train is larger than total full sample space\")\n\n self.random_state = random_state\n self.num_percentiles = num_percentiles\n self.n_splits = n_splits\n\n stratif_labels = self.stratify_array(self.midspace, num_percentiles=self.num_percentiles)\n self.test_splitdict = dict()\n self.train_splitdict = dict()\n self.vali_splitdict = dict()\n\n if manual_split == False:\n n_vali = self.size_midspace-n_test-n_train\n if n_vali !=0 and len(self.ind_midspace)> 1:\n kf = StratifiedShuffleSplit(n_splits=self.n_splits, test_size=n_test, random_state=self.random_state)\n for ii, (trainvali, test) in enumerate(kf.split(self.midspace,stratif_labels)):\n #test = test[np.in1d(test, extspace_ind, invert=True)]\n\n test = self.ind_midspace[test]\n if n_train > 0:\n train, valitest = train_test_split(trainvali, test_size=n_vali, shuffle=True, random_state=self.random_state)\n train = self.ind_midspace[train]\n train = np.unique(np.concatenate([train,self.ind_extremaspace]))\n train = self.ind_fullspace[train]\n else:\n train = self.ind_extremaspace\n train = self.ind_fullspace[train]\n valitest=trainvali\n\n #valitest = valitest[np.in1d(valitest, extspace_ind, invert=True)]\n valitest = self.ind_midspace[valitest]\n #print(test, trr, \" s tr\", len(train)-2, \" tr: \", train, \" va: \", valitest)\n self.test_splitdict[ii] = test\n self.vali_splitdict[ii]= valitest\n self.train_splitdict[ii] = train\n elif len(self.ind_midspace)> 1 and n_vali == 0:\n kf = StratifiedShuffleSplit(n_splits=self.n_splits, test_size=n_test, random_state=self.random_state)\n for ii, (train, test) in enumerate(kf.split(self.midspace,stratif_labels)):\n test = self.ind_midspace[test]\n train = self.ind_midspace[train]\n train = np.unique(np.concatenate([train,self.ind_extremaspace]))\n train = self.ind_fullspace[train]\n self.test_splitdict[ii] = test\n self.train_splitdict[ii] = train\n\n else:\n test = self.ind_midspace\n train = self.ind_extremaspace\n self.test_splitdict[0] = test\n self.train_splitdict[0] = train\n \n ###/!\\ Warning /!\\ For now we always use manual split (which not really manual now...)\n elif manual_split == True:\n ### Determine the number of samples avaible with different values of parameters. 
e.g nb_param = 101 for MassiveNus\n nb_param = int(len(self.fullspace)/len(self.z_requested)) \n if len(self.z_requested)==1:\n nb_param = int(len(self.fullspace))\n \n\n for ii in range (n_splits):\n ###Here the user has chosen to provide the test indices\n if test_indices is not None:\n test_indices = np.atleast_2d(test_indices)\n test = test_indices[ii]\n ###We make sure that the indice lies into a correct space. e.g if we have nb_param = 101, and a indices i = 103 it will become i =2\n test_origin = [tt%nb_param for tt in test]\n \n ###Do we want to construct a interpolation only over the redshift ? /!\\ Warning /!\\ this is case is not really used....\n if interpolate_over_redshift_only == False and train_indices is None:\n train_origin = [ii for ii in range(1,nb_param-1) if ii not in test_origin ]\n\n elif interpolate_over_redshift_only == False and train_indices is not None:\n train_origin = [tt%nb_param for tt in train ]\n else :\n train_origin = test_origin\n ###Here the user has chosen not to provide the test indices\n ## so we first randomly generate them\n else:\n if train_indices is None:\n test_origin = [ii for ii in range(1,nb_param-1)]\n test_origin = shuffle(test_origin)[:n_test]\n if interpolate_over_redshift_only == False:\n train_origin = [ii for ii in range(1,nb_param-1) if ii not in test_origin ]\n else:\n train_origin = test_origin\n ###The user has specified train indices so must be sure that train and test do not overlap !\n else:\n train_indices = np.atleast_2d(train_indices)\n train = train_indices[ii]\n train_origin = [tt%nb_param for tt in train ]\n test_origin = [ii for ii in range(1,nb_param-1) if ii not in train_origin ] ####!!!\n \n train_origin = shuffle(train_origin)\n \n train_origin = train_origin[:n_train]\n test_origin = shuffle(test_origin)[:n_test]\n if train_indices is None:\n if [0] not in test_origin:\n train_origin +=[0]\n if [nb_param-1]not in test_origin:\n \n train_origin += [nb_param-1]\n if [0] in test_origin or [nb_param-1] in test_origin :\n print(\"Warning : trying to interpolate a extramal value\")\n \n\n train_redshift = self.z_requested[train_redshift_indices]\n test_redshift = self.z_requested[test_redshift_indices]\n self.train_redshift = train_redshift \n self.test_redshift = test_redshift\n too.condprint(\"redshift used for training\", train_redshift,level=1,verbosity=verbosity)\n too.condprint(\"redshfit used for testing\", test_redshift,level=1,verbosity=verbosity)\n train = []\n test = []\n ### looping over the redshift \n for zz in train_redshift_indices:\n train+= [ii + zz*nb_param for ii in train_origin ]\n\n for zz in test_redshift_indices: \n test += [ii + zz*nb_param for ii in test_origin ]\n \n\n self.train_splitdict[ii] = train\n self.test_splitdict[ii] = test\n shuffled = shuffle(train)\n self.train_splitdict[ii] = shuffled\n self.vali_splitdict[ii] = shuffled\n\n return None", "def _train_val_split(self, df, val_split):\n # Compute the number of validation examples\n val_size = round(df.shape[0] * val_split)\n\n # Compute validation examples by keeping all questions related\n # to the same context within the same split\n val_actual_size = 0\n val_keys = []\n for t, n in df[\"title\"].value_counts().to_dict().items():\n if val_actual_size + n > val_size:\n break\n val_keys.append(t)\n val_actual_size += n\n\n # Build the train and validation DataFrames\n train_df = df[~df[\"title\"].isin(val_keys)].reset_index(drop=True)\n val_df = df[df[\"title\"].isin(val_keys)].reset_index(drop=True)\n return train_df, val_df", "def 
main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def prepareSplitClassifier(df, models, choice):\n\n\n def classificationOutput(clf, X, Y):\n \"\"\"\n Fit the model and print the classification results\n - confusion_matrix\n - avg scores etc\n \"\"\"\n n_samples = 36\n\n print \"\\n\\nClassifier: \\n %s\" % (clf)\n print \"#\" * 79\n # classifier_gnb = naive_bayes.GaussianNB() # initiating the classifier\n\n clf.fit(X[:n_samples], Y[:n_samples]) # train on first n_samples and test on last 10\n\n expected = Y[n_samples:]\n predicted = clf.predict(X[n_samples:])\n print(\"Classification report:\\n%s\\n\" % (metrics.classification_report(expected, predicted)))\n print(\"\\nConfusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n\n\n def splitclassify(cDf):\n \"\"\"\n Given the dataframe combined with equal fair and unfair apps,\n classify them\n \"\"\"\n cDf = cDf.reindex(np.random.permutation(cDf.index)) # shuffle the dataframe\n featCols = set(cDf.columns)\n featCols.remove('appLabel')\n\n features = cDf[list(featCols)].astype('float')\n\n ## Scale the features to a common range\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(features.values)\n\n Y = cDf['appLabel'].values\n\n\n if choice == 'all':\n for key in models:\n classifier = models[key]\n classificationOutput(classifier, X, Y)\n else:\n if choice in models:\n classifier = models[choice]\n classificationOutput(classifier, X, Y)\n else:\n print \"Incorrect Choice\"\n\n\n\n fairDf = df[df['appLabel'] == False]\n unfairDf = df[df['appLabel'] == True]\n\n\n # calculate total possible splits of fair data frame relatie to\n # size of unfair dataframe\n splits = len(fairDf) // len(unfairDf)\n\n for i in range(splits):\n clDf = fairDf[i : i+len(unfairDf)].append(unfairDf)\n\n # print fairDf.values, 
unfairDf.values\n print \"Classifying %d th split of fair apps with unfair app\" % (i)\n print \"-\" * 79\n splitclassify(clDf)\n print \"\\n\\n\"", "def _score_fn(context_features, group_features, mode, unused_params,\n\t\t\t\t\t\t\t\tunused_config):\n\t\twith tf.name_scope(\"input_layer\"):\n\t\t\tgroup_input = [\n\t\t\t\t\ttf.layers.flatten(group_features[name])\n\t\t\t\t\tfor name in sorted(example_feature_columns)\n\t\t\t]\n\t\t\tprint(group_input[0].shape)\n\t\t\tprint(group_input[0].dtype)\n\t\t\tcontext_input = [\n\t\t\t\t\ttf.layers.flatten(context_features[name])\n\t\t\t\t\tfor name in sorted(context_feature_columns)\n\t\t\t]\n\t\t\tprint(context_input[0].shape)\n\t\t\tprint(context_input[0].dtype)\n\t\t\tfinal_input = context_input + group_input\n\t\t\tinput_layer = tf.concat(final_input, 1)\n\t\t\ttf.summary.scalar(\"input_sparsity\", tf.nn.zero_fraction(input_layer))\n\t\t\ttf.summary.scalar(\"input_max\", tf.reduce_max(input_layer))\n\t\t\ttf.summary.scalar(\"input_min\", tf.reduce_min(input_layer))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\t\tcur_layer = tf.layers.batch_normalization(input_layer, training=is_training)\n\t\tfor i, layer_width in enumerate(int(d) for d in FLAGS.hidden_layer_dims):\n\t\t\tcur_layer = tf.layers.dense(cur_layer, units=layer_width)\n\t\t\tcur_layer = tf.layers.batch_normalization(cur_layer, training=is_training)\n\t\t\tcur_layer = tf.nn.relu(cur_layer)\n\t\t\ttf.summary.scalar(\"fully_connected_{}_sparsity\".format(i),\n\t\t\t\t\t\t\t\t\t\t\t\ttf.nn.zero_fraction(cur_layer))\n\t\tcur_layer = tf.layers.dropout(\n\t\t\t\tcur_layer, rate=FLAGS.dropout_rate, training=is_training)\n\t\tlogits = tf.layers.dense(cur_layer, units=FLAGS.group_size)\n\t\treturn logits", "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def kb_train_test_split(test_size, random_state):\n\n cnxn_path = \"/polyaxon-data/goldenretriever/db_cnxn_str.txt\"\n conn = pyodbc.connect(open(cnxn_path, 'r').read())\n\n SQL_Query = pd.read_sql_query('''SELECT dbo.query_labels.id, dbo.query_db.query_string, \\\n dbo.kb_clauses.processed_string, dbo.kb_raw.kb_name, dbo.kb_raw.type FROM dbo.query_labels \\\n JOIN dbo.query_db ON dbo.query_labels.query_id = dbo.query_db.id \\\n JOIN dbo.kb_clauses ON dbo.query_labels.clause_id = dbo.kb_clauses.id \\\n JOIN dbo.kb_raw ON dbo.kb_clauses.raw_id = dbo.kb_raw.id''', conn)\n\n df = pd.DataFrame(SQL_Query).set_index('id')\n kb_names = df['kb_name'].unique()\n\n train_dict = dict()\n test_dict = dict()\n\n train_idx_all = []\n test_idx_all = []\n\n for kb_name in kb_names:\n kb_id = df[df['kb_name'] == kb_name].index.values\n train_idx, test_idx = train_test_split(kb_id, test_size=test_size,\n random_state=random_state)\n \n train_dict[kb_name] = train_idx\n test_dict[kb_name] = test_idx\n \n for k,v in train_dict.items():\n for idx in v:\n train_idx_all.append(idx)\n \n for k,v in test_dict.items():\n for idx in v:\n test_idx_all.append(idx)\n \n 
return df, train_dict, test_dict, train_idx_all, test_idx_all", "def split(self, elIndicesDict, maxSubTreeSize=None, numSubTrees=None, verbosity=0):\n #dbList = self.generate_circuit_list()\n tm = _time.time()\n printer = _VerbosityPrinter.build_printer(verbosity)\n\n if (maxSubTreeSize is None and numSubTrees is None) or \\\n (maxSubTreeSize is not None and numSubTrees is not None):\n raise ValueError(\"Specify *either* maxSubTreeSize or numSubTrees\")\n if numSubTrees is not None and numSubTrees <= 0:\n raise ValueError(\"EvalTree split() error: numSubTrees must be > 0!\")\n\n #Don't split at all if it's unnecessary\n if maxSubTreeSize is None or len(self) < maxSubTreeSize:\n if numSubTrees is None or numSubTrees == 1: return elIndicesDict\n\n self.subTrees = []\n evalOrder = self.get_evaluation_order()\n printer.log(\"EvalTree.split done initial prep in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n\n def nocache_create_equal_size_subtrees():\n \"\"\" A shortcut for special case when there is no cache so each\n circuit can be evaluated independently \"\"\"\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost\n\n def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n \"\"\"\n Find a set of subtrees by iterating through the tree\n and placing \"break\" points when the cost of evaluating the\n subtree exceeds some 'maxCost'. This ensure ~ equal cost\n trees, but doesn't ensure any particular number of them.\n\n maxCostRate can be set to implement a varying maxCost\n over the course of the iteration.\n \"\"\"\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost\n\n ##################################################################\n # Part I: find a list of where the 
current tree should be broken #\n ##################################################################\n\n if numSubTrees is not None and self.cache_size() == 0:\n #print(\"Split: EQUAL SUBTREES!\") #REMOVE\n subTreeSetList, totalCost = nocache_create_equal_size_subtrees()\n #printer.log(\"EvalTree.split PT1 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n elif numSubTrees is not None:\n\n #OLD METHOD: optimize max-cost to get the right number of trees\n # (but this can yield trees with unequal lengths or cache sizes,\n # which is what we're often after for memory reasons)\n costMet = \"size\" # cost metric\n if costMet == \"applies\":\n maxCost = self.get_num_applies() / numSubTrees\n else: maxCost = len(self) / numSubTrees\n maxCostLowerBound, maxCostUpperBound = maxCost, None\n maxCostRate, rateLowerBound, rateUpperBound = 0, -1.0, +1.0\n #OLD (& incorrect) vals were 0, -1.0/len(self), +1.0/len(self),\n # though current -1,1 vals are probably overly conservative...\n resultingSubtrees = numSubTrees + 1 # just to prime the loop\n iteration = 0\n\n #Iterate until the desired number of subtrees have been found.\n while resultingSubtrees != numSubTrees:\n subTreeSetList, totalCost = create_subtrees(maxCost, maxCostRate, costMet)\n resultingSubtrees = len(subTreeSetList)\n #print(\"DEBUG: resulting numTrees = %d (cost %g) w/maxCost = %g [%s,%s] & rate = %g [%g,%g]\" % \\\n # (resultingSubtrees, totalCost, maxCost, str(maxCostLowerBound), str(maxCostUpperBound),\n # maxCostRate, rateLowerBound, rateUpperBound))\n\n #DEBUG\n #totalSet = set()\n #for s in subTreeSetList:\n # totalSet.update(s)\n #print(\"DB: total set length = \",len(totalSet))\n #assert(len(totalSet) == len(self))\n\n #Perform binary search in maxCost then maxCostRate to find\n # desired final subtree count.\n if maxCostUpperBound is None or abs(maxCostLowerBound - maxCostUpperBound) > 1.0:\n # coarse adjust => vary maxCost\n last_maxCost = maxCost\n if resultingSubtrees <= numSubTrees: # too few trees: reduce maxCost\n maxCost = (maxCost + maxCostLowerBound) / 2.0\n maxCostUpperBound = last_maxCost\n else: # too many trees: raise maxCost\n if maxCostUpperBound is None:\n maxCost = totalCost # / numSubTrees\n else:\n maxCost = (maxCost + maxCostUpperBound) / 2.0\n maxCostLowerBound = last_maxCost\n else:\n # fine adjust => vary maxCostRate\n last_maxRate = maxCostRate\n if resultingSubtrees <= numSubTrees: # too few trees reduce maxCostRate\n maxCostRate = (maxCostRate + rateLowerBound) / 2.0\n rateUpperBound = last_maxRate\n else: # too many trees: increase maxCostRate\n maxCostRate = (maxCostRate + rateUpperBound) / 2.0\n rateLowerBound = last_maxRate\n\n iteration += 1\n assert(iteration < 100), \"Unsuccessful splitting for 100 iterations!\"\n\n else: # maxSubTreeSize is not None\n subTreeSetList, totalCost = create_subtrees(\n maxSubTreeSize, maxCostRate=0, costMetric=\"size\")\n\n ##########################################################\n # Part II: create subtrees from index sets\n ##########################################################\n # (common logic provided by base class up to providing a few helper fns)\n\n def permute_parent_element(perm, el):\n \"\"\"Applies a permutation to an element of the tree \"\"\"\n # perm[oldIndex] = newIndex\n #return (perm[el[0]] if (el[0] is not None) else None, el[1], el[2])\n return (el[0], el[1], el[2]) # no need to permute the cache element ([0])\n\n def create_subtree(parentIndices, numFinal, fullEvalOrder, sliceIntoParentsFinalArray, parentTree):\n \"\"\"\n 
Creates a subtree given requisite information:\n\n Parameters\n ----------\n parentIndices : list\n The ordered list of (parent-tree) indices to be included in\n the created subtree.\n\n numFinal : int\n The number of \"final\" elements, i.e. those that are used to\n construct the final array of results and not just an intermediate.\n The first numFinal elemements of parentIndices are \"final\", and\n 'sliceIntoParentsFinalArray' tells you which final indices of\n the parent they map to.\n\n fullEvalOrder : list\n A list of the integers between 0 and len(parentIndices)-1 which\n gives the evaluation order of the subtree *including* evaluation\n of any initial elements.\n\n sliceIntoParentsFinalArray : slice\n Described above - map between to-be-created subtree's final\n elements and parent-tree indices.\n\n parentTree : EvalTree\n The parent tree itself.\n \"\"\"\n #t0 = _time.time() #REMOVE\n subTree = MapEvalTree()\n subTree.myFinalToParentFinalMap = sliceIntoParentsFinalArray\n subTree.num_final_strs = numFinal\n subTree[:] = [None] * len(parentIndices)\n\n curCacheSize = 0\n subTreeCacheIndices = {}\n\n for ik in fullEvalOrder: # includes any initial indices\n k = parentIndices[ik] # original tree index\n\n oStart, remainder, oCache = self[k] # original tree data\n\n if oCache is not None: # this element was in parent's cache,\n subTreeCacheIndices[oCache] = curCacheSize # maps parent's cache indices to subtree's\n iCache = curCacheSize\n curCacheSize += 1\n else:\n iCache = None\n\n iStart = None if (oStart is None) else \\\n subTreeCacheIndices[oStart]\n subTree.eval_order.append(ik)\n\n assert(subTree[ik] is None)\n subTree[ik] = (iStart, remainder, iCache)\n\n #t1 = _time.time() #REMOVE\n subTree.cachesize = curCacheSize\n subTree.parentIndexMap = parentIndices # parent index of each subtree index\n subTree.simplified_circuit_spamTuples = [self.simplified_circuit_spamTuples[k]\n for k in _slct.indices(subTree.myFinalToParentFinalMap)]\n #subTree._compute_finalStringToEls() #depends on simplified_circuit_spamTuples\n\n #t2 = _time.time() #REMOVE\n final_el_startstops = []; i = 0\n for spamTuples in parentTree.simplified_circuit_spamTuples:\n final_el_startstops.append((i, i + len(spamTuples)))\n i += len(spamTuples)\n #t3 = _time.time() #REMOVE\n if len(_slct.indices(subTree.myFinalToParentFinalMap)) > 0:\n subTree.myFinalElsToParentFinalElsMap = _np.concatenate(\n [_np.arange(*final_el_startstops[k])\n for k in _slct.indices(subTree.myFinalToParentFinalMap)])\n #Note: myFinalToParentFinalMap maps only between *final* elements\n # (which are what is held in simplified_circuit_spamTuples)\n else: # no final elements (a \"dummy\" tree, useful just to keep extra procs busy)\n subTree.myFinalElsToParentFinalElsMap = _np.arange(0, 0) # empty array\n\n #t4 = _time.time() #REMOVE\n subTree.num_final_els = sum([len(v) for v in subTree.simplified_circuit_spamTuples])\n #t5 = _time.time() #REMOVE\n subTree.recompute_spamtuple_indices(bLocal=False)\n #t6 = _time.time() #REMOVE\n\n subTree.trim_nonfinal_els()\n #t7 = _time.time() #REMOVE\n subTree.opLabels = self._get_opLabels(subTree.generate_circuit_list(permute=False))\n #t8 = _time.time() #REMOVE\n # print(\"DB: create_subtree timing: \"\n # \"t1=%.3fs, t2=%.3fs, t3=%.3fs, t4=%.3fs, t5=%.3fs, t6=%.3fs, t7=%.3fs, t8=%.3fs\"\n # % (t1-t0,t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6,t8-t7))\n\n return subTree\n\n #printer.log(\"EvalTree.split PT2 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n updated_elIndices = 
self._finish_split(elIndicesDict, subTreeSetList,\n permute_parent_element, create_subtree,\n all_final=bool(self.cache_size() == 0))\n #printer.log(\"EvalTree.split PT3 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n printer.log(\"EvalTree.split done second pass in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n return updated_elIndices", "def collect_best_features(self):\n bincsp = self.binary_csp # just to make code shorter\n n_folds = len(self.binary_csp.folds)\n n_class_pairs = len(self.binary_csp.class_pairs)\n result_shape = (n_folds, n_class_pairs)\n self.train_feature = np.empty(result_shape, dtype=object)\n self.train_feature_full_fold = np.empty(result_shape, dtype=object)\n self.test_feature = np.empty(result_shape, dtype=object)\n self.test_feature_full_fold = np.empty(result_shape, dtype=object)\n self.selected_filters_per_filterband = np.empty(result_shape, dtype=object)\n for fold_i in range(n_folds):\n for class_pair_i in range(n_class_pairs):\n bin_csp_train_features = deepcopy(bincsp.train_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_train_features_full_fold = deepcopy(\n bincsp.train_feature_full_fold[\n self.selected_filter_inds,\n fold_i, class_pair_i])\n bin_csp_test_features = deepcopy(bincsp.test_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_test_features_full_fold = deepcopy(\n bincsp.test_feature_full_fold[\n self.selected_filter_inds,fold_i, class_pair_i])\n selected_filters_per_filt = self.select_best_filters_best_filterbands(\n bin_csp_train_features, max_features=self.n_features,\n forward_steps=self.forward_steps, \n backward_steps=self.backward_steps,\n stop_when_no_improvement=self.stop_when_no_improvement)\n self.train_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features, selected_filters_per_filt)\n self.train_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features_full_fold, selected_filters_per_filt)\n \n self.test_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features, selected_filters_per_filt)\n self.test_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features_full_fold, selected_filters_per_filt)\n \n self.selected_filters_per_filterband[fold_i, class_pair_i] = \\\n selected_filters_per_filt", "def scoring_sampling_half(INPUT_PATH=r'./result/data_split_half_reliability/split_data_features_and_trait_scores',\n method='predefined_parameters'):\n parameters = None\n if method == 'itself':\n calculate_scoring_parameters(INPUT_PATH)\n parameters = read(os.path.join(INPUT_PATH, 'scoring_parameters.csv'))\n\n if method == 'predefined_parameters':\n parameters = read(r'./model/trait/auxiliary_data/scoring_parameters.csv')\n\n for root, dirs, files in os.walk(INPUT_PATH):\n if files:\n features_df_train = read(os.path.join(root, root.split('\\\\')[-1] + '_train_features_group.csv'))\n score_train = logistic_trait_scores(features_df_train, parameters)\n write(os.path.join(root, root.split('\\\\')[-1] + '_train_item_and_trait_scores.csv'), score_train)\n\n features_df_test = read(os.path.join(root, root.split('\\\\')[-1] + '_test_features_group.csv'))\n score_test = logistic_trait_scores(features_df_test, parameters)\n write(os.path.join(root, root.split('\\\\')[-1] + '_test_item_and_trait_scores.csv'), score_test)", "def compute_splits(feature_df, target_col, 
max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)", "def getFittedDBScanModel( features, maxDistance, minNbSamples):\r\n\r\n\tdbscanModel = DBSCAN( eps=maxDistance, min_samples=minNbSamples)\r\n\tdbscanModel.fit( features )\r\n\treturn dbscanModel", "def train_val_split_from_df(path_to_df, text_field, label_field, split_params={}, save_dir=\"./\",\n preprocessing_function=None, additional_fields_and_preps={}, postfix=\"\", verbose=False):\n if path_to_df.endswith(\"parquet\"):\n df = pd.read_parquet(path_to_df)\n else:\n df = pd.read_csv(path_to_df)\n\n print_items = []\n\n if \"seed\" in split_params:\n seed = split_params[\"seed\"]\n else:\n print_items.append(\"no 'seed' parameter specified in split_params, the default is 17\")\n seed = 17\n np.random.seed(seed)\n if \"fraction\" in split_params:\n f = split_params[\"fraction\"]\n assert type(f) == float\n else:\n print_items.append(\"no 'fraction' parameter specified in split_params, the default is 0.1\")\n f = 0.1\n\n for added_field, prep_f in additional_fields_and_preps.items():\n if df[added_field].dtype != \"object\":\n df[added_field] = df[added_field].astype(str)\n if prep_f:\n df[added_field] = df[added_field].map(prep_f)\n df[text_field] = df[text_field] + \" \" + df[added_field]\n\n val_mask = np.random.choice([True, False], size=len(df), p=[f, 1 - f])\n\n return preprocess_and_save(df, val_mask, text_field, label_field, preprocessing_function,\n additional_fields_and_preps, save_dir, postfix, verbose, print_items)", "def find_best_features(year, features, sex, age, heavy):\r\n print 'find_best_features(year=%d,features=%s,sex=%s,age=%s,heavy=%s)' % (year, features, sex,\r\n age, heavy)\r\n X, y, keys = getXy_by_features(year, features, sex, age)\r\n title = 'features=%s,sex=%s,age=%s,year=%d' % (features,sex,age,year) \r\n results, n_samples = select_features.get_most_predictive_feature_set(title, X, y, keys, heavy)\r\n return results, n_samples, keys", "def select_randomly(self, val_split: float, test_split: float) -> {str: int}:\n\n def _select(start, n, label) -> int:\n \"\"\"\n Label all columns in [start, start+n) with label.\n \"\"\"\n n_selected = 0\n for i in range(start, int(start + n)):\n x = self._x_positions[i]\n n_selected += self._cols[x].mark_as(label)\n return n_selected\n\n def _remove_overlaps(start, end) -> int:\n \"\"\"\n Remove unlabelled columns in [start-col_width, end+col_width].\n \"\"\"\n start = self._x_positions[start % self.n_cols]\n end = self._x_positions[int(end) % self.n_cols]\n n_removed = 0\n for x, col in self._cols.items():\n if start - self.col_width <= x <= start or end <= x <= end + self.col_width:\n if col.label is None:\n n_removed += col.mark_as('ignore')\n return n_removed\n\n def _next_unlabelled_col(x):\n \"\"\"\n Return index of first unlabelled column after x.\n \"\"\"\n for i in range(self.n_cols):\n idx = (x + i) % self.n_cols\n x_current = self._x_positions[idx]\n if self._cols[x_current].label is None:\n return idx\n\n # When computing number of columns per split we must take into account\n # that some columns will be removed, i.e. 
we want to compute the split\n # sizes as fraction of the number of actual selected columns, not of\n # the total number of columns.\n delta_x = self._x_positions[1] - self._x_positions[0]\n n_to_remove_per_split = self.col_width / delta_x\n # * 2 because 2 gaps between 3 splits\n n_to_keep = self.n_cols - n_to_remove_per_split * 2\n n_val = round(n_to_keep * val_split)\n n_test = round(n_to_keep * test_split)\n n_train = n_to_keep - n_val - n_test\n\n n_selected_crops_per_split = dict.fromkeys(['training', 'validation', 'test', 'ignore'], 0)\n\n # Place patches in arbitrary order\n start = 0\n for n, label in random.sample(list(zip([n_train, n_val, n_test], ['training', 'validation', 'test'])), k=3):\n # Mark patch\n n_selected_crops_per_split[label] += _select(start, n, label)\n # Remove columns overlapping this patch\n n_selected_crops_per_split['ignore'] += _remove_overlaps(start, start + n - 1)\n # Next patch starts at next unlabelled column\n start = _next_unlabelled_col(start)\n\n return n_selected_crops_per_split", "def data_split(self, split_index=0, thinning=None, apply_mask=False, mask=[], **kwargs):\n \n self.learn_sets = ['train','vali','test']\n self.ind_train = self.train_splitdict[split_index]\n self.ind_train.sort()\n self.ind_test = self.test_splitdict[split_index]\n self.ind_test.sort()\n if len(self.vali_splitdict) !=0:\n self.learn_sets = ['train','vali','test']\n self.ind_vali = self.vali_splitdict[split_index]\n self.ind_vali.sort()\n self.indices_learn_dict = dict(zip(self.learn_sets, [self.ind_train, self.ind_vali, self.ind_test]))\n else:\n self.learn_sets = ['train','test']\n self.indices_learn_dict = dict(zip(self.learn_sets, [self.ind_train, self.ind_test]))\n\n\n self.train_samples = self.fullspace[self.ind_train]\n self.train_size = len(self.train_samples)\n\n if len(self.vali_splitdict) !=0:\n self.vali_samples = self.fullspace[self.ind_vali]\n self.vali_samples.sort()\n self.vali_size = len(self.vali_samples)\n else:\n self.vali_size = 0\n self.test_samples = self.fullspace[self.ind_test]\n #self.test_samples.sort()\n self.test_size = len(self.test_samples)\n verbosity = kwargs.get('verbosity', 1)\n\n too.condprint(\"number of obtained training vectors\", self.train_size, level=1, verbosity=verbosity)\n too.condprint(\"number of obtained validation vectors\", self.vali_size, level=1, verbosity=verbosity)\n too.condprint(\"number of obtained test vectors\", self.test_size, level=2, verbosity=verbosity)\n\n\n self.matrix_datalearn_dict = dict()\n\n for dli in self.learn_sets:\n matrixdata = np.copy(self.matrix_z)\n self.matrixdata=matrixdata\n\n ## copy of mask to avoid modifying orginal mask after iterations\n if apply_mask==False:\n maskcopy=np.arange(0,len(matrixdata[0])) ##range over all axis length, does not mask anything\n else:\n maskcopy=np.copy(mask)\n \n ## apply thinning (if set to None, there is no thinning)\n self.mask_true=maskcopy[::thinning] \n\n ## apply mask also to feature grid and save as masked_+...\n setattr(self, 'masked_'+self.features_str, self.fgrid[self.mask_true]) \n\n matrixdata = matrixdata[:,self.mask_true] ## apply mask and thinning to feature space (k-grid)\n indices_l = self.indices_learn_dict[dli]\n matrixdata = matrixdata[indices_l,:] ##choose learning set\n self.matrix_datalearn_dict[dli] = matrixdata\n self.matrix_datalearn_dict = objdict(self.matrix_datalearn_dict)\n return self.matrix_datalearn_dict", "def execute_pipeline(self, data_frame, target, primitives_list, problem_type,\n optimize=False, max_evals=10, 
scoring=None,\n minimize_cost=False, hyperparameters=None):\n all_pipeline_dict = {}\n Folds = {}\n self.scoring = scoring\n self.data_frame = data_frame\n self.problem_type = problem_type\n self.minimize_cost = minimize_cost\n if(not isinstance(target, np.ndarray)):\n target = np.asarray(target)\n self.target = target\n\n list_of_executed_pipelines = []\n for index, primitives in enumerate(primitives_list):\n\n pipleline_order = \"pipeline\" + str(index)\n\n if(optimize):\n self.primitive = primitives\n self.pipeline_dict['primitives'] = primitives\n pipeline = self.create_pipeline(primitives)\n self.optimization(pipeline, max_evals)\n\n else:\n list_of_executed_pipelines.append(\n self.search_all_possible_primitives(primitives, hyperparameters))\n\n for fold in list_of_executed_pipelines[0][0]:\n fold_number = fold[0]\n Folds[str(fold_number)] = {\"predicted\": list_of_executed_pipelines[0][0][0][2],\n \"Actual\": list_of_executed_pipelines[0][0][0][3]}\n\n self.pipeline_dict = {'primitives': primitives,\n 'folds': Folds,\n 'hyperparameter': None}\n\n all_pipeline_dict[pipleline_order] = self.pipeline_dict\n self.pipeline_dict = {}\n return all_pipeline_dict", "def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train = df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test", "def split(self, elIndicesDict, maxSubTreeSize=None, numSubTrees=None, verbosity=0):\n #dbList = self.generate_circuit_list()\n tm = _time.time()\n printer = _VerbosityPrinter.build_printer(verbosity)\n\n if (maxSubTreeSize is None and numSubTrees is None) or \\\n (maxSubTreeSize is not None and numSubTrees is not None):\n raise ValueError(\"Specify *either* maxSubTreeSize or numSubTrees\")\n if numSubTrees is not None and numSubTrees <= 0:\n raise ValueError(\"EvalTree split() error: numSubTrees must be > 0!\")\n\n #Don't split at all if it's unnecessary\n if maxSubTreeSize is None or len(self) < maxSubTreeSize:\n if numSubTrees is None or numSubTrees == 1: return elIndicesDict\n\n self.subTrees = []\n evalOrder = self.get_evaluation_order()\n printer.log(\"EvalTree.split done initial prep in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n\n def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n \"\"\"\n Find a set of subtrees by iterating through the tree\n and placing \"break\" points when the cost of evaluating the\n subtree exceeds some 'maxCost'. 
This ensure ~ equal cost\n trees, but doesn't ensure any particular number of them.\n\n maxCostRate can be set to implement a varying maxCost\n over the course of the iteration.\n \"\"\"\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost\n\n ##################################################################\n # Part I: find a list of where the current tree should be broken #\n ##################################################################\n\n subTreeSetList = []\n if numSubTrees is not None:\n\n subTreeSize = len(self) // numSubTrees\n for i in range(numSubTrees):\n end = (i + 1) * subTreeSize if (i < numSubTrees - 1) else len(self)\n subTreeSetList.append(set(range(i * subTreeSize, end)))\n\n else: # maxSubTreeSize is not None\n k = 0\n while k < len(self):\n end = min(k + maxSubTreeSize, len(self))\n subTreeSetList.append(set(range(k, end)))\n k = end\n\n ##########################################################\n # Part II: create subtrees from index sets\n ##########################################################\n # (common logic provided by base class up to providing a few helper fns)\n\n def permute_parent_element(perm, el):\n \"\"\"Applies a permutation to an element of the tree \"\"\"\n # perm[oldIndex] = newIndex\n return el # no need to permute operation sequence\n\n def create_subtree(parentIndices, numFinal, fullEvalOrder, sliceIntoParentsFinalArray, parentTree):\n \"\"\"\n Creates a subtree given requisite information:\n\n Parameters\n ----------\n parentIndices : list\n The ordered list of (parent-tree) indices to be included in\n the created subtree.\n\n numFinal : int\n The number of \"final\" elements, i.e. 
those that are used to\n construct the final array of results and not just an intermediate.\n The first numFinal elemements of parentIndices are \"final\", and\n 'sliceIntoParentsFinalArray' tells you which final indices of\n the parent they map to.\n\n fullEvalOrder : list\n A list of the integers between 0 and len(parentIndices)-1 which\n gives the evaluation order of the subtree *including* evaluation\n of any initial elements.\n\n sliceIntoParentsFinalArray : slice\n Described above - map between to-be-created subtree's final\n elements and parent-tree indices.\n\n parentTree : EvalTree\n The parent tree itself.\n \"\"\"\n subTree = TermEvalTree()\n subTree.myFinalToParentFinalMap = sliceIntoParentsFinalArray\n subTree.num_final_strs = numFinal\n subTree[:] = [None] * len(parentIndices)\n subTree.p_polys = {}\n subTree.dp_polys = {}\n subTree.hp_polys = {}\n subTree.repcache = {}\n\n for ik in fullEvalOrder: # includes any initial indices\n k = parentIndices[ik] # original tree index\n circuit = self[k] # original tree data\n subTree.eval_order.append(ik)\n assert(subTree[ik] is None)\n subTree[ik] = circuit\n\n subTree.parentIndexMap = parentIndices # parent index of each subtree index\n subTree.simplified_circuit_spamTuples = [self.simplified_circuit_spamTuples[kk]\n for kk in _slct.indices(subTree.myFinalToParentFinalMap)]\n #subTree._compute_finalStringToEls() #depends on simplified_circuit_spamTuples\n\n final_el_startstops = []; i = 0\n for spamTuples in parentTree.simplified_circuit_spamTuples:\n final_el_startstops.append((i, i + len(spamTuples)))\n i += len(spamTuples)\n subTree.myFinalElsToParentFinalElsMap = _np.concatenate(\n [_np.arange(*final_el_startstops[kk])\n for kk in _slct.indices(subTree.myFinalToParentFinalMap)])\n #Note: myFinalToParentFinalMap maps only between *final* elements\n # (which are what is held in simplified_circuit_spamTuples)\n\n subTree.num_final_els = sum([len(v) for v in subTree.simplified_circuit_spamTuples])\n subTree.recompute_spamtuple_indices(bLocal=False)\n\n subTree.opLabels = self._get_opLabels(subTree.generate_circuit_list(permute=False))\n\n return subTree\n\n updated_elIndices = self._finish_split(elIndicesDict, subTreeSetList,\n permute_parent_element, create_subtree)\n printer.log(\"EvalTree.split done second pass in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n return updated_elIndices", "def lookup_SBN_eval(datum, n_words=3, pruning_threshold=150, similarity_threshold=0.3, ref_type=\"journals\"):\n\tlogger.info(\"settings: n_words=%s, pruning_threshold=%s, similarity_threshold=%s\"%(n_words, pruning_threshold, similarity_threshold))\n\ttry:\n\t\tsearch_string, candidates = fetch_candidates(datum[\"title\"],n_words)\n\t\tbid_candidates = [normalize_bid(candidate[\"codiceIdentificativo\"]) \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t for candidate in candidates]\n\t\tprepared_candidates = [prepare_sbn_record(candidate) for candidate in candidates]\n\t\tcleaned_candidates = [cleanup_sbn_record(candidate) for candidate in prepared_candidates]\n\t\tpruned = prune_candidates(datum[\"title\"], cleaned_candidates, pruning_threshold)\n\t\tpruned_bids = [record[\"bid\"] for score,record in pruned]\n\t\tif(ref_type==\"journals\"):\n\t\t\treference = cleanup_reference_journals(datum[\"reference\"])\n\t\telif(ref_type==\"monographs\"):\n\t\t\treference = cleanup_reference_monographs(datum[\"reference\"])\n\t\tcomparison_results = [] \n\t\tfor title_similarity, candidate in pruned:\n\t\t\tsimilarity_score, score_explanation = 
compare(reference,candidate,title_similarity)\n\t\t\tif(similarity_score >= similarity_threshold):\n\t\t\t\tcomparison_results.append((similarity_score, candidate, score_explanation))\n\t\tcomparison_results = sorted(comparison_results,key=lambda x:x[0],reverse=True)\n\t\tif(len(comparison_results)>0):\n\t\t\tmax_score = max([similarity_score for similarity_score, candidate, score_explanation in comparison_results])\n\t\telse:\n\t\t\tmax_score = np.nan\n\t\tsingle_best = [candidate[\"bid\"]\n\t\t\t\t\t\t\tfor similarity_score, candidate, score_explanation in comparison_results\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif similarity_score == max_score]\n\t\tdatum[\"single_best_bid\"] = single_best\n\t\tdatum[\"bids_gt_threshold\"] = [candidate[\"bid\"]\n\t\t\t\t\t\t\t\t\t\t\tfor similarity_score, candidate, score_explanation in comparison_results]\n\t\tdatum[\"bids_gt_threshold_similarity\"] = \", \".join([\"%s (%s)\"%(candidate[\"bid\"],similarity_score)\n\t\t\t\t\t\t\t\t\t\t\tfor similarity_score, candidate, score_explanation in comparison_results])\n\t\tdatum[\"number_candidates\"] = len(candidates)\n\t\tdatum[\"bid_candidates\"] = bid_candidates\n\t\tdatum[\"bid_in_candidates\"] = len(set(bid_candidates).intersection(datum[\"groundtruth_BID\"]))>0\n\t\tdatum[\"bid_in_pruned_candidates\"] = len(set(pruned_bids).intersection(datum[\"groundtruth_BID\"]))>0\n\t\tdatum[\"search_string\"] = search_string\n\t\tdatum[\"raised_error\"] = False\n\t\tdatum[\"correct_match\"] = len(set(single_best).intersection(datum[\"groundtruth_BID\"]))>0\n\t\tprint(\"[%s] Lookup result: correct = %s\"%(datum[\"mongoid\"],datum[\"correct_match\"]))\n\texcept Exception as e:\n\t\tlogger.error(\"Lookup of reference %s raised the following error: %s\"%(datum[\"mongoid\"],e))\n\t\tdatum[\"raised_error\"] = True\n\treturn datum", "def _get_split_key(keys, num_splits):\n\n # If the number of keys is less than the number of splits, we are limited\n # in the number of splits we can make.\n if not keys or (len(keys) < (num_splits - 1)):\n return keys\n\n # Calculate the number of keys per split. 
This should be KEYS_PER_SPLIT,\n # but may be less if there are not KEYS_PER_SPLIT * (numSplits - 1) scatter\n # entities.\n #\n # Consider the following dataset, where - represents an entity and\n # * represents an entity that is returned as a scatter entity:\n # ||---*-----*----*-----*-----*------*----*----||\n # If we want 4 splits in this data, the optimal split would look like:\n # ||---*-----*----*-----*-----*------*----*----||\n # | | |\n # The scatter keys in the last region are not useful to us, so we never\n # request them:\n # ||---*-----*----*-----*-----*------*---------||\n # | | |\n # With 6 scatter keys we want to set scatter points at indexes: 1, 3, 5.\n #\n # We keep this as a float so that any \"fractional\" keys per split get\n # distributed throughout the splits and don't make the last split\n # significantly larger than the rest.\n\n num_keys_per_split = max(1.0, float(len(keys)) / (num_splits - 1))\n\n split_keys = []\n\n # Grab the last sample for each split, otherwise the first split will be too\n # small.\n for i in range(1, num_splits):\n split_index = int(round(i * num_keys_per_split) - 1)\n split_keys.append(keys[split_index])\n\n return split_keys", "def project(database, frequent_nodes, minsup, freq_labels, length, H, L, L_hat, n_graphs, n_pos, n_neg, pos_index, class_index, neg_index, graph_id_to_list_id, mapper, labels, model, constraints):\n\t# Declaring globals for recursive pattern mining\n\tglobal __subgraph_count\n\tglobal __positive_index\n\tglobal __n_pos\n\tglobal __n_graphs\n\tglobal __dataset\n\tglobal __pattern_set\n\tglobal __cl_constraints\n\tglobal __ml_constraints\n\tglobal __negative_index\n\tglobal __graph_id_to_list_id\n\tglobal __min_threshold\n\tglobal __min_index\n\n\t__graph_id_to_list_id = graph_id_to_list_id\n\t__ml_constraints = [c for c in constraints[0] if c[0] < n_graphs and c[1] < n_graphs]\n\t__cl_constraints = [c for c in constraints[1] if c[0] < n_graphs and c[1] < n_graphs]\n\t__positive_index = pos_index\n\t__negative_index = neg_index\n\t__n_pos = n_pos\n\t__n_graphs = n_graphs\n\t__H = H\n\t__L = L\n\t__L_hat = L_hat\n\t__dataset = []\n\t__pattern_set = []\n\t__subgraph_count = 0\n\t__min_threshold = sys.maxint\n\t__min_index = 0\n\tdfs_codes = []\n\tprojection_map = {}\n\tfeature_selection_model = None\n\n\tif model == \"top-k\":\n\t\tfeature_selection_model = TopKModel()\n\telif model == \"greedy\":\n\t\tfeature_selection_model = GreedyModel(__n_graphs, __positive_index)\n\telif model == \"gMGFL\":\n\t\tfeature_selection_model = GMGFLModel(__L, __L_hat)\n\telif model == \"gMLC\":\n\t\tfeature_selection_model = GMLCModel(__L, __H)\n\telse:\n\t\tlogging.log(logging.ERROR, \"Model %s not recognized\" %(model))\n\t\texit(0)\n\n\t# TODO: evaluate\n\t\"\"\"\n\tOnly constraints for current binary split\n\tfor con in __ml_constraints:\n\t\tif not labels[con[0]][class_index] == 1 and not labels[con[1]][class_index] == 1:\n\t\t\t__ml_constraints.remove((con[0], con[1]))\n\n\tfor con in __cl_constraints:\n\t\tif not labels[con[0]][class_index] == 1 and not labels[con[1]][class_index] == 1:\n\t\t\t__cl_constraints.remove((con[0], con[1]))\n\t\"\"\"\n\n\t# clean constraints from not applicable ones\n\tfor i, con in enumerate(__ml_constraints):\n\t\tif con[0] >= n_graphs or con[1] >= n_graphs:\n\t\t\t__ml_constraints.remove(con)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tlist_id1 = __graph_id_to_list_id[con[0]]\n\t\t\tlist_id2 = __graph_id_to_list_id[con[1]]\n\t\t\t__ml_constraints[i] = (list_id1, list_id2)\n\t\texcept 
KeyError:\n\t\t\t__ml_constraints.remove(con)\n\n\tfor i, con in enumerate(__cl_constraints):\n\t\tif con[0] >= n_graphs or con[1] >= n_graphs:\n\t\t\t__cl_constraints.remove(con)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tlist_id1 = __graph_id_to_list_id[con[0]]\n\t\t\tlist_id2 = __graph_id_to_list_id[con[1]]\n\t\t\t__cl_constraints[i] = (list_id1, list_id2)\n\t\texcept KeyError:\n\t\t\t__cl_constraints.remove(con)\n\n\t# TODO: Is this needed?\n\tfor l in frequent_nodes:\n\t\t__subgraph_count += 1\t\t\n\n\tfor g in database:\n\t\tfor n in g.nodes:\n\t\t\tedges = get_forward_init(n, g)\n\t\t\tif len(edges) > 0:\n\t\t\t\t for e in edges:\n\t\t\t\t\tnf = g.nodes[e.fromn]\n\t\t\t\t\tnt = g.nodes[e.to]\n\t\t\t\t\tdfsc = dfs_code(0,1,nf.label,e.label,nt.label)\n\t\t\t\t\tpdfs = pre_dfs(g.id,e,None)\n\t\t\t\t\t# because this is a root --> append the predecesspr dfs code (graph id, edge, None)\n\t\t\t\t\tif dfsc in projection_map:\n\t\t\t\t\t\tprojection_map[dfsc].append(pdfs)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprojection_map[dfsc] = [pdfs,]\n\n\t# Start Subgraph Mining\n\tthreshold = 0 \t# initial threshold for first length 1 subgraph\n\tfor pm in reversed(sorted(projection_map, key=dfs_code_compare)):\t# sorted by highest fromnode label (order is important)\n\t\tif len(projection_map[pm]) < minsup: # number of graphs, this initial pattern occurs (root patterns)\n\t\t\tcontinue\n\t\tdfs_codes.append(dfs_code(0,1,pm[2],pm[3],pm[4]))\t# initial pattern for this projection is always local 0, 1)\n\t\tdfs_codes = mine_subgraph(database, projection_map[pm],\n\t\t\t\t\t\t\tdfs_codes, minsup, length, mapper, feature_selection_model)\n\t\tdfs_codes.pop()\t# dfs_codes is a list of all projections for this initial pattern\n\treturn __dataset, __pattern_set", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = 
df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def compute_training_features(train_df, df_config, feature_config_list, feature_map, max_horizon):\n pipeline_steps = []\n for feature_config in feature_config_list:\n feature_name, feature_args, featurizer = parse_feature_config(feature_config, feature_map)\n if feature_name in FEATURES_REQUIRE_MAX_HORIZON:\n feature_args[\"max_horizon\"] = max_horizon\n pipeline_steps.append((feature_name, featurizer(df_config=df_config, **feature_args)))\n\n feature_engineering_pipeline = Pipeline(pipeline_steps)\n feature_engineering_pipeline_fitted = feature_engineering_pipeline.fit(train_df)\n train_features = 
feature_engineering_pipeline_fitted.transform(train_df)\n\n return train_features, feature_engineering_pipeline_fitted", "def search(y, x, optimize_on=\"te_loss\", split_method = 'mass'):\n\n \n\t# split the data (8 model)\n\tsplit_train = split.split(y, x, method= split_method)\n\n\t# large range of parameter\n\tdegrees = range(2, 15)\n\tlambdas = np.logspace(-5, 10)\n\n\n\tlambdas_star = []\n\tdegrees_star = []\n\tprint(\"start search\")\n\tfor i, splitted_set in enumerate(split_train):\n\t\tsub_y, sub_x, id_indices = splitted_set\n\n\t\t# first rough search with large scale\n\t\tlambda_star, degree_star, score = _inner_search(sub_y, sub_x, degrees, lambdas, optimize_on)\n\n\t\t# zoomed search around best parameters\n\t\t# zoomed_degree = range(degree_star-2, degree_star + 2)\n\t\tzoomed_lambda = np.logspace(lambda_star - 2, lambda_star + 2, 25)\n\t\tlambda_star, degree_star, score = _inner_search(sub_y, sub_x, degrees, zoomed_lambda, optimize_on)\n\n\t\t# store found values\n\t\tlambdas_star.append(lambda_star)\n\t\tdegrees_star.append(degree_star)\n\n\t\t# print summary\n\t\tprint(\"-------------------------------------\")\n\t\tprint(\"Set\", i)\n\t\tprint(\"-------------------------------------\")\n\t\tprint(\"lambda*:\", lambda_star)\n\t\tprint(\"degree: \", degree_star)\n\t\tif optimize_on == \"te_loss\":\n\t\t\tprint(\"test set loss: \", score)\n\t\telif optimize_on == \"accu\":\n\t\t\tprint(\"accuracy: \", score)\n\n\tprint(\"...............................\")\n\tprint(\"end\")\n\treturn lambdas_star, degrees_star", "def _train(self, features: pd.DataFrame, labels: pd.DataFrame,\n output_folder: str, n_iter: int=3, n_epoch: int=100,\n train_size: float=0.8,\n out_features: int=None, weight_class: bool=False,\n balanced_sampling: bool=False,\n base_net: Net=None, train_last: bool=False,\n refit: bool=False, refit_n_epoch: int=100, verbose: bool=True):\n\n # weight_class and balanced_sample cannot be True at the same time.\n # if weight_class and balanced_sample:\n # raise ValueError('weight_class and balanced_sample cannot be '\n # '\"True\" at the same time.')\n\n # Make an output folder if not exist.\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # apply log10 to some features.\n # TODO: fine this code.\n features['period'], min_period = apply_log10(features['period'])\n features['amplitude'], min_amplitude = apply_log10(features['amplitude'])\n features['hl_amp_ratio'], min_hl_amp_ratio = \\\n apply_log10(features['hl_amp_ratio'])\n features['kurtosis'], min_kurtosis = apply_log10(features['kurtosis'])\n features['phase_cusum'], min_phase_cusum = \\\n apply_log10(features['phase_cusum'])\n features['phase_eta'], min_phase_eta = \\\n apply_log10(features['phase_eta'])\n features['quartile31'], min_quartile31 = \\\n apply_log10(features['quartile31'])\n features['skewness'], min_skewness = apply_log10(features['skewness'])\n features['slope_per90'], min_slope_per90 = \\\n apply_log10(features['slope_per90'])\n\n min_values = {\n 'min_period': min_period,\n 'min_amplitude': min_amplitude,\n 'min_hl_amp_ratio': min_hl_amp_ratio,\n 'min_kurtosis': min_kurtosis,\n 'min_phase_cusum': min_phase_cusum,\n 'min_phase_eta': min_phase_eta,\n 'min_quartile31': min_quartile31,\n 'min_skewness': min_skewness,\n 'min_slope_per90': min_slope_per90\n }\n\n self.min_values = min_values\n # Save for later usage.\n pickle.dump(self.min_values, open(os.path.join(\n output_folder, 'min_params.pkl'), 'wb'))\n\n features = np.array(features)\n labels = np.array(labels)\n\n # 
Normalize.\n features_median = np.median(features, axis=0)\n features_std = np.std(features, axis=0)\n\n # original.\n features_norm = (features - features_median) / features_std\n\n # new.\n # features_min = np.min(features, axis=0)\n # features_max = np.max(features, axis=0)\n # features_norm = features - features_min\n # features_norm /= features_max\n\n # Save the number of features at the last layers.\n if out_features is None:\n self.n_final = np.unique(labels).size\n else:\n self.n_final = out_features\n\n # Save.\n pickle.dump(self.n_final, open(os.path.join(\n output_folder, 'n_final.pkl'), 'wb'))\n\n # Save the values for later usage (e.g. prediction).\n # original.\n self.norm_params = [features_median, features_std]\n # new.\n # self.norm_params = [features_min, features_max]\n pickle.dump(self.norm_params, open(os.path.join(\n output_folder, 'norm_params.pkl'), 'wb'))\n\n # Fit a label encoder.\n le = LabelEncoder()\n le.fit(labels)\n labels_encoded = le.transform(labels)\n\n # Save the label encoder.\n self.label_encoder = le\n pickle.dump(self.label_encoder, open(os.path.join(\n output_folder, 'label_encoder.pkl'), 'wb'))\n\n # Derive class weight by its frequency.\n if weight_class:\n unique, counts = np.unique(labels_encoded, return_counts=True)\n counts = np.array(counts)\n rev_counts = 1. / counts\n # weights = rev_counts / np.sum(rev_counts)\n weights = np.sum(counts) / counts\n class_weights = torch.FloatTensor(weights).to(self.device)\n\n # Training information.\n training_info = {'learning_rate': [],\n 'training_loss': [], 'validation_loss': [],\n 'test_f1': [], 'training_f1': [],\n 'test_mc': [], 'training_mc': []}\n\n # Train a model for the number of iteration.\n best_f1 = 0.\n best_mc = 0.\n f1_average = 'macro'\n for i in range(n_iter):\n # Train and test set split. 
So each iteration,\n # using a set separated differently.\n x_train, x_test, y_train, y_test = \\\n train_test_split(features_norm, labels_encoded,\n train_size=train_size, stratify=labels_encoded)\n\n # Build datasets.\n trainset = LightCurveDataset(x_train, y_train)\n testset = LightCurveDataset(x_test, y_test)\n\n # Up-sampling imbalanced dataset.\n if balanced_sampling:\n train_weights = self._get_balanced_sample_weights(y_train)\n test_weights = self._get_balanced_sample_weights(y_test)\n\n train_sampler = torch.utils.data.sampler.WeightedRandomSampler(\n train_weights, len(train_weights), replacement=True)\n test_sampler = torch.utils.data.sampler.WeightedRandomSampler(\n test_weights, len(test_weights), replacement=True)\n shuffle = False\n else:\n train_sampler = None\n test_sampler = None\n shuffle = True\n\n # Build data loaders.\n # batch_size = 1024\n batch_size = 10240\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=shuffle,\n sampler=train_sampler, num_workers=2)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=batch_size, shuffle=shuffle,\n sampler=test_sampler, num_workers=2)\n\n # Initialize a network before entering the iteration.\n net = Net()\n net.to(self.device)\n if base_net is not None:\n # For transfer learning.\n net.load_state_dict(base_net.state_dict())\n\n # Set the number of neurons at the final layers, which is\n # actually the number of target classes.\n net.fc4 = nn.Linear(net.bn4.num_features, self.n_final)\n net.bn5 = nn.BatchNorm1d(self.n_final)\n net.to(self.device)\n\n # Initial learning rate.\n learning_rate = 0.1\n\n # Set training instances.\n if base_net is not None:\n # Transfer only the last layer.\n if train_last:\n optimizer = optim.SGD(net.fc4.parameters(), lr=learning_rate,\n momentum=0.9)\n else:\n optimizer = optim.SGD(net.parameters(), lr=learning_rate,\n momentum=0.9)\n else:\n optimizer = optim.SGD(net.parameters(), lr=learning_rate,\n momentum=0.9)\n\n scheduler = ReduceLROnPlateau(optimizer, 'min', patience=3,\n eps=1e-15)\n if weight_class:\n criterion = nn.CrossEntropyLoss(weight=class_weights)\n else:\n criterion = nn.CrossEntropyLoss()\n\n # Iterate.\n for epoch in range(n_epoch):\n running_loss = 0.0\n\n # Iterate learning rate.\n if optimizer.param_groups[0]['lr'] <= 1e-10:\n optimizer.param_groups[0]['lr'] = learning_rate\n\n # For each batch.\n predicted_label = []\n true_label = []\n net.train()\n for l, data in enumerate(trainloader, 0):\n # Get the inputs.\n inputs, labels = data\n inputs, labels = inputs.to(self.device), \\\n labels.to(self.device)\n\n # Zero the parameter gradients.\n optimizer.zero_grad()\n\n # Forward + backward + optimize.\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n\n # Get true and predicted labels.\n outputs_numpy = torch.max(outputs, 1)[1].cpu().numpy()\n test_numpy = labels.cpu().numpy()\n predicted_label += outputs_numpy.tolist()\n true_label += test_numpy.tolist()\n\n loss.backward()\n optimizer.step()\n\n # Running loss.\n running_loss += loss.item()\n\n # Calculate training f1.\n training_f1 = f1_score(true_label, predicted_label,\n average=f1_average)\n training_mc = matthews_corrcoef(true_label, predicted_label)\n training_mc = (training_mc + 1) / 2.\n\n # Get test-set performance\n val_loss = 0.\n predicted_label = []\n true_label = []\n net.eval()\n for m, test_data in enumerate(testloader, 0):\n test_inputs, test_labels = test_data\n test_inputs, test_labels = test_inputs.to(self.device), \\\n 
test_labels.to(self.device)\n\n outputs = net(test_inputs)\n val_loss += criterion(outputs, test_labels).item()\n\n # Get true and predicted labels.\n outputs_numpy = torch.max(outputs, 1)[1].cpu().numpy()\n test_numpy = test_labels.cpu().numpy()\n predicted_label += outputs_numpy.tolist()\n true_label += test_numpy.tolist()\n\n test_f1 = f1_score(true_label, predicted_label,\n average=f1_average)\n test_mc = matthews_corrcoef(true_label, predicted_label)\n test_mc = (test_mc + 1) / 2.\n\n curr_f1 = test_f1\n curr_mc = test_mc\n\n if verbose:\n self.logger.info(('[{0}, {1}] '\n 'train Mc: {2:.6f}, test Mc: {3:.6f}, '\n 'learning rate {4:.1e}').format(\n i + 1, epoch + 1, training_mc, curr_mc,\n optimizer.param_groups[0]['lr'])\n )\n\n # Save training information for later usage.\n training_info['learning_rate'].append(\n optimizer.param_groups[0]['lr'])\n training_info['training_loss'].append(running_loss)\n training_info['validation_loss'].append(val_loss)\n training_info['training_f1'].append(training_f1)\n training_info['test_f1'].append(curr_f1)\n training_info['training_mc'].append(training_mc)\n training_info['test_mc'].append(curr_mc)\n\n # We save at the end of each epoch,\n # just in case the training stops unexpectedly.\n pickle.dump(training_info, open(os.path.join(\n output_folder, 'training_info.pkl'), 'wb'))\n\n # Update the best f1 score.\n if curr_f1 > best_f1:\n best_f1 = curr_f1\n self.f1_best = best_f1\n\n # Only if the new model is better.\n if curr_mc > best_mc:\n best_mc = curr_mc\n self.mc_best = best_mc\n\n # Save the model.\n torch.save(net.state_dict(), os.path.join(\n output_folder, 'state_dict.pt'))\n self.net = net\n # self.logger.info('Better model saved.')\n\n # Save true and predicted labels for later usages.\n pickle.dump([true_label, predicted_label],\n open(os.path.join(output_folder,\n 'true_predicted.pkl'), 'wb'))\n\n # Save the best mc as a plain text for temporary saving.\n fp = open(os.path.join(output_folder, 'info.txt'), 'w')\n fp.writelines('# Mc: {0:.6f}, F1: {1:.6f}\\n'.\n format(best_mc, best_f1))\n fp.close()\n\n # Scheduler based on validation loss (i.e. test-set loss).\n scheduler.step(val_loss)\n\n # Epoch ends.\n if verbose:\n self.logger.info('The overall best Mc and F1 using the '\n 'validation set: {0:.6f} and {1:.6f}'.\n format(self.mc_best, self.f1_best))\n\n ################################\n # The whole training finishes. 
#\n ################################\n\n # Get the best test F1 for each iteration.\n test_f1 = np.max(\n np.array(training_info['test_f1']).reshape(-1, n_epoch), axis=1)\n # Calculate statistics of test_f1.\n self.f1_mean = np.mean(test_f1)\n self.f1_median = np.median(test_f1)\n self.f1_std = np.std(test_f1)\n\n # Get the best test Mc for each iteration.\n test_mc = np.max(\n np.array(training_info['test_mc']).reshape(-1, n_epoch), axis=1)\n # Calculate statistics of test_mc.\n self.mc_mean = np.mean(test_mc)\n self.mc_median = np.median(test_mc)\n self.mc_std = np.std(test_mc)\n\n # Save F1 information.\n fp = open(os.path.join(output_folder, 'info.txt'), 'w')\n fp.writelines('# Best_Mc Median_Mc Mean_Mc Std_Mc '\n 'Best_F1 Median_F1 Mean_F1 Std_F1\\n')\n fp.writelines('{0:.10f} {1:.10f} {2:.10f} {3:.10f} '\n '{4:.10f} {5:.10f} {6:.10f} {7:.10f}\\n'.format(\n self.mc_best, self.mc_median, self.mc_mean, self.mc_std,\n self.f1_best, self.f1_median, self.f1_mean, self.f1_std))\n fp.close()\n\n # Refit the model using the entire dataset.\n if refit:\n self.logger.info('Refit the trained model.')\n self._refit(features_norm, labels_encoded, output_folder,\n weight_class, balanced_sampling,\n refit_n_epoch, verbose)", "def _find_best_offer(\n self, all_offers: List[Tuple[str, Dict]]\n ) -> Tuple[List, float]:\n bests, best_gain = [], 0\n\n for partner, offers in all_offers:\n partial_asgt = self._neighbors_values.copy()\n current_partner = self._neighbor_var(partner)\n\n # Filter out the constraints linking those two variables to avoid\n # counting their cost twice.\n shared = find_dependent_relations(current_partner, self._constraints)\n concerned = [rel for rel in self._constraints if rel not in shared]\n\n for (val_p, my_offer_val), partner_local_gain in offers.items():\n partial_asgt.update({partner: val_p, self.variable.name: my_offer_val})\n\n # Then we evaluate the agent constraint's for the offer\n # and add the partner's local gain.\n cost = assignment_cost(partial_asgt, concerned)\n global_gain = self.current_cost - cost + partner_local_gain\n\n if (global_gain > best_gain and self._mode == \"min\") or (\n global_gain < best_gain and self._mode == \"max\"\n ):\n bests = [(val_p, my_offer_val, partner)]\n best_gain = global_gain\n elif global_gain == best_gain:\n bests.append((val_p, my_offer_val, partner))\n\n return bests, best_gain", "def load_pdbbind_pockets(split=\"index\", subset=\"core\"):\n dataset, tasks = featurize_pdbbind_pockets(subset=subset)\n\n splitters = {'index': dc.splits.IndexSplitter(),\n 'random': dc.splits.RandomSplitter()}\n splitter = splitters[split]\n ########################################################### DEBUG\n print(\"dataset.X.shape\")\n print(dataset.X.shape)\n print(\"dataset.y.shape\")\n print(dataset.y.shape)\n print(\"dataset.w.shape\")\n print(dataset.w.shape)\n print(\"dataset.ids.shape\")\n print(dataset.ids.shape)\n ########################################################### DEBUG\n train, valid, test = splitter.train_valid_test_split(dataset)\n\n transformers = []\n for transformer in transformers:\n train = transformer.transform(train)\n for transformer in transformers:\n valid = transformer.transform(valid)\n for transformer in transformers:\n test = transformer.transform(test)\n \n return tasks, (train, valid, test), transformers", "def Train_Test_Split_and_Scale(features, labels, scaler=None, random_state=42, test_size=0.25):\n # train_test_split from SciKit learn is applied on the feature and labels\n (X_train, X_test,\n y_train, 
y_test) = train_test_split(features, labels,\n random_state=random_state,\n test_size=test_size)\n # if a scaler from SciKit learn is passed then apply it to X_train\n if scaler:\n # try to scale X_train and transform X_train and X_test with passed in scaler\n try:\n scaler.fit(X_train)\n # supress warnings for SettingCopywithWarning\n with pd.option_context(\"mode.chained_assignment\", None):\n # maintain dataframe structure\n X_train.loc[:,:] = scaler.transform(X_train.values)\n X_test.loc[:,:] = scaler.transform(X_test.values)\n # return the following values\n return (X_train, X_test, y_train, y_test, scaler)\n except:\n print(\"Passed in scaler does not have .fit() and .transform() methods.\\nReturn values from train_test_split() method.\")\n return (X_train, X_test, y_train, y_test, scaler)\n else:\n # return values from train_test_split() method\n return (X_train, X_test, y_train, y_test, scaler)" ]
[ "0.59855705", "0.57539624", "0.5735463", "0.57259667", "0.5551452", "0.55483556", "0.5174804", "0.5155585", "0.5107157", "0.50866544", "0.50679135", "0.5035721", "0.50326467", "0.5029078", "0.50207394", "0.4985602", "0.49575225", "0.4956578", "0.4938984", "0.49286735", "0.490701", "0.49064377", "0.4853137", "0.48498058", "0.4830959", "0.4826124", "0.4825611", "0.48152936", "0.48011008", "0.47958982", "0.47764447", "0.4743777", "0.4714175", "0.47003365", "0.46886483", "0.46754274", "0.46644366", "0.4663346", "0.46565032", "0.46548864", "0.46523237", "0.46336645", "0.46256962", "0.46194226", "0.4591132", "0.4583907", "0.45837504", "0.4576722", "0.45748743", "0.45744258", "0.45698902", "0.4559587", "0.45591402", "0.45567933", "0.4551058", "0.4551058", "0.455043", "0.4546544", "0.45444733", "0.4535707", "0.4533413", "0.45333612", "0.45264828", "0.4509253", "0.44990602", "0.44980744", "0.44967923", "0.44959906", "0.44831777", "0.44801092", "0.4478807", "0.4477927", "0.4475157", "0.44696143", "0.44692332", "0.44610342", "0.44595534", "0.44545388", "0.4452256", "0.44439897", "0.44342142", "0.44305664", "0.4417886", "0.44177768", "0.4412407", "0.44063118", "0.44054216", "0.44054207", "0.44044068", "0.439851", "0.43934274", "0.4392987", "0.4392506", "0.4389433", "0.43884498", "0.43810338", "0.43802044", "0.43766826", "0.43763062", "0.43761683" ]
0.7983181
0
Computes counts for ALL compatible predictions for the given entry, taking missing values into account. counts is assumed to be a defaultdict(int) instance
def predict_all(self,entry,counts): if self.type == 'v': counts[self.value] += 1 return v = entry[self.feature] if v is None: for val,c in self.children.iteritems(): c.predict_all(entry,counts) return if self.type == 's': c = None try: c = self.children[v] except KeyError: #print "Unseen value for feature",self.feature,": ",v best = None bestDist = float('inf') for (val,c) in self.children.iteritems(): if abs(val - v) < bestDist: bestDist = abs(val - v) best = c c = best c.predict_all(entry,counts) elif self.type == 'i': if v <= self.value: self.children[0].predict_all(entry,counts) else: self.children[1].predict_all(entry,counts) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def _getValueCounts(mapping):\n return Counter({k: len(v) for k, v in viewitems(mapping)})", "def getAlleleCountDict(rec,idx_list=None):\n alleles = defaultdict(int)\n total_sites = 0\n missing_inds = 0\n if idx_list is None:\n idx_list = range(len(rec.samples))\n for j in idx_list:\n samp = rec.samples[j]\n if None in samp.alleles:\n alleles['N'] += len(samp.alleles)\n #missing_inds += 1\n for k in range(len(samp.alleles)):\n b = samp.alleles[k]\n if b is not None:\n alleles[b] += 1\n total_sites+=1\n return alleles", "def __calc_empirical_counts__(self):\n self.empirical_counts = np.zeros(self._features_vector_length, dtype=float)\n for feature, freq in self.features_dict.items():\n for index in feature:\n self.empirical_counts[index] += freq\n assert len(self.empirical_counts) == np.count_nonzero(self.empirical_counts), \"0 in empirical counts vector\"", "def feature_NaN(data_dict):\n N_poi = 0\n count_NaN = dict.fromkeys(data_dict.itervalues().next().keys(), {})\n for k in count_NaN:\n count_NaN[k] = {'count': 0, 'valid count POI': 0, 'valid count non-POI': 0, \\\n 'valid fraction for POI': 0.0, 'valid fraction for non-POI': 0.0}\n for k, v in data_dict.iteritems():\n mark = v['poi']\n if mark: N_poi += 1\n for feature, value in v.iteritems(): \n if value == 'NaN':\n count_NaN[feature]['count'] += 1\n else:\n if mark:\n count_NaN[feature]['valid count POI'] += 1\n else:\n count_NaN[feature]['valid count non-POI'] += 1\n \n N = len(data_dict)\n for k in count_NaN:\n count_NaN[k]['valid fraction for POI'] = 1.0 * count_NaN[k]['valid count POI']/N_poi\n count_NaN[k]['valid fraction for non-POI'] = 1.0 * count_NaN[k]['valid count non-POI']/(N-N_poi)\n pprint(count_NaN)\n return count_NaN", "def qualify(data,variable):\n total_count = 0\n for k in data:\n if data[k][variable] == 'NaN':\n total_count = total_count + 1\n return total_count, 1.* total_count/len(data)", "def ranking(availability_info,mapds):\n rank=Counter(dict())\n for key in availability_info.keys():\n rank[mapds[key]]=len(availability_info[key])\n #print rank\n return rank", "def normalize(counts):\n numvals = sum(counts.itervalues())\n if numvals <= 0:\n return counts\n res = dict()\n for (k,cnt) in counts.iteritems():\n res[k] = float(cnt)/float(numvals)\n return res", "def 
test_sum_counts_by_consensus(self):\r\n #otu_table = parse_otu_table(self.otu_table)\r\n #otu_table = parse_biom_table(self.otu_table)\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 3)\r\n exp_result = {(\r\n 'Root', 'Bacteria', 'Actinobacteria'): array([1, 0, 2, 4]),\r\n ('Root', 'Bacteria', 'Firmicutes'): array([1, 3, 1, 1]),\r\n ('Root', 'Bacteria', 'Other'): array([1, 2, 1, 0])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)\r\n\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 2)\r\n exp_result = {('Root', 'Bacteria'): array([3, 5, 4, 5])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)\r\n\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 4)\r\n exp_result = {('Root', 'Bacteria', 'Actinobacteria', 'Actinobacteria'):\r\n array([1, 0, 2, 4]),\r\n ('Root', 'Bacteria', 'Firmicutes', '\"Clostridia\"'):\r\n array([1, 3, 1, 1]),\r\n ('Root', 'Bacteria', 'Other', 'Other'): array([1, 2, 1, 0])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)", "def _convert_to_counts(self, indiv_data):\n count_data = {}\n for indiv in indiv_data:\n for allele in indiv:\n if allele is not None:\n allele_count = count_data.get(str(allele), 0)\n count_data[str(allele)] = allele_count + 1\n return count_data", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def calculates_results_stats(results_dic): \n # Creates empty dictionary for results_stats_dic\n results_stats_dic = dict()\n \n # Sets all counters to initial values of zero so that they can \n # be incremented while processing through the images in results_dic \n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n \n # process through the results dictionary\n for key in results_dic:\n \n # Labels Match Exactly\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n\n # TODO: 5a. REPLACE pass with CODE that counts how many pet images of\n # dogs had their breed correctly classified. This happens \n # when the pet image label indicates the image is-a-dog AND \n # the pet image label and the classifier label match. You \n # will need to write a conditional statement that determines\n # when the dog breed is correctly classified and then \n # increments 'n_correct_breed' by 1. Recall 'n_correct_breed' \n # is a key in the results_stats_dic dictionary with it's value \n # representing the number of correctly classified dog breeds.\n # \n # Pet Image Label is a Dog AND Labels match- counts Correct Breed\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n \n # Pet Image Label is a Dog - counts number of dog images\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n \n # Classifier classifies image as Dog (& pet image is a dog)\n # counts number of correct dog classifications\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n\n # TODO: 5b. 
REPLACE pass with CODE that counts how many pet images \n # that are NOT dogs were correctly classified. This happens \n # when the pet image label indicates the image is-NOT-a-dog \n # AND the classifier label indicates the images is-NOT-a-dog.\n # You will need to write a conditional statement that \n # determines when the classifier label indicates the image \n # is-NOT-a-dog and then increments 'n_correct_notdogs' by 1. \n # Recall the 'else:' above 'pass' already indicates that the \n # pet image label indicates the image is-NOT-a-dog and \n # 'n_correct_notdogs' is a key in the results_stats_dic dictionary \n # with it's value representing the number of correctly \n # classified NOT-a-dog images.\n # \n # Pet Image Label is NOT a Dog\n else:\n # Classifier classifies image as NOT a Dog(& pet image isn't a dog)\n # counts number of correct NOT dog clasifications.\n if results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n\n\n # Calculates run statistics (counts & percentages) below that are calculated\n # using the counters from above.\n\n # calculates number of total images\n results_stats_dic['n_images'] = len(results_dic)\n\n # calculates number of not-a-dog images using - images & dog images counts\n results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] - \n results_stats_dic['n_dogs_img']) \n\n # TODO: 5c. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # matched images. Recall that this can be calculated by the\n # number of correctly matched images ('n_match') divided by the \n # number of images('n_images'). This result will need to be \n # multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct for matches\n results_stats_dic['pct_match'] = (results_stats_dic['n_match'] / results_stats_dic['n_images']) * 100\n\n # TODO: 5d. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified dog images. Recall that this can be calculated by \n # the number of correctly classified dog images('n_correct_dogs')\n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct dogs\n results_stats_dic['pct_correct_dogs'] = (results_stats_dic['n_correct_dogs'] / results_stats_dic['n_dogs_img']) * 100\n\n # TODO: 5e. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified breeds of dogs. Recall that this can be calculated \n # by the number of correctly classified breeds of dog('n_correct_breed') \n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct breed of dog\n results_stats_dic['pct_correct_breed'] = (results_stats_dic['n_correct_breed'] / results_stats_dic['n_dogs_img']) * 100\n\n # Calculates % correct not-a-dog images\n # Uses conditional statement for when no 'not a dog' images were submitted \n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = (results_stats_dic['n_correct_notdogs'] /\n results_stats_dic['n_notdogs_img']) * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n\n \n # TODO 5f. 
REPLACE None with the results_stats_dic dictionary that you \n # created with this function \n return results_stats_dic", "def fit(self, counts):\n counts = as_counts_array(counts)\n self.nk, self.zk = unique(counts)\n self._n = numpy.sum(self.zk * self.nk)\n self._k1 = numpy.sum(self.zk[self.nk > 0])\n return self", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def getClassCounts(column, uniqueVal, decision, yes, no , total):\r\n dataDict = {} # a dictionary of labels\r\n for val in uniqueVal:\r\n label1 = val + '/Y'\r\n label2 = val + '/N'\r\n dataDict[label1] = 0; dataDict[label2] = 0\r\n for dec, at in zip(decision, column):\r\n if at == val and dec == 'No':\r\n dataDict[label2] += 1\r\n if at == val and dec == 'Yes':\r\n dataDict[label1] += 1\r\n dataDict[val] = (dataDict[label2]+ dataDict[label1])/ total\r\n dataDict[label2] = dataDict[label2] / no\r\n dataDict[label1] = dataDict[label1] / yes\r\n return dataDict", "def count_counts(self):\n count_counts = defaultdict(Counter)\n for token, followers in self._dict.items():\n for f, count in followers.items():\n count_counts[token][count] += 1\n count_counts[token][0] = len(self._dict) - sum(count_counts[token].values())\n return count_counts", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def count_entries(aDictEntry):\n total = 0 # Sum of all frequencies\n for x in aDictEntry:\n total += aDictEntry[x] # Add to the total\n return total # Returns", "def getCounts(training_data, test_row, k):\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n\n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n \n return counts", "def freq_counts(self, arrs, lens):\n no_nans = reduce(np.logical_and, [~np.isnan(a) if bn.anynan(a) else np.ones(self.m).astype(bool) for a in arrs])\n combined = reduce(add, [arrs[i][no_nans]*reduce(mul, lens[:i]) for i in range(1, len(arrs))], arrs[0][no_nans])\n return np.bincount(combined.astype(np.int32, copy=False), minlength=reduce(mul, lens)).astype(float)", "def observed_species(counts):\n return (counts!=0).sum()", "def 
get_nan_counts(data, cols, null_col_suffix=''):\n nulls_df = pd.DataFrame(pd.isnull(data[cols]).sum())\n nulls_df.columns = ['null_counts'+null_col_suffix]\n nulls_df['feature'] = nulls_df.index\n nulls_df.reset_index(inplace=True, drop=True)\n return nulls_df", "def count(self, value=None):\r\n\t\t_set = list(set(self.sample))\r\n\t\tif value == None: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}\r\n\t\telse:\r\n\t\t\ttry: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}[value]\r\n\t\t\texcept: return 0", "def count_annotation_values(graph, annotation):\n return Counter(iter_annotation_values(graph, annotation))", "def distribution_of_result(clf_name, all, ids=None, print_res=True):\n id_map = get_id_dict()\n y_map = get_y_dict()\n\n if clf_name == 'svm':\n clf_name = 'Pipeline'\n else:\n clf_name = type(get_base_learners(clf_name)).__name__\n y_preds = load_testset_predictions(all)\n\n distr_count = dict()\n distr_count['TP'] = Counter()\n distr_count['FN'] = Counter()\n distr_count['FP'] = Counter()\n distr_count['TN'] = Counter()\n\n distr = {key: {source: [] for source in set([v for v in id_map.values()])} for key in ['TP', 'FN', 'FP', 'TN']}\n\n for index, row in y_preds.iterrows():\n pred = row[clf_name]\n tweet_id = row['tweet__id']\n\n source = id_map[tweet_id]\n y = y_map[tweet_id]\n\n if ids is not None and tweet_id not in ids:\n continue\n\n if y == pred and pred == 1:\n distr_count['TP'][source] += 1\n distr['TP'][source].append(tweet_id)\n\n elif y != pred and pred == 1:\n distr_count['FP'][source] += 1\n distr['FP'][source].append(tweet_id)\n\n elif y != pred and pred == 0:\n distr_count['FN'][source] += 1\n distr['FN'][source].append(tweet_id)\n\n else:\n distr_count['TN'][source] += 1\n distr['TN'][source].append(tweet_id)\n\n if print_res:\n print(distr_count)\n for key, values in distr.items():\n print('{}:'.format(key))\n for key2, values2 in distr[key].items():\n print('\\t{}:'.format(key2))\n print('\\t{}'.format(values2))\n return distr_count, distr", "def __uniqueCounts(rows):\n results = {} #Initialize a dictionary to store the results\n for row in rows: #Iterate over all rows of data\n #The result is the last column\n r = row[-1]\n if r not in results: results[r] = 0 #Start the count for each class at zero\n results[r] += 1 #Increment the count for this row's class by 1\n return results", "def counts_vals(self):\n\n return unumpy.nominal_values(self.counts)", "def count_classes(labels):\n class_dict = {}\n for image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict", "def _get_total_ngrams(n_grams_counts: Dict[int, Dict[Tuple[str, ...], Tensor]]) ->Dict[int, Tensor]:\n total_n_grams: Dict[int, Tensor] = defaultdict(lambda : tensor(0.0))\n for n in n_grams_counts:\n total_n_grams[n] = tensor(sum(n_grams_counts[n].values()))\n return total_n_grams", "def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs", "def apply(self):\n counter = {}\n for act in self.activities:\n freq = []\n for trace in self.log:\n freq.append(len(self.project_trace(trace, [act])))\n if not len(freq) 
== 0:\n counter[act] = {'sum': sum(freq), 'min': min(freq),\n 'max': max(freq)}\n return counter", "def view_counts():\n out = {}\n for i in range(len(classes)):\n out.update({decoded[i]: storage.count(classes[i])})\n return out", "def freq_count(self):\n #eg: fc = spammy.freq_count()\n count_dict = defaultdict(int)\n for entry in self._train_list:\n if entry in self._vocab_set:\n count_dict[entry] += 1\n return count_dict", "def count_terms(self, tokens):\n\n terms = [self.term_match(t) for t in tokens ]\n \n terms = [t for t in terms if t != None]\n\n #print terms\n lf = dict(Counter(terms))\n for k in lf:\n lf[k] /= float(len(tokens))\n #lf[k] = 1 # binarize?\n pass\n return lf", "def array_occurrences(cmd_out: list) -> defaultdict:\n array_frequency = defaultdict(int) # type: defaultdict\n array_name = 0\n for entry in cmd_out:\n array_frequency[entry[array_name]] += 1\n return array_frequency", "def _count_correct_prediction(\n self, logits: Dict[str, torch.Tensor], labels: torch.Tensor\n ) -> None:\n if len(labels.size()) != 1: # For e.g., CutMix labels\n return\n for module_name, logit in logits.items():\n _, predicted = torch.max(F.softmax(logit, dim=1).data, 1)\n n_correct = int((predicted == labels).sum().cpu())\n self.n_correct_epoch[module_name] += n_correct", "def get_tag_counts(label_matches):\r\n\ttag_counts = {}\r\n\tfor word_and_tag in label_matches.keys():\r\n\t\tcurrent_count = tag_counts.get(word_and_tag[_TAG], 0)\r\n\t\ttag_counts[word_and_tag[_TAG]] = current_count+1\r\n\treturn tag_counts", "def prepare_class_freqs(cls_counts, n_classes):\n\n if None in cls_counts:\n return None\n\n lst_cls_counts = []\n\n for party_cls_counts in cls_counts:\n temp = [0] * n_classes\n for label, count in party_cls_counts.items():\n temp[int(label)] = int(count)\n\n lst_cls_counts.append(np.array(temp))\n\n return lst_cls_counts", "def get_kmer_counts(kmer_list, kmer_counts):\n counts = defaultdict(int)\n for kmer in kmer_list:\n counts[kmer] = counts.get(kmer, 0) + kmer_counts[kmer]\n return counts", "def count_each_tag(mat):\n cnts = {}\n for vec in mat:\n if vec[-1] not in cnts:\n cnts[vec[-1]] = 0.0\n cnts[vec[-1]] += 1.0\n return cnts", "def get_value_counts(X, columns, cate_cap=30):\n counts = {}\n for col in columns:\n temp = dict(X[col].value_counts())\n temp['NaN'] = X[col].isnull().sum()\n if len(temp) > cate_cap:\n counts[col] = 'There are more than %d categories. Please check this column.' 
% cate_cap\n else:\n counts[col] = temp\n return counts", "def Counts(dict_of_list):\n return {k: len(v) for k, v in dict_of_list.iteritems()}", "def review_counts(stat_info_dict):\n review_counts = {}\n for release, stat_dict in stat_info_dict.items():\n review_counts_per_release = {}\n for key, stat in stat_dict.items():\n # review count\n review_counts_per_release[key] = stat['metric']\n review_counts[release] = review_counts_per_release\n return review_counts", "def add_count_data(self, counts: Dict[datetime, int]):\n raise NotImplementedError()", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def calcCountDict(TFdict):\n\n countDict = {}\n\n for doc in TFdict:\n for term in doc:\n if term in countDict:\n countDict[term] +=1\n else:\n countDict[term] = 1\n\n return countDict", "def count_correct_tags(self):\n correct_dict = {}\n for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):\n if gold_tag == predict_tag:\n if gold_tag not in correct_dict:\n correct_dict[gold_tag] = 1\n else:\n correct_dict[gold_tag] += 1\n\n return correct_dict", "def test_counts(self):\n # test DNA seq\n orig = \"AACCGGTTAN-T\"\n seq = self.DNA(orig)\n # no gaps, no ambiguities\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n # gaps allowed\n got = seq.counts(allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n # ambig allowed\n got = seq.counts(include_ambiguity=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(include_ambiguity=True, allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n\n # test DNA seq motif length of 2\n got = seq.counts(motif_length=2)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n self.assertEqual(dict(got), expect)\n # gap allowed\n got = seq.counts(motif_length=2, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n expect.update({\"-T\": 1})\n # ambig allowed\n got = seq.counts(motif_length=2, include_ambiguity=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(motif_length=2, include_ambiguity=True, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n expect.update({\"-T\": 1})\n self.assertEqual(dict(got), expect)\n\n # test base -- no concept of ambiguity, but understands gap\n orig = \"AACCGGTTAN-T\"\n seq = self.SEQ(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n\n # handle '?'\n orig = \"AACCGGTTAN-T?\"\n seq = self.DNA(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n got = seq.counts(allow_gap=True, include_ambiguity=True)\n expect.update({\"-\": 1, \"N\": 1, \"?\": 1})\n self.assertEqual(dict(got), expect)", "def label_count(self, label_list_ids=None):\n count = collections.defaultdict(int)\n\n for 
label_list in self.label_lists.values():\n if label_list_ids is None or label_list.idx in label_list_ids:\n for label_value, label_count in label_list.label_count().items():\n count[label_value] += label_count\n\n return count", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def partitioner(mappings):\n\t\n\ttoken_counts = defaultdict(list)\n\t\n\tfor sublist in mappings:\n\t\tfor t, c in sublist:\n\t\t\ttoken_counts[t].append(c)\n\t\t\t\n\treturn token_counts", "def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)", "def computeNucStats(trues, preds):\n\t#TP, TN, FP, FN\n\tcount = zeros(4)\n\tfor a in range(len(trues)):\n\t\tif (trues[a] == 'F') & (preds[a] == 'F'):\n\t\t\tcount[0] += 1\n\t\telif (trues[a] == 'N') & (preds[a] == 'N'):\n\t\t\tcount[1] += 1\n\t\telif (trues[a] == 'N') & (preds[a] == 'F'):\n\t\t\tcount[2] += 1\n\t\telif (trues[a] == 'F') & (preds[a] == 'N'):\n\t\t\tcount[3] += 1\n\treturn count", "def generate_counts():\n\n counts_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n doc_path = os.path.join(subfolder_path, filename)\n with open(doc_path, 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n counts_dict.update({doc_path: collections.Counter(normalised_text.split())})\n #print(counts_dict.get('file/crude/article560.txt'))\n\n vocab = generate_vocab()\n for value in counts_dict.values():\n for k in vocab.keys():\n if k not in value.items():\n value.update({k: 0})\n\n #print(counts_dict.get('file/crude/article560.txt'))\n return counts_dict", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1", "def score_aggregate(results):\n scores = []\n truth_count = detected_count = segment_count = 0\n\n for res in results:\n scores.append(res[\"scores\"])\n truth_count += len(res[\"labels\"])\n detected_count += len(res[\"detected\"])\n segment_count += len(res[\"scores\"][\"segments\"])\n\n ret = dict()\n ret[\"scores\"] = sum_scores(scores)\n ret[\"stats\"] = dict(truth_count=truth_count, detected_count=detected_count, segment_count=segment_count)\n return ret", "def _get_counts(self, 
X: np.ndarray) -> Dict[int, np.ndarray]:\n return {f: np.bincount(X[:, f].astype(int), minlength=n_cat) for f, n_cat in\n self.categories_per_feature.items()}", "def __init__(self):\n self.num_counts = {}", "def calculate_nominal_occurrences(examples: List[Example], attr_idx: int):\n value_occs = {}\n\n for example in examples:\n value = example[attr_idx]\n\n if value not in value_occs:\n value_occs[value] = [0, 0]\n\n value_occs[example[attr_idx]][int(example.label != 1)] += example.weight\n\n return [value_occ for key, value_occ in value_occs.items()]", "def hits(self):\n return sum(self.labels.values())", "def apk(actual, predicted, k=10):\n if len(predicted) > k:\n predicted = predicted[:k]\n\n score = 0.0\n num_hits = 0.0\n\n for i, p in enumerate(predicted):\n if p in actual and p not in predicted[:i]:\n num_hits += 1.0\n score += num_hits / (i + 1.0)\n\n # if not actual:\n # return 0.0\n\n return score / min(len(actual), k)", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def getClassCounts(b):\n c = {k:0 for k in labels.keys()}\n for r in b:\n c[r[0]] += 1\n return c", "def get_compute_statistics(cls, all_tensors):\n flags = {\n \"total\": {\"count\": 0, \"tensors\": []},\n None: {\"count\": 0, \"tensors\": []},\n \"unknown\": {\"count\": 0, \"tensors\": []},\n }\n for index in OpFlags:\n index = index.value\n flags[index] = {\"count\": 0, \"tensors\": []}\n for tensor in all_tensors:\n flag = tensor.op.tag\n cls.count_flag(flags, flag, tensor)\n return flags", "def _to_partial_counts(\n sliced_record_batch: types.SlicedRecordBatch, path: types.FeaturePath,\n boundaries: Optional[np.ndarray], weight_column_name: Optional[Text]\n) -> Iterator[Tuple[Tuple[types.SliceKey, Union[_XType, _YType]], _CountType]]:\n slice_key, record_batch = sliced_record_batch\n value_presence = _get_example_value_presence(record_batch, path, boundaries,\n weight_column_name)\n if value_presence is None:\n return value_presence\n\n if weight_column_name is not None:\n grouped_values = collections.defaultdict(float)\n else:\n grouped_values = collections.defaultdict(int)\n\n for value, weight in zip(value_presence.values, value_presence.weights):\n grouped_values[value] += weight\n\n for value, count in grouped_values.items():\n yield (slice_key, value), count", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def _calc_refs(counts):\n refs = 0\n for allele in counts.keys():\n refs += counts[allele]\n return refs", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count_values(array):\n unique, counts = np.unique(array, return_counts=True)\n return dict(zip(unique, counts))", "def countit(objs):\n out = {}\n for el in objs:\n out[el] = 1 + 
out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out", "def _normalize_counts(counts, val=1):\n n = sum(counts.values())\n frequencies = {}\n for r in list(counts.keys()):\n frequencies[r] = val * float(counts[r]) / float(n)\n return frequencies", "def test_expected_output(self, arr, counts, expected_output):\n\n output = LeafNodeScaledConformalPredictor._sum_dict_values(arr, counts)\n\n assert output == expected_output, \"_sum_dict_values produced incorrect output\"", "def countUMIs(barcode_dictionary):\n new_dict = {}\n for i in barcode_dictionary:\n \tnew_dict[i] = (sum(Counter(barcode_dictionary[i][1:]).values()), len(Counter(barcode_dictionary[i][1:]).keys()))\n return new_dict", "def test_getAbundanceFrequencyCounts(self):\r\n # Verified with iNEXT.\r\n exp = defaultdict(int, {1: 1, 2: 1, 3: 1, 4: 1, 5: 1})\r\n obs = self.est1.getAbundanceFrequencyCounts()\r\n self.assertEqual(obs, exp)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getAbundanceFrequencyCounts(), colwell_fk1)\r\n self.assertEqual(self.est3.getAbundanceFrequencyCounts(), colwell_fk2)", "def _get_frequencies(data, col, col_vals, agg_col, agg_unit, agg_to_data):\n interesting_data = None\n frequencies = {}\n for col_val in col_vals:\n frequencies[col_val] = 0\n # We can't just use collections.Counter() because frequencies.keys() is used to determine\n # the range of possible values in other functions.\n if _PANDAS_AVAILABLE and isinstance(data, pd.DataFrame):\n interesting_data = agg_to_data[agg_unit][col]\n for name in interesting_data:\n if name in frequencies:\n frequencies[name] = frequencies[name] + 1\n else: # Assumes it is an np.ndarray\n for row in itertools.ifilter(lambda row : row[agg_col] == agg_unit, data):\n if row[col] in frequencies:\n frequencies[row[col]] += 1\n return frequencies, interesting_data", "def apk(actual, predicted, k=10):\n if len(predicted)>k:\n predicted = predicted[:k]\n\n score = 0.0\n num_hits = 0.0\n\n for i,p in enumerate(predicted):\n if p in actual and p not in predicted[:i]:\n num_hits += 1.0\n score += num_hits / (i+1.0)\n\n if not actual:\n return 0.0\n\n return score / min(len(actual), k)", "def count_unvisited(data):\n count = sum(n.count(\"n\") for n in data)\n return count", "def apk(actual, predicted, k=3):\n\n actual = set(actual)\n predicted = list(predicted)\n\n if len(predicted) > k:\n predicted = predicted[:k]\n\n score = 0.0\n num_hits = 0.0\n\n for i, p in enumerate(predicted):\n if p in actual and p not in set(predicted[:i]):\n num_hits += 1.0\n score += num_hits / (i + 1.0)\n\n if not actual:\n return 0.0\n\n return score / min(len(actual), k)", "def run_compute_reread_counts(self):\n questions = []\n contexts = []\n student_data = self.responses[:]\n for response in student_data:\n if response.question.text not in questions:\n questions.append(response.question.text)\n if response.context.text not in contexts:\n contexts.append(response.context.text)\n\n compute_reread_counts_data = []\n\n for question in questions:\n for context in contexts:\n compute_reread_counts_data.append(self.compute_reread_counts(\n question, context))\n\n return compute_reread_counts_data", "def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / 
scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs", "def predict(self, key):\n return self.counts.get(key, 1.0)", "def explicit_counts_map(self, pixels=None):\n # No pixel index, so build one\n if self.hpx._ipix is None:\n if self.data.ndim == 2:\n summed = self.counts.sum(0)\n if pixels is None:\n nz = summed.nonzero()[0]\n else:\n nz = pixels\n data_out = np.vstack(self.data[i].flat[nz]\n for i in range(self.data.shape[0]))\n else:\n if pixels is None:\n nz = self.data.nonzero()[0]\n else:\n nz = pixels\n data_out = self.data[nz]\n return (nz, data_out)\n else:\n if pixels is None:\n return (self.hpx._ipix, self.data)\n # FIXME, can we catch this\n raise RuntimeError(\n 'HPX.explicit_counts_map called with pixels for a map that already has pixels')", "def probe_counts(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n\n return dict(\n attributes=\",\".join(attributes),\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n aggregate_grouping=\"\"\"\n client_agg_type,\n agg_type\n \"\"\",\n # not boolean\n scalar_metric_types=\"\"\"\n \"counter\",\n \"quantity\",\n \"labeled_counter\",\n \"timespan\"\n \"\"\",\n boolean_metric_types=\"\"\"\n \"boolean\"\n \"\"\",\n **kwargs,\n )", "def get_nan_rows(data_dict):\n\n counts = dict.fromkeys(data_dict.itervalues().next().keys(), 0)\n for record in data_dict:\n person = data_dict[record]\n for field in person:\n if person[field] == 'NaN':\n counts[field] += 1\n\n print(\"Number of NaN rows in the data: \")\n pprint(counts)\n print_separator_line", "def get_all_counts(filename):\r\n column_keys, get_data = get_csv(filename)\r\n all_counts_dict = {}\r\n for key in column_keys[1:]:\r\n all_counts_dict[key] = {}\r\n\r\n for i,(k,v) in enumerate(get_data()):\r\n for key in column_keys[1:]:\r\n column = column_keys[1:].index(key)\r\n x = v[column]\r\n all_counts_dict[key][x] = all_counts_dict[key].get(x, 0) + 1\r\n return all_counts_dict", "def construct_ngrams_dict(ngrams_list):\n counts = {}\n\n for t in ngrams_list:\n key = hash_function(t)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n return counts", "def apk(actual, predicted, k):\n\n if len(predicted)>k:\n predicted = predicted[:k]\n \n score = 0.0\n num_hits = 0.0\n\n for i,p in enumerate(predicted):\n if p in actual and p not in predicted[:i]:\n num_hits += 1.0\n score += num_hits / (i+1.0)\n\n if not actual:\n return 0.0\n\n return round(score / min(len(actual), k), 5)", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def _update_invalid_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"invalid_counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"invalid_counts\", counts)", "def sparse_counts_map(self):\n if self.hpx._ipix is None:\n flatarray = self.data.flattern()\n else:\n flatarray = self.expanded_counts_map()\n nz = flatarray.nonzero()[0]\n data_out = flatarray[nz]\n return (nz, data_out)", "def get_feature_statistics(results):\n to_be_deleted = []\n\n for result in results:\n if len(result.subset) != 6:\n to_be_deleted.append(result)\n\n length = len(results)\n feature_labels = datapoint_features\n statistics = {}\n\n for label 
in feature_labels:\n result_with = metrics.filter_results(results, features=[label])\n result_without = metrics.filter_results(results, without_features=[label])\n\n with_length = len(result_with)\n without_length = len(result_without)\n prevalence = with_length / length\n\n if prevalence != 0:\n avg_f1_dos = math.fsum([result.metrics['dos'].f1 for result in result_with]) / with_length\n avg_f1_fuzzy = math.fsum([result.metrics['fuzzy'].f1 for result in result_with]) / with_length\n avg_f1_imp = math.fsum([result.metrics['impersonation'].f1 for result in result_with]) / with_length\n else:\n avg_f1_dos = 0\n avg_f1_fuzzy = 0\n avg_f1_imp = 0\n\n avg_f1_without_dos = math.fsum([result.metrics['dos'].f1 for result in result_without]) / without_length\n avg_f1_without_fuzzy = math.fsum([result.metrics['fuzzy'].f1 for result in result_without]) / without_length\n avg_f1_without_imp = math.fsum([result.metrics['impersonation'].f1 for result in result_without]) / without_length\n avg_f1_diff_dos = avg_f1_without_dos - avg_f1_dos\n avg_f1_diff_fuzzy = avg_f1_without_fuzzy - avg_f1_fuzzy\n avg_f1_diff_imp = avg_f1_without_imp - avg_f1_imp\n\n statistics[label] = [prevalence, avg_f1_diff_dos, avg_f1_diff_fuzzy, avg_f1_diff_imp]\n\n return statistics" ]
[ "0.627431", "0.6079571", "0.5920963", "0.5891744", "0.5813785", "0.5808457", "0.57633555", "0.5717828", "0.5693999", "0.5685904", "0.5680773", "0.5613226", "0.55953914", "0.5592989", "0.5554436", "0.5554436", "0.5554436", "0.5552211", "0.5529736", "0.55194706", "0.55182385", "0.55144036", "0.5507741", "0.5472775", "0.54515254", "0.54464763", "0.53958", "0.53921264", "0.5390644", "0.53884727", "0.5385558", "0.53849804", "0.53664565", "0.5366085", "0.53601676", "0.53529555", "0.53499156", "0.5344084", "0.53339297", "0.53148234", "0.5309434", "0.5305252", "0.5290378", "0.5268138", "0.5267178", "0.526634", "0.52620965", "0.5261618", "0.52602494", "0.5258167", "0.52571976", "0.5249782", "0.5245424", "0.52374184", "0.52323836", "0.52225256", "0.5215875", "0.52134216", "0.51991916", "0.51913774", "0.51830417", "0.51782537", "0.51772803", "0.51736325", "0.5167163", "0.51559025", "0.51500666", "0.5147568", "0.5147568", "0.51445836", "0.51423615", "0.5133935", "0.5129277", "0.51280046", "0.51279205", "0.5127477", "0.511907", "0.5119025", "0.5115377", "0.5114717", "0.51118815", "0.51007205", "0.5096759", "0.50961745", "0.50825727", "0.50823295", "0.50822324", "0.50777394", "0.5076615", "0.5071288", "0.5069878", "0.5063111", "0.5049408", "0.50480986", "0.5042137", "0.5037018", "0.503645", "0.5032675", "0.50300014", "0.5027353" ]
0.5712293
8
Predicts a label for a feature vector
def predict(self,entry): assert self.root is not None,"Decision tree is not initialized" return self.root.predict(entry)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):", "def predict(self, X):", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict(self, X):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.predict(stuff)\n return result\n pass", "def predict(self):\n prediction = np.multiply(self.alpha_vec, self.label[:,np.newaxis]).T\n pred = np.dot(prediction, np.inner(self.train_data, self.train_data)) + self.intercept\n self.prediction = np.sign(pred)\n return(self.prediction)", "def predict(self, X, **kwargs):\n\n X = sanitize_dataframe(X)\n\n for c in set(self._features).difference(set(X.columns.values)):\n X = X.assign(**{c: 1})\n\n X[\"label_prediction\"] = self._base_model.predict(X)\n\n return self._model.predict(X[self._features], **kwargs)", "def predict(model, features):\n result = model.predict(features)\n return result", "def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]", "def predict(self, X):\n raise NotImplementedError", "def predict(self, X, a, b):\n pass", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def predict(self, predPoints=None):", "def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = 
np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels", "def clf1_predict(self):\n self._pred_clf_1 = self._clf1.predict(self._vectorized_input)[0]", "def predict(self, inputs):\n if self.use_logistic:\n return self.predict_labels_logistic(self.w, inputs)\n return predict_labels(self.w, inputs)", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict(self, x):\n raise NotImplementedError(\"Please Implement this method\")", "def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"", "def predict(self):\n raise NotImplementedError", "def predict(self, input_vec):\n return self.model.predict(input_vec)", "def predict(self, X, pred_batch_size=None):", "def predictSVM(w, x):\n \n # compute activation for test example and threshold the result\n a = np.dot(w, x);\n label = 1 if a > 0 else -1;\n \n return label;", "def predict(self, features):\n return self.search_results.predict(features)", "def predict(x):\n model = Model()\n res = model.predict([x])[0][0]\n click.echo(res)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self,X):\n return self.classifier.predict(X)", "def predict(self, **kwargs):\n raise NotImplementedError", "def predict(self, X, y=None):\n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n _, Predicted_Labels =\\\n RankSVM_test(test_data=X,\n num_class=self.num_class,\n Weights=self.Weights,\n Bias=self.Bias,\n SVs=self.SVs,\n svm=self.svm, gamma=self.gamma,\n coefficient=self.coefficient,\n degree=self.degree)\n\n return Predicted_Labels", "def predict(self, X):\n # Check is fit had been called\n check_is_fitted(self)\n X = self._clean(X)\n y = self.model_.predict(X)\n return self.map_label_inverse_(y)", "def predict(clf, features):\n return clf.predict(features).astype(np.int)", "def predict(self, features):\n features_scaled = self.scaler.transform(features.reshape(1, -1))\n return self.clf.predict(features_scaled)", "def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError", "def predict(self, x, **kwargs):\n raise NotImplementedError", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_", "def predict(self, X):\n\n ### TODO: YOUR CODE HERE\n D = shape(X)\n if self.isLeaf:\n return self.label\n else:\n if ( X[self.feature] <= 0.5 ):\n return self.left.predict(X)\n else:\n return self.right.predict(X)", "def 
predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict(self, X):\n\n y_pred = np.zeros(X.shape[0])\n y_pred = np.argmax(np.dot(X,self.W), axis=1)\n ###########################################################################\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n\n return y_pred", "def predict(self, X, k=1):\n dists = self.compute_distances(X)\n return self.predict_labels(dists, k=k)", "def predict_labels(model, x_test):\n \n pred = model.predict(x_test)\n #pred_labels = model.predict_classes(x_test) # depricated\n pred_labels = np.argmax(model.predict(x_test), axis=-1)\n \n return pred, pred_labels", "def predict(self, model, context, data):\n pass", "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n\n y = np.transpose(df_target.values)\n X = np.transpose(df_features.values)\n\n path, beta, A, lam = hsiclasso(X, y)\n\n return beta", "def predict_labels(clf, features, target):\n\n # Start the clock, make predictions, then stop the clock\n start = time()\n y_pred = clf.predict(features)\n end = time()\n # Print and return results\n print(\"Made predictions in {:.4f} seconds\".format(end - start))\n return accuracy_score(target, y_pred)", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def _predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels", "def predict(self, x):\n features = self._get_features(x)\n\n y_pred = self.classifier.predict(features)\n\n return y_pred", "def predict(theta, X):\n label_array = np.array([]) # initial array\n result_array = sigmoid(X.dot(theta))\n for i in result_array:\n if i >= 0.5:\n label_array= np.append(label_array, 1)\n else:\n label_array= np.append(label_array, 0)\n return label_array", "def predict(self):\n train_array = np.array(self.labels != 0, dtype=float)\n if not self.ising:\n labels_logit = self.ising_weights['vertices']\n else:\n neigh_num = self.adj.dot(train_array)\n neigh_num = np.where(neigh_num == 0, 1, neigh_num)\n neigh_weights = self.ising_weights['edges'] * self.labels\n labels_logit = (np.multiply(neigh_weights, neigh_num**(-1))\n + self.ising_weights['vertices'])\n self.prediction = np.where(labels_logit > 0, 1, -1)\n return self", "def predict_only(self):", "def predict(self,X):\n y_pred = np.random.choice(self.labels, size=(X.shape[0],), p=self.thresholds)\n return y_pred", "def predict(self, data_in):\n pass", "def predict(self, X):\n return self.classifier.predict(X)", "def predict(self, point):\n # TODO\n temp = 0\n for j in range(len(self.data.labels)):\n temp = temp + self.alpha[j]*self.data.labels[j]*self.kf(point, self.data.features[j])\n pred_temp = temp\n if pred_temp <= 0:\n pred = -1\n else:\n pred = 1\n # print(pred_temp)\n return pred", "def predict(self, x):\n new_x = 
np.array(self.transform(x.reshape(1, -1)).flatten())\n return self.clf.predict(new_x.T)", "def predict(self, data):\n\t\traise NotImplementedError", "def predict(self, model, x_test):\n pass", "def predict(self,X): \n return self._predict(X)", "def predict(self, obs):\n pass", "def predict(self, xs, **kwargs):", "def _predict_and_return_argmax_label(self, example):\n model_out = self._model.predict([example])\n softmax = list(model_out)[0]['preds']\n argmax = np.argmax(softmax)\n return self._model.output_spec()['preds'].vocab[argmax]", "def PredictLabel(sentence, model_main, word2vec, boundary=0.5):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_main.predict_proba(features.reshape(1,-1))[0]\n if model_main.classes_[prediction.argmax()]!=\"clerical\":\n return model_main.classes_[prediction.argmax()]\n else:\n if np.max(prediction)>boundary:\n return \"clerical\"\n else:\n ranger = range(len(prediction))\n del ranger[prediction.argmax()]\n return model_main.classes_[ranger][prediction[ranger].argmax()]", "def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction", "def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]", "def predict(self, x):\n y_hat = (self.model['b1'] * x) + self.model['b0']\n return y_hat", "def predict(self, X):\n return self.opt.eval(X)", "def predict_response_variable(self, **kwargs):\n pass", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def predict(self, X):\n\n X = np.asarray(X)\n if X.ndim == 1:\n# X = np.asarray([X]).T\n # Data has a single feature\n X = X.reshape(-1, 1)\n\n # Indexes of support vectors\n ind_sv = self.ind_sv()\n\n # Compute kernels\n if self.kernel == 'rbf':\n Din = dist.cdist(self.X[ind_sv, :], X, 'sqeuclidean')\n Kin = np.exp(-self.gamma_in * Din)\n else: # Linear kernel\n Kin = np.dot(self.X[ind_sv, :], self.D.dot(X.T))\n\n Dout = dist.pdist(np.asarray([self.probs]).T, 'sqeuclidean')\n Kout = np.exp(-self.gamma_out * dist.squareform(Dout)) \\\n if self.gamma_out != np.inf else np.eye(np.size(self.probs))\n\n pred = np.dot(np.dot(Kout, self.coefs[:, ind_sv]), Kin).T\n pred += self.intercept\n return pred.T", "def predict(self, x):\n \n\n return predictions", "def predict(features, weights, bias):\n z = pre_activation(features, weights, bias)\n # Get normalized scores\n y = activation(z)\n # Get 0 or 1 value\n return np.round(y)", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def predict_evidences(self, X):", "def predict_labels(weights, data):\n y_pred = np.dot(data, weights)\n y_pred[np.where(y_pred <= 0)] = -1\n y_pred[np.where(y_pred > 0)] = 1\n \n return 
y_pred", "def predict_labels(weights, data):\n y_pred = np.dot(data, weights)\n y_pred[np.where(y_pred <= 0)] = -1\n y_pred[np.where(y_pred > 0)] = 1\n \n return y_pred", "def predict_labels(weights, data):\n y_pred = np.dot(data, weights)\n y_pred[np.where(y_pred <= 0)] = -1\n y_pred[np.where(y_pred > 0)] = 1\n \n return y_pred", "def predict(self):\n predictLambda = lambda valuesToPredict : self.y_scaler.inverse_transform(self.regressor.predict(self.X_scaler.transform(valuesToPredict)))\\\n if self.datasetManager.params.featureScaleDependentVariables else self.regressor.predict(valuesToPredict)\n \n return [\"Support Vector Regression predictions\", super().predict_user_input_variables(predictLambda)]", "def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted", "def predict(self, review):\n raise NotImplementedError", "def predict(self, X, **kwargs):\n return Learner.predict(self, X, **kwargs)", "def _minimal_predict(Classifier, vector, threshold):\n return Classifier.predict(array([vector.tolist()]), threshold)", "def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels", "def predict(self, x):\n raise NotImplementedError('Subclass of LinearModel must implement predict method.')", "def predict(self,x):\n return self._model.predict(x)", "def predict ( self, X ):\n \n return self.knn.predict ( X )\n # End predict()", "def predict(x, clf):\n\n y_pred, y_prob = clf.predict(x), clf.predict_proba(x)\n return y_pred, y_prob", "def predict(self, x):\n th = self._model\n # checks whether the example to predict is a vector or a matrix\n if len(x.shape) > 1:\n xn = np.ones((x.shape[0], x.shape[1] + 1))\n xn[:, 1:] = x\n else:\n xn = np.ones(x.shape[0]+1)\n xn[1:] = x\n return np.dot(xn, th)" ]
[ "0.7951119", "0.77826697", "0.745595", "0.745595", "0.745595", "0.7453583", "0.7453583", "0.74425894", "0.74354345", "0.73819107", "0.73819107", "0.73819107", "0.73376524", "0.7261632", "0.72183657", "0.721681", "0.7200597", "0.7177909", "0.71714", "0.71003073", "0.709932", "0.70824474", "0.70685005", "0.70352405", "0.7034277", "0.7025682", "0.7025682", "0.70255196", "0.70164514", "0.70116675", "0.6999137", "0.6991177", "0.6978002", "0.6967279", "0.6946326", "0.6938367", "0.6938367", "0.6938367", "0.6938367", "0.6938009", "0.6937634", "0.69359446", "0.6917173", "0.691717", "0.69124377", "0.6909832", "0.69035727", "0.69035727", "0.6901967", "0.68926525", "0.68820345", "0.68783814", "0.687315", "0.68642783", "0.68607", "0.6858553", "0.68407005", "0.6839523", "0.6837269", "0.6831942", "0.68285286", "0.68278885", "0.6822642", "0.68219966", "0.68140274", "0.6809646", "0.68065965", "0.6796125", "0.6791053", "0.6789263", "0.6779906", "0.6773868", "0.67723393", "0.6771461", "0.67698514", "0.6766879", "0.67634887", "0.67617667", "0.6757639", "0.675476", "0.67498195", "0.67471695", "0.6740413", "0.67314464", "0.67289233", "0.6725795", "0.6711366", "0.6695046", "0.6695046", "0.6695046", "0.6689643", "0.6683503", "0.6679122", "0.6674201", "0.6673365", "0.6672428", "0.66692525", "0.6669117", "0.66613597", "0.66591054", "0.6655223" ]
0.0
-1
Learns from a Database instance. Each entry is given a label.
def learn(self,db,labels): self.keys = db.keys[:] labelindex = -1 if isinstance(labels,str): labelindex = db.keys.index(labels) assert labelindex >= 0,"label does not exist in database keys" labels = db.get_column(labelindex) elif isinstance(labels,int): labelindex = labels labels = db.get_column(labelindex) else: assert len(labels) == len(db.entries) self.root = DecisionTreeNode() if labelindex >= 0: raise NotImplementedError("Ooops, taking out indexed label broken") entries = np.delete(entries,labelindex,1) db = IndexedDatabase(db) if self.maxnodes != None: return self.greedy_learn_search(db,labels) else: self.deepest = 0 return self.greedy_learn(self.root,db,labels,range(len(labels)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n (entry))\n return", "def get_by_label(self, label, table, verbose=True):\n assert (self.connected)\n \n theId = -1\n GET_BY_LABEL_COMMAND = \"SELECT id,label FROM {0} WHERE samples.label = \\\"{1}\\\"\".format(table, label)\n \n \n self.cursor.execute(GET_BY_LABEL_COMMAND)\n \n for row in self.cursor:\n theId = row[0]\n break\n \n if verbose and theId != -1: \n print(\"Item with id {0} and label '{1}' retrieved.\".format(theId, label))\n elif verbose: \n print(\"No item in the table '{0}' with the label '{1}' was found.\".format(table, label))\n \n return int(theId)", "def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? (0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def fetchall(self, databaseName):\n pass", "def retrieve_labels(user_id: int) -> dict:\n user_label_table = dict()\n cur.execute('''SELECT USER_ID, NAME, CONTENT FROM \"labels\"''')\n rows = cur.fetchall()\n for row in rows:\n if user_id == row[0]:\n user_label_table[row[1]] = row[2]\n return user_label_table", "def get_labels():\n return if_found(dao.get_labels())", "def load_pdbbind_labels(labels_file):\n # Some complexes have labels but no PDB files. Filter these manually\n missing_pdbs = [\"1d2v\", \"1jou\", \"1s8j\", \"1cam\", \"4mlt\", \"4o7d\"]\n contents = []\n with open(labels_file) as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n else:\n # Some of the ligand-names are of form (FMN ox). 
Use regex\n # to merge into form (FMN-ox)\n p = re.compile('\\(([^\\)\\s]*) ([^\\)\\s]*)\\)')\n line = p.sub('(\\\\1-\\\\2)', line)\n elts = line.split()\n # Filter if missing PDB files\n if elts[0] in missing_pdbs:\n continue\n contents.append(elts)\n contents_df = pd.DataFrame(\n contents,\n columns=(\"PDB code\", \"resolution\", \"release year\", \"-logKd/Ki\", \"Kd/Ki\",\n \"ignore-this-field\", \"reference\", \"ligand name\"))\n return contents_df", "def find_percepts_to_label(backend, db_name, domain, limit=10):\n\ttimer = DebugTimer('~~~~\tfinding percepts to label')\n\tbackend._ensure_db_exists(db_name)\n\ttimer.tick('ensured db exists')\n\tpercepts = []\n\twith backend.dbs[db_name].get_session(commit=False) as session:\n\t\ttimer.tick('got session')\n\t\traw_sql = \"\"\"\n\t\t\tSELECT percept.id\n\t\t\tFROM percept\n\t\t\tWHERE NOT EXISTS (\n\t\t\t\tSELECT * FROM annotation\n\t\t\t\tWHERE annotation.domain = '{}'\n\t\t\t\tAND annotation.percept_id = percept.id\n\t\t\t)\n\t\t\tORDER BY random()\n\t\t\tLIMIT :limit;\n\t\t\"\"\".format(domain)\n\t\tcol_names = 'id'.split()\n\t\tparams = dict(limit=limit)\n\t\trows = session.execute(text(raw_sql), params=params)\n\t\ttimer.tick('executed sql')\n\t\tpercept_group = dict()\n\t\tthis_percept_id = None\n\t\tfor row in rows:\n\t\t\trowdict = dict(zip(col_names, row))\n\t\t\tpercepts.append(rowdict)\n\t\ttimer.tick('enumerated {} rows'.format(len(percepts)))\n\ttimer.tick('closed session')\n\ttimer.end()\n\treturn percepts", "def fetch_fromDB(self, searchPhrase):\n pass", "def select_node_by_label(conn, label):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes WHERE label=?\", (label,))", "def lookup(conn, language_code, graphic, phonetic, restrictions):\n c = conn.cursor()\n entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? 
and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))\n return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def _db_store(self, labels: Sequence[Tuple[int, np.ndarray]], table: str) -> None:\r\n # Labels are expected to be\r\n # [\r\n # (class, points),\r\n # (class, points)\r\n # .\r\n # .\r\n # .\r\n # ]\r\n # Where points are np.arrays\r\n # There should also always be one fish in the scene => len(labels) >= 1\r\n\r\n n_points = np.prod(labels[0][1].shape)\r\n\r\n gen = ((self.n, class_, *points.ravel().round(3)) for class_, points in labels)\r\n\r\n # First two \"?\" are for image id and class respectively, rest are for points\r\n sql_command = (\r\n f'INSERT INTO {table} VALUES {(\"?\",\"?\",*[\"?\" for i in range(n_points)])}'\r\n ).replace(\"'\", \"\")\r\n\r\n self.cursor.executemany(sql_command, gen)", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def select(self, labels):\n indexs = []\n \n for i in range(len(labels)):\n indexs.append(self.column_labels.index(labels[i]))\n new_rows = []\n for x in self.rows:\n new_row = []\n for index in indexs:\n new_row.append(x[index])\n new_rows.append(new_row)\n\n\n\n new_Table = T88ble(new_rows, labels)\n\n return new_Table", "def _get_ids_from_label(self, label):\r\n keys = self.list_keys()\r\n results = []\r\n for key in keys:\r\n if key['label'] == label:\r\n results.append(key['id'])\r\n return results", "def lookup(name, db):\n database = load(db)\n matches = [ key for key in database if name in key ]\n if len(matches):\n for name in matches:\n print(\"%s (%s)\" % (name, database[name]))\n else:\n print(\"0 results found\")", "def retrieve_from_db(self):\n pass", "def load(self, theList: DoubleLinkList):\n nextId = self.loadHeadId()\n while nextId:\n rec = self.db.selectById(self.tableName, nextId)\n theList.addNode(appendIt=True, nodeId=rec['nodeId'], childId=rec['childId'],\n label=rec['label'])\n nextId = rec['nextId']", "def acquire_label_by_name(app_label, label_name, obj=None):\n if JeevesLib.doesLabelExist(label_name):\n return JeevesLib.getLabel(label_name)\n else:\n label = JeevesLib.mkLabel(label_name, uniquify=False)\n model_name, field_name, jeeves_id = label_name.split('__')\n\n # Get the model that corresponds to the application label and\n # model name.\n # TODO: Make get_model faster?\n model = apps.get_model(app_label, model_name)\n\n # Gets the current row so we can feed it to the policy.\n # TODO: Figure out why we need the faceted value here...\n obj = model.objects.get(use_base_env=True, jeeves_id=jeeves_id)\n\n restrictor = getattr(model, 'jeeves_restrict_' + field_name)\n JeevesLib.restrict(label, lambda ctxt: restrictor(obj, ctxt), True)\n return label", "def db_values(self, db):", "def __init__(self, entries):\n # objects representing database records\n self.entries = entries", "def gracedb_add_label(gracedb_id, label):\n\n # begin GraceDB API\n client = gracedb_rest.GraceDb()\n\n # append comment to GraceDB entry\n out = 
client.writeLabel(gracedb_id, label)", "def load_database(self):\n # If there is already data, do not load\n if self:\n raise DatabaseError('Data already loaded!')\n\n # Gather all data from the table\n data = self.cursor.execute(\n 'SELECT unique_id, name, wins, time_stamp, '\n 'last_win FROM gungame_winners'\n )\n data = data.fetchall()\n\n # Are there no winners to add?\n if not data:\n return\n\n # Loop through all the past winners and their data\n for unique_id, name, wins, time_stamp, last_win in data:\n\n # Add the current winner to the database\n instance = self[unique_id]\n instance.name = name\n instance.wins = int(wins)\n instance.time_stamp = float(time_stamp)\n instance.last_win = float(last_win)", "def load_data(database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('Disasters', engine)\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names", "def makeDatabaseNamesList(n, ):", "def getLabels(df, eps=3, min_samples=100):\n #instantiate dbscan\n db = DBSCAN(eps=eps, \n min_samples=min_samples, \n metric='euclidean', \n n_jobs=-1\n )\n \n #fit and predict to data\n db.fit_predict(df[['x', 'y']])\n \n #Returns the sorted unique elements of an array\n labels_unique = np.unique(db.labels_)\n #drop the -1 labels which are unlabeled\n labels_unique = labels_unique[labels_unique != -1]\n \n \n return db.labels_, labels_unique", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def loadFromDatabase(self, verbose=True):\n #Connect to database\n conn = sqlite3.connect(self.dbname)\n c = conn.cursor()\n\n if verbose:\n print \"loadFromDatabase: Connected to database '%s' and established cursor\" % \\\n self.dbname\n\n #First, populate list of simulations\n c.execute(\"SELECT * FROM tamas\")\n propList = c.fetchall()\n self.simulations = {}\n if verbose: print \"SELECT answer:\", propList\n \n for prop in propList:\n uid = prop[0]\n pw = prop[2]\n prop = list(prop)\n prop[5] = bool(prop[5]) #sick should be a bool!\n self.simulations[uid] = TamaSimulation(uid, pw)\n self.simulations[uid].readDBValues(prop)\n \n if verbose:\n print \"self.simulations: %s\" % str(self.simulations)\n \n #Load up all items to all tamas\n c.execute(\"SELECT * FROM has\")\n hasList = c.fetchall()\n if verbose: print \"has SELECT answer:\", hasList\n for has in hasList:\n uid, name, amount = has\n for _ in range(amount):\n self.simulations[uid].inventory.append(name)\n\n #Load up all relationships\n c.execute(\"SELECT * FROM knows\")\n knowsList = c.fetchall()\n if verbose: print \"knows SELECT answer:\", knowsList\n for knows in knowsList:\n print \"knows:\", knows\n self.simulations[knows[0]].knows[knows[1]] = knows[2]", "def test_recordlabels_get(self):\n pass", "def main():\n for line in sys.stdin:\n _id, title, desc = line[:-1].split(\"\\t\")\n sql = \"select * from langlinks where ll_from = %s && ll_lang = 'en';\" % _id\n res = execute_and_fetch(DB, sql)\n langlink = \"\"\n if len(res) > 0:\n langlink = res[0][1] + \":\" + res[0][2].decode('utf-8')\n print \"\\t\".join([_id, langlink, title, desc])", "def labelTable(self,table):\n \n for sslice,_, lFields in self._lLabellingInstruction:\n for field in lFields:\n if field is not None:\n try:\n for cell in np.nditer(table.getNPArray()[sslice],['refs_ok'],op_dtypes=np.dtype(object)):\n cell[()].addField(field.cloneMe())\n except: pass", 
"def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()", "def get_or_create_from_label_list( self, owner, label_list ):\n return [\n self.get_or_create(\n owner = owner,\n label = label.strip().lower() ) [0]\n for label in label_list ]", "def Fetching(*kw):\n\n \"\"\"\n 1) Connecting to DataBase\n \"\"\"\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(*kw[0]),\n pool_pre_ping=True)\n \"\"\"\n 2) Create Session\n \"\"\"\n Session = sessionmaker(bind=engine)\n session = Session()\n\n \"\"\"\n 3)Quering\n \"\"\"\n\n for instance in session.query(State).filter(State.name.like('%a%')):\n print(\"{}: {}\".format(instance.id, instance.name))", "def load(self):\n\n args = self.id, self.name\n self.loader.session.logger.debug(\"loading CDR%d (%r)\", *args)\n cursor = self.loader.dictionary_cursor\n cursor.execute(self.DICTIONARY_INSERT, self.entry)\n for alias in self.aliases:\n cursor.execute(self.ALIAS_INSERT, alias)\n self.loader.dictionary_conn.commit()", "def db_show_all():\n the_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for key in db:\n person = Person()\n person.name = key\n person.phone = db[key]\n the_list.append(person)\n display_list(the_list)\n db.close()", "def make_lmdb( db_filename, data, shape, keep_aspect = False, verbose = 0 ):\n print( \"Making LMDB: shape = %s, keep_aspect=%s\" % ( list( shape ), keep_aspect ) )\n if not os.path.exists( db_filename ):\n print( \"Creating directory: %s\" % db_filename )\n os.makedirs( db_filename )\n db = lmdb.open( db_filename, map_size=int(1e12))\n with db.begin(write=True) as txn:\n for idx, (filename, label) in enumerate( data ):\n img = cv2.imread( filename, cv2.IMREAD_COLOR )\n img2 = transform( img, shape, keep_aspect = True )\n datum = make_datum(img2, label )\n txn.put('{:0>5d}'.format(idx), datum.SerializeToString())\n if verbose > 0:\n print( \"%s: label=%d, shape=%s\" % ( filename, label, list( img.shape ) ) )\n db.close()", "def _read_lap(rows):\n logging.debug(\"parsing lap\")\n tracks = []\n\n for name, row in _safe_generate(rows):\n if re.search(TOTAL, name):\n distance, calories = _read_lap_total(rows)\n logging.info(\"Parsed lap with {} tracks, Distance: {}, Calories: {}\"\n .format(len(tracks), distance, calories))\n return Lap(total_distance=distance, total_calories=calories, tracks=tracks)\n\n tracks.append(Track.from_row(row))", "def verify_labeled(self, d_stmt, table):\n d_label = d_stmt.find_first(\"p_name\")\n if d_label:\n self.label = d_label.value\n table.check_table(d_stmt.linespan, Symbol(self.label, DanaType(\"label\")))", "def update_database(JobData, Labels):\n\n DB = boto3.resource('dynamodb')\n DBTable = DB.Table(os.environ['DB_TABLE_NAME'])\n\t\n DBData = {}\n DBData['VideoID'] = JobData['JobId']\n DBData['VideoName'] = JobData['Video']['S3ObjectName']\n DBData['S3Bucket'] = JobData['Video']['S3Bucket']\n DBData['Labels'] = []\n\n print('Total number of labels detected was {}'.format(len(Labels)))\n\t\n\t# Now want to create a list of unique labels, number of occurrences, time of occurrences and average confidence\n for Label in Labels:\n if 
len(DBData['Labels']) == 0: # Populate the first item\n add_new_label(DBData['Labels'], Label)\n else:\n FoundMatch = False\n for UniqueLabel in DBData['Labels']:\n if Label['Label']['Name'] == UniqueLabel['LabelName']:\n update_label(UniqueLabel, Label)\n FoundMatch = True\n break\n # If we haven't found a match, need to add another unique label\n if not FoundMatch: add_new_label(DBData['Labels'], Label)\n\n # Now put this into the database. DynamoDB doesn't support Python float format so fix this\n DBData = FixFloats(DBData)\n DBTable.put_item(Item = DBData)\n\n return", "def load(dbname):\n db = Database(dbname)\n\n # Get the name of the objects\n tables = get_table_list(db.cur)\n\n # Create a Trace instance for each object\n chains = 0\n for name in tables:\n db._traces[name] = Trace(name=name, db=db)\n setattr(db, name, db._traces[name])\n db.cur.execute('SELECT MAX(trace) FROM %s'%name)\n chains = max(chains, db.cur.fetchall()[0][0]+1)\n\n db.chains=chains\n db.variables_to_tally = chains * [tables,]\n db._state_ = {}\n return db", "def bulk_load(config):\n from aleph.logic.collections import create_collection\n for foreign_id, data in config.items():\n data['label'] = data.get('label', foreign_id)\n collection = create_collection(foreign_id, data)\n for query in dict_list(data, 'queries', 'query'):\n bulk_load_query.apply_async([collection.id, query], priority=6)", "def import_labels():\n\n dict_labels = df.set_index('id').to_dict()['breed']\n unique_labels = sorted(list(set(dict_labels.values())))\n for index, label in dict_labels.items():\n dict_labels[index] = unique_labels.index(label)\n return dict_labels, unique_labels", "def infer_label(self, string):\n label_bits = []\n if string:\n label_bits.append(string)\n for k, v in self.table.items():\n # The automatic label includes any keys with multiple values\n if len(v) > 1:\n # If a key has multiple values, add both its name and its key.\n # That is, if @key1@ has multiple values, label_bits will have\n # 'key1' + '@key1@' appended. This means the label includes\n # both the key's name and the particular value it has for a\n # given job.\n label_bits.append(re.search(self.key_pattern, k).group(1) + k)\n label = '-'.join(label_bits)\n # Add the label as a key-values pair to the weird data structure\n # This is as if there were in the bp file the line,\n # label\n if not label:\n raise ValueError, \"The label is blank. 
No label was supplied \"\\\n \"and none can be inferred.\"\n self.table[self.label_key] = [label]", "def populate_persons():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Person class')\n\n PERSON_NAME = 0\n LIVES_IN_TOWN = 1\n NICKNAME = 2\n\n people = [\n ('Andrew', 'Sultan', 'Andy'),\n ('Peter', 'Seattle', None),\n ('Susan', 'Boston', 'Beannie'),\n ('Pam', 'Coventry', 'PJ'),\n ('Steven', 'Stevens Pass', None),\n ('Ryan', 'New York', 'Private'),\n ('Pamela', 'Spokane', 'Patrol'),\n ('Monica', 'Portland', None),\n ]\n\n logger.info('Creating Person records: iterate through the list of tuples')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for person in people:\n with database.transaction():\n new_person = Person.create(\n person_name = person[PERSON_NAME],\n lives_in_town = person[LIVES_IN_TOWN],\n nickname = person[NICKNAME])\n new_person.save()\n logger.info('Database add successful')\n\n logger.info('Print the Person records we saved...')\n for saved_person in Person:\n logger.info(f'{saved_person.person_name} lives in {saved_person.lives_in_town} ' +\\\n f'and likes to be known as {saved_person.nickname}')\n\n except Exception as e:\n logger.info(f'Error creating = {person[PERSON_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def data_source(self, label):\r\n return datasource.Datasource(self.apikey_or_username, label)", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": \"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n \"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"[email protected]\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def __init__(self, label, primary_key=None, functional=False):\r\n self.label = label\r\n self.primary_key = primary_key\r\n self.functional = functional", "def label_list(entry):\n printing_resident_sheets(entry, rf'{constants.OUTPUTS_DIR}\\label_sheet.xlsx')\n printing_documents.create_label_list()", "def label_7m(predictor, zhibo7m):\n for item_ in zhibo7m.find():\n try:\n live_texts_ = item_[\"content\"][\"textFeed\"]\n except Exception as e:\n zhibo7m.delete_one({\"_id\": item_['_id']})\n print(\"delete error id: {}\".format(item_[\"_id\"]))\n print(e)\n for l_index_, l_item_ in enumerate(live_texts_):\n l_item_[\"p_label\"] = predictor.predict(l_item_[\"msg\"])[0]\n live_texts_[l_index_] = l_item_\n # print(l_item_)\n zhibo7m.update_one({\"_id\": item_['_id']}, {\"$set\": {\"textFeed\": live_texts_}})", "def reagent_label_data(bcl_step):\n\n define_step = None\n indexes = {}\n flowcell_total_reads = 0\n\n for inp, outp in bcl_step.input_output_maps:\n 
pre_bcl_steps = MASTER_STEPS_UDFS['reagent_labels']['steps']['pre_bcl']\n if inp['parent-process'].type.name not in pre_bcl_steps:\n continue\n\n if outp['output-generation-type'] != 'PerReagentLabel':\n continue\n\n lane = inp['uri']\n art = outp['uri']\n\n index_reads = art.udf.get(\n MASTER_STEPS_UDFS['reagent_labels']['udfs']['reads'])\n\n if index_reads is None or art.qc_flag == 'FAILED':\n continue\n\n flowcell_total_reads += index_reads\n\n sample = art.samples[0] # Will always be only one sample in the list\n application_tag = sample.udf.get('Sequencing Analysis')\n\n if application_tag[0:2] in MASTER_STEPS_UDFS['reagent_labels'][\n 'exclue_tags']:\n continue\n\n if not define_step:\n define_step_outputs, flowcell_target_reads, define_step = get_define_step_data(\n lane)\n\n if not sample.id in define_step_outputs:\n LOG.info('This sample whas put as a pool into the define step: ' +\n sample.id + ' ' + application_tag)\n continue\n\n index = art.reagent_labels[0]\n\n container, lane_nr = lane.location\n if index not in indexes:\n indexes[index] = {\n '_id': '_'.join([index, container.name]),\n 'url': index.replace(' ', ''),\n 'index_total_reads': index_reads,\n 'index_target_reads': define_step_outputs[sample.id],\n 'flowcell_target_reads': flowcell_target_reads,\n 'index': index,\n 'sample': sample.id,\n 'lanes': {\n lane_nr: dict(art.udf.items())\n },\n 'flowcell_id': container.name,\n 'flowcell_type': container.type.name,\n 'define_step_udfs': dict(define_step.udf.items()),\n 'define_step_id': define_step.id,\n 'bcl_step_id': bcl_step.id\n }\n else:\n indexes[index]['lanes'][lane_nr] = dict(art.udf.items())\n indexes[index]['index_total_reads'] += index_reads\n\n for index, data in indexes.items():\n data['flowcell_total_reads'] = flowcell_total_reads\n indexes[index] = filter_none(data)\n\n return indexes", "def read_helices_from_given_db (self, db_path):\n\n\tconn = sqlite3.connect(db_path)\n\tconn.row_factory = sqlite3.Row\n\n\tc = conn.cursor()\n\tc.execute('select * from TMs')\n\tr = c.fetchall()\n# musze to przerobic na tworzenie z pdb, i musze sie pozbyc raw sql, moze z ReadPDB to dobry pomysl\n\tfor i in r:\n\n\t\ttmhelix = TMHelixModel.objects.create(TMHelix_ID= i['ID'], TMHelix_Tilt = i['Tilt'], \\\n TMHelix_Tilt_EC = i['Tilt_EC'], \\\n TMHelix_Tilt_IC = i['Tilt_IC'], \\\n TMHelix_KinkAngle = i['KinkAngle'], \\\n TMHelix_Overhang = i['Overhang'],\\\n TMHelix_AASEQ = i['AASEQ'],\\\n )\n\t\tself. 
TMHelixModel_set.add(tmhelix)\n\n return", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def label_record_pair(self, label, record_pair):\n\n if label == 'y':\n self.labeled_examples['match'].append(record_pair)\n elif label == 'n':\n self.labeled_examples['distinct'].append(record_pair)\n elif label == 'u':\n record_pair = ()\n elif label == 'f':\n print('Finished labeling')\n self.__create_uncertain_pairs_file()", "def __init__(self):\r\n self.con = lite.connect('Profile_database.db') # Opening of database file\r\n self.cursor = self.con.cursor()\r\n\r\n self.profiles_name_list = []\r\n self.output_zakladki = []\r\n self.output_leki = []\r\n self.output_leki_cegly = []\r\n self.output_lista_cegiel = []", "def get_labels(label_file):\n labels = None\n with open(label_file, 'r') as infile:\n reader = csv.reader(infile)\n labels = dict((rows[0], rows[1]) for rows in reader)\n return labels", "def labeltable(self, labeltable):\n if not isinstance(labeltable, GiftiLabelTable):\n raise TypeError(\"Not a valid GiftiLabelTable instance\")\n self._labeltable = labeltable", "def get_data(self, label):\n self.application.get_data(label)", "def create_lmdb_for_ldv():\n # training_raw\n folder_path = 'trainsets/LDV/training_raw'\n lmdb_path = 'trainsets/LDV/training_raw.lmdb'\n img_path_list, keys = prepare_keys_ldv(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True)\n\n # training_fixed-QP\n folder_path = 'trainsets/LDV/training_fixed-QP'\n lmdb_path = 'trainsets/LDV/training_fixed-QP.lmdb'\n img_path_list, keys = prepare_keys_ldv(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True)\n\n # training_fixed-rate\n folder_path = 'trainsets/LDV/training_fixed-rate'\n lmdb_path = 'trainsets/LDV/training_fixed-rate.lmdb'\n img_path_list, keys = prepare_keys_ldv(folder_path)\n make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True)", "def __init__(self, query, datastoreio_stub, label=None):\n super(GetModels, self).__init__(label=label)\n self.datastoreio = datastoreio_stub\n self.query = query", "def dataset_to_spacy(db, entity_label):\n\n train_data = []\n try:\n #anyone missing context ??\n for word in db[entity_label]:\n if not db[entity_label][word]['context']:\n continue\n\n for contexted_example in db[entity_label][word]['context']:\n entities = []\n\n pure_word = word.replace(\"\\\\\",'')\n if len(word.split(\" \")) > 1 :\n\n start = contexted_example.lower().find(pure_word.lower())\n if start == -1:\n print(word)\n continue\n\n end = start+len(word)\n entities.append((start, end, entity_label))\n\n else:\n splits = list(split_with_indices(contexted_example))\n for touple in splits:\n if pure_word.lower() in contexted_example[touple[0]:touple[1]].lower():\n entities.append((touple[0], touple[0] + len(word), entity_label))\n\n train_data.append((contexted_example, {'entities': entities}))\n\n except Exception as e:\n print(e)\n return None\n\n return train_data", "def lookup_eagle(label, gr, type):\n\t# First check if a cached result exists\n\tmap = globals()['map_' + gr]\n\tif label in map: return map[label]\n\tif label in map['_miss_']: return None\n\t# If not, do a local SPARQL query on the SKOS vocabulary \n\tgraph = globals()['g_' + gr]\n\tq = \"\"\"PREFIX skos: <http://www.w3.org/2004/02/skos/core#>\nSELECT DISTINCT ?x\nWHERE {\n ?x skos:inScheme <\"\"\" + type + \"\"\">\n ; skos:prefLabel|skos:altLabel ?l FILTER( 
lcase(str(?l)) = '\"\"\" + label.lower() + \"\"\"' )\n}\"\"\"\n\tqres = graph.query(q)\n\t# Update the cache accordingly\n\tfor row in qres:\n\t\tmap[label] = row[0]\n\t\treturn row[0]\n\tprint('[WARN] Could not find a match for label \"' + label + '\" in EAGLE vocabulary <' + type + '>')\n\tmap['_miss_'].append(label)\n\treturn None", "def add_labels(self, labels: dict):\n self.status = \"Creating labels\"\n for lname, value in labels.items():\n self.labels.add_label(lname, value)", "def load_data(database_file):\n engine = create_engine('sqlite:///data/{}'.format(database_file))\n df = pd.read_sql('tweets', con=engine)\n X = df['message']\n Y = df.drop(columns=['id', 'message', 'original', 'genre'])\n category_names = Y.columns\n return X, Y, category_names", "def load_label(self, fixture_label):\n show_progress = self.verbosity >= 3\n for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):\n _, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))\n open_method, mode = self.compression_formats[cmp_fmt]\n fixture = open_method(fixture_file, mode)\n try:\n self.fixture_count += 1\n objects_in_fixture = 0\n loaded_objects_in_fixture = 0\n if self.verbosity >= 2:\n self.stdout.write(\"Installing %s fixture '%s' from %s.\" %\n (ser_fmt, fixture_name, humanize(fixture_dir)))\n\n objects = serializers.deserialize(ser_fmt, fixture,\n using=self.using, ignorenonexistent=self.ignore)\n\n create_dict = OrderedDict()\n\n for object in objects:\n obj = object.object\n objects_in_fixture += 1\n model = obj.__class__\n if router.allow_migrate_model(self.using, model):\n self.models.add(model)\n if model in create_dict.keys():\n create_dict[model].append(obj)\n else:\n create_dict[model] = [obj]\n for model in create_dict.keys():\n objs = create_dict[model]\n loaded_objects_in_fixture += len(objs)\n try:\n model.objects.using(self.using).bulk_create(objs)\n if show_progress:\n self.stdout.write(\n '\\rProcessed %i object(s).' % loaded_objects_in_fixture,\n ending=''\n )\n except (DatabaseError, IntegrityError) as e:\n e.args = (\"Could not load %(app_label)s.%(object_name)s: %(error_msg)s\" % {\n 'app_label': model._meta.app_label,\n 'object_name': model._meta.object_name,\n 'error_msg': force_text(e)\n },)\n raise\n if objects and show_progress:\n self.stdout.write('') # add a newline after progress indicator\n self.loaded_object_count += loaded_objects_in_fixture\n self.fixture_object_count += objects_in_fixture\n except Exception as e:\n if not isinstance(e, CommandError):\n e.args = (\"Problem installing fixture '%s': %s\" % (fixture_file, e),)\n raise\n finally:\n fixture.close()\n\n # Warn if the fixture we loaded contains 0 objects.\n if objects_in_fixture == 0:\n warnings.warn(\n \"No fixture data found for '%s'. 
(File format may be \"\n \"invalid.)\" % fixture_name,\n RuntimeWarning\n )", "def load_data(db_filepath):\n # load data from database\n engine = create_engine('sqlite:///' + db_filepath)\n\n inspector = inspect(engine)\n # Get table information\n print('tables in the db {}'.format(inspector.get_table_names()))\n\n df = pd.read_sql(\"SELECT * FROM Messages \", engine)\n\n # create X and Y datasets\n X = df['message']\n Y = df.drop(['id','message','original','genre'],axis=1)\n\n # create a list of cat names\n category_names = list(Y.columns.values)\n\n return X, Y, category_names", "def get_label(id):\n return if_found(dao.get_label(id))", "def update_db(bus_id=def_id):\n # Instantiate the SQL class for the business data we will be pulling\n sql = Sql(bus_id)\n\n # Check if we have previously analyzed the requested business\n # If not, pull the raw data and processing the data\n if sql.check() is not False:\n print \"Already in database!\"\n return\n\n # Get business data (name, country, etc) from Yelp API\n # Limited to 25,000 Yelp API calls per day\n # There are over 4 million reviews and over 140,000 businesses in database\n while True:\n try:\n bus_info = Yelp(bus_id)\n break\n except ValueError:\n pass\n except YelpAPI.YelpAPIError:\n return\n\n # Grab review text from SQL database\n sql.pull_reviews()\n\n # Use our trained XGBoost Classifier and TFIDF vectorizer\n # to determine whether each review is \"Favorable\" or \"Unfavorable\"\n model.predict(sql.reviews)\n\n # Conduct sentiment analysis and evaluate word counts in order to\n # \"penalize\" the weighting of reviews that don't fit the threshold\n nlp = Nlp(sql.reviews, sql.stars, model.preds,\n bus_info.name, bus_info.country, bus_info.city)\n\n # Assign variables from all the objects attributes we created\n # and then input them into a tuple.\n # The tuple is used to populate the SQL database for faster lookup of\n # our analysis at a later time\n # The tuple is also used to populate our dictionary which will be\n # used for variables that will be rendered on our website\n name = nlp.name\n city = nlp.city\n country = nlp.country\n old_rating = int(100 * nlp.stars_avg / 5)\n new_rating = int(nlp.new_rating * 100)\n rev_count = len(sql.reviews)\n count_5 = sql.stars.count(5)\n count_4 = sql.stars.count(4)\n count_3 = sql.stars.count(3)\n count_2 = sql.stars.count(2)\n count_1 = sql.stars.count(1)\n fav_count = (model.preds == 1).sum()\n unfav_count = (model.preds == 0).sum()\n avg_wts = int(100*sum(nlp.avg_wts) / len(nlp.avg_wts))\n bus_tup = (bus_id, name, city, country, old_rating, new_rating,\n rev_count, count_5, count_4, count_3, count_2, count_1,\n fav_count, unfav_count, avg_wts)\n sql.insert(bus_tup)\n print bus_tup", "def _load_neighbors_from_database(self) -> None:\r\n self._are_neighbors_loaded = True\r\n\r\n graph: Graph = self._graph\r\n neighbors: List[DBNode] = graph.database.Node.find_by_name(self.name).neighbors\r\n nodes: NodeList = graph.nodes\r\n\r\n for db_node in neighbors:\r\n graph.add_node(db_node.name, db_node.external_id)\r\n neighbor: Node = nodes.get_node_by_name(db_node.name)\r\n graph.add_edge(self, neighbor, 1, False)", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def pick_best_label(self,db,labels,ids):\n self.type = 'v'\n if len(labels) > 0:\n self.value = vote([labels[id] 
for id in ids])\n else:\n self.value = None\n return", "def load_initial_nl_mapping(matrix: list):\n print(\"Start loading...\")\n\n # delete existed objects\n # models.NominalLabelMapping.objects.all().delete()\n\n for row in matrix[1:]:\n book = row[0].strip()\n plant_code = row[1].strip()\n model = row[2].strip()\n value = row[3]\n\n match_object = models.NominalLabelMapping.objects.filter(\n model=model,\n value=value).first()\n if not match_object:\n match_object = models.NominalLabelMapping(\n model=model,\n value=value\n )\n setattr(match_object, 'book', book)\n setattr(match_object, 'plant_code', plant_code) \n # save models\n match_object.save()", "def user_labels_insert(*args):\n return _ida_hexrays.user_labels_insert(*args)", "def convert_lsdb_to_neighbor_info(lsdb) :\n \n neidb = []\n nei_dict = {}\n\n # trace router lsa, link type 1 and 4\n for lsa_id, lsa in lsdb.rdb.items() :\n\n rtr = { \"router_id\": lsa_id, \"neighbors\": []}\n neidb.append(rtr)\n nei_dict[lsa_id] = rtr\n\n for rlink in lsa.attached_links :\n if rlink.link_type == P2P_LINK :\n rtr[\"neighbors\"].append({\"router_id\": rlink.link_id,\n \"type\": \"p2p\"})\n\n if rlink.link_type == VIRTUAL_LINK :\n rtr[\"neighbors\"].append({\"router_id\": rlink.link_id,\n \"type\": \"vlink\"})\n\n\n # trace network lsa. in network lsa, attached routers must establish\n # neighbor each other (full mesh).\n for lsa_id, lsa in lsdb.ndb.items() :\n\n for src in lsa.attached_routers :\n for dst in lsa.attached_routers :\n if src == dst : continue\n nei_dict[src][\"neighbors\"].append({\"router_id\": dst,\n \"type\": \"network\"})\n\n # sort\n for rtr in neidb :\n rtr[\"neighbors\"].sort(key = lambda nei: inet_itok(nei[\"router_id\"]))\n neidb.sort(key = lambda rtr: inet_itok(rtr[\"router_id\"]))\n\n return neidb", "def parse_galrep_line(L):\n data = L.split(maxsplit=1)\n record = parse_galrep_data_string(\"\" if len(data) == 1 else data[1])\n record['label'] = label = data[0]\n return label, record", "def get_create_labeling (self, savels=False):\n if savels:\n assert not self.oldls, 'weird, oldls should be None'\n self.oldls = flmo.get_labelset ()\n\n ep_ls, des_ls, sql_prefix = self.get_write_labeling ()\n ep_ls.set_S () # Prove we have empty S label during CREATE TABLE\n ep_ls.set_O ()\n \n sql_prefix = self._prefix (ep_ls, des_ls)\n return ep_ls, des_ls, sql_prefix", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def addLabel(*args):", "def load_data(database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('Messages', engine)\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names", "def _get_labels_for_user(self, project, assignment, user):\n return Session.query(model.Label).filter(and_(model.Label.user_id==user.id,\n model.Label.project_id==project.id,\n model.Label.assignment_id==assignment.id)).all()", "def list_boxdb(self):\n self.tables = self.list_tables()\n for table in self.tables:\n self.mylist.insert(END, table[0])\n self.mylist.bind('<Double-1>', lambda x: self.select_table())", "def from_db(self, if_print=True):\n for fiction in conn_db().find():\n if if_print:\n print(fiction)\n self.fictions.append(QdFictionInfo(fiction))", "def ln_s(ln):\r\n c.execute(\"SELECT * FROM personnel WHERE last=:last COLLATE NOCASE \", {'last': ln})\r\n return c.fetchall() # returns a list of found items, empty if none found\r", "def add_to_database(self, 
lyrcs):\n\n # allocate list\n lns = []\n\n for i in range(len(lyrcs)):\n # album level\n for j in range(len(lyrcs[i])):\n # song level\n for k in range(1, len(lyrcs[i][j])):\n lns.append(lyrcs[i][j][k])\n\n # create database\n f = open('KISS_LINES', 'a')\n\n for iln in lns:\n f.write(iln)\n\n f.close()\n\n return 1", "def load_data(database_filepath):\n\n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql_table('messages_cleaned', engine)\n X = df.message \n Y = df.drop(columns=['message', 'original', 'genre'])\n category_names = list(Y.columns.values)\n return X, Y, category_names", "def similar(text, database):\n # TODO\n pass", "def find(self, **kwargs):\n return super(LootsTable, self).records('loots', **kwargs)", "def load_data(database_filepath):\n # load data from database\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('messages_categories', engine)\n\n # create X and y dataframes for pipeline\n X = df['message'] # predictors\n y = df.drop(['id', 'message', 'original', 'genre'], axis = 1).astype('int64') # labels\n return X, y", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def load_data(database_filepath):\n engine = create_engine('sqlite:///'+ database_filepath)\n df = pd.read_sql(\"SELECT * FROM DisasterResponse\", engine)\n #exclude colums that are not needed in model\n col=[i for i in df.columns if i not in ['id','original', 'genre']]\n X = df[\"message\"]\n Y = df.iloc[:,4:]\n #global category_names\n category_names = Y.columns\n return X, Y, category_names", "def construct_df(pdb_stem_directory, pdbbind_label_file, pdbbind_df_joblib):\n labels = extract_labels(pdbbind_label_file)\n df_rows = []\n os.chdir(pdb_stem_directory)\n pdb_directories = [pdb.replace('/', '') for pdb in glob('*/')]\n\n for pdb_dir in pdb_directories:\n print(\"About to extract ligand and protein input files\")\n pdb_id = os.path.basename(pdb_dir)\n ligand_pdb = None\n protein_pdb = None\n for f in os.listdir(pdb_dir):\n if re.search(\"_ligand_hyd.pdb$\", f):\n ligand_pdb = f\n elif re.search(\"_protein_hyd.pdb$\", f):\n protein_pdb = f\n elif re.search(\"_ligand.mol2$\", f):\n ligand_mol2 = f\n\n print(\"Extracted Input Files:\")\n print (ligand_pdb, protein_pdb, ligand_mol2)\n if not ligand_pdb or not protein_pdb or not ligand_mol2:\n raise ValueError(\"Required files not present for %s\" % pdb_dir)\n ligand_pdb_path = os.path.join(pdb_dir, ligand_pdb)\n protein_pdb_path = os.path.join(pdb_dir, protein_pdb)\n ligand_mol2_path = os.path.join(pdb_dir, ligand_mol2)\n\n with open(protein_pdb_path, \"rb\") as f:\n protein_pdb_lines = f.readlines()\n\n with open(ligand_pdb_path, \"rb\") as f:\n ligand_pdb_lines = f.readlines()\n\n try:\n with open(ligand_mol2_path, \"rb\") as f:\n ligand_mol2_lines = f.readlines()\n except:\n ligand_mol2_lines = []\n\n print(\"About to compute ligand smiles string.\")\n ligand_mol = Chem.MolFromPDBFile(ligand_pdb_path)\n if ligand_mol is None:\n continue\n smiles = Chem.MolToSmiles(ligand_mol)\n complex_id = \"%s%s\" % (pdb_id, smiles)\n label = labels[pdb_id]\n df_rows.append([pdb_id, smiles, complex_id, protein_pdb_lines,\n ligand_pdb_lines, ligand_mol2_lines, label])\n\n pdbbind_df = pd.DataFrame(df_rows, columns=('pdb_id', 'smiles', 'complex_id',\n 'protein_pdb', 'ligand_pdb',\n 
'ligand_mol2', 'label'))\n\n joblib.dump(pdbbind_df, pdbbind_df_joblib)", "def load_pickle(args):\n with open(args.pickle_name, 'rb') as fh:\n datum = pickle.load(fh)\n\n df = pd.DataFrame.from_dict(datum['labels'])\n\n return df", "def fromLabel(self, label):\n indices = grids.ringPosFromRingLabel(label)\n\n if indices is None:\n # arbitrary label. excore.\n # set it up as if it's a freshly initialized one\n self.axial = None\n self.i1 = None\n self.i2 = None\n self.label = label\n else:\n self.i1, self.i2, self.axial = indices\n\n self.getFirstChar()\n self.makeLabel()", "def retrieve(self):\n self.DB.close_connection()\n self.r1.queryCase = self.case\n self.r1.knn(1)", "def add_new_label(UniqueLabelList, Label):\n\n NewLabel = {}\n NewLabel['LabelName'] = Label['Label']['Name']\n NewLabel['Confidence'] = Label['Label']['Confidence']\n NewLabel['TimeStamps'] = [Label['Timestamp']]\n NewLabel['Count'] = 1\n\t\n UniqueLabelList.append(NewLabel)\n\t\n return NewLabel", "def test_label_anonymizing(self):\n class User(Base):\n @property\n def prop_score(self):\n return sum([tag.prop_score for tag in self.tags])\n\n class Tag(Base):\n @property\n def prop_score(self):\n return self.score1 * self.score2\n \n for labeled, labelname in [(True, 'score'), (True, None), (False, None)]:\n clear_mappers()\n \n tag_score = (tags_table.c.score1 * tags_table.c.score2)\n user_score = select([func.sum(tags_table.c.score1 *\n tags_table.c.score2)],\n tags_table.c.user_id == users_table.c.id)\n \n if labeled:\n tag_score = tag_score.label(labelname)\n user_score = user_score.label(labelname)\n else:\n user_score = user_score.as_scalar()\n \n mapper(Tag, tags_table, properties={\n 'query_score': column_property(tag_score),\n })\n\n\n mapper(User, users_table, properties={\n 'tags': relation(Tag, backref='user', lazy=False), \n 'query_score': column_property(user_score),\n })\n\n session = create_session()\n session.save(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)]))\n session.save(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)]))\n session.flush()\n session.clear()\n\n def go():\n for user in session.query(User).all():\n self.assertEquals(user.query_score, user.prop_score)\n self.assert_sql_count(testing.db, go, 1)\n\n\n # fails for non labeled (fixed in 0.5):\n if labeled:\n def go():\n u = session.query(User).filter_by(name='joe').one()\n self.assertEquals(u.query_score, u.prop_score)\n self.assert_sql_count(testing.db, go, 1)\n else:\n u = session.query(User).filter_by(name='joe').one()\n self.assertEquals(u.query_score, u.prop_score)\n \n for t in (tags_table, users_table):\n t.delete().execute()" ]
[ "0.607027", "0.5603955", "0.52142614", "0.51333624", "0.51184493", "0.5110511", "0.50444543", "0.50385123", "0.50106204", "0.50006604", "0.49866673", "0.49164563", "0.48594052", "0.48593736", "0.48549092", "0.48429736", "0.4827033", "0.48192468", "0.4811291", "0.48048383", "0.47987357", "0.47962224", "0.47835875", "0.47828573", "0.4782045", "0.4780754", "0.4780069", "0.47745526", "0.4748405", "0.47475928", "0.4745853", "0.47425961", "0.47422522", "0.47350824", "0.47278956", "0.4713778", "0.46996891", "0.46709603", "0.4647649", "0.46404335", "0.46310464", "0.4626745", "0.46085903", "0.46012", "0.45970464", "0.45955244", "0.45945856", "0.45883754", "0.45835894", "0.45779192", "0.45764452", "0.45694488", "0.45691216", "0.45686615", "0.4566913", "0.4566913", "0.4566913", "0.45586622", "0.45416075", "0.45322344", "0.45240283", "0.45239994", "0.45235646", "0.45220032", "0.45155835", "0.45141962", "0.45000258", "0.44949958", "0.4494738", "0.44946676", "0.44931692", "0.4492111", "0.44896957", "0.44863287", "0.44818228", "0.44686958", "0.44629577", "0.44597256", "0.44591308", "0.4458699", "0.4449198", "0.4447404", "0.44452828", "0.44449756", "0.44449463", "0.4444593", "0.44380373", "0.44355053", "0.44345483", "0.44345152", "0.44307327", "0.442949", "0.44212618", "0.44168776", "0.44162914", "0.44152844", "0.44117185", "0.44089615", "0.44078943", "0.44074342" ]
0.6163972
0
Returns the maximum depth of the tree
def depth(self):
    return max(n.depth for n in self.iternodes())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")", "def max_depth(self) -> int:\n return 0", "def get_max_depth(self):\n return self.MAX_DEPTH", "def depth(self):\n return self._max_depth", "def max_depth(self):\n if len(self.children) == 0:\n return 1\n else:\n child_depths = [c.max_depth() for c in self.children]\n return 1 + max(child_depths)", "def max_depth(self) -> int:\n if self.child_actions:\n return max(child_action.max_depth\n for child_action in self.child_actions)\n else:\n return self.depth", "def get_max_depth(clade):\n depths = clade.depths()\n if not max(depths.values()):\n depths = clade.depths(unit_branch_lengths=True)\n return max(depths.values()) * tree_depth / actual_tree_depth", "def max_depth(self):\r\n lvl = 1\r\n has_lvl_desc = True\r\n while has_lvl_desc:\r\n num_children = len(self.level_n_descendants(lvl))\r\n if num_children==0:\r\n has_lvl_desc = False\r\n else:\r\n lvl+=1\r\n return lvl-1", "def max_depth(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_depth\")", "def maxDepth(node):\n\tif node is None: \n\t\treturn 0 \n\telse: \n\t\tlDepth=maxDepth(node.left)\n\t\trDepth=maxDepth(node.right) \n\t\tif lDepth>rDepth: \n\t\t return lDepth+1\n\t\telse: \n\t\t return rDepth+1", "def depth(self):\n\t\tdef helper(tree, d):\n\t\t\tif tree.isLeaf():\n\t\t\t\treturn d\n\t\t\telse:\n\t\t\t\td_left=helper(tree.left, d+1) if tree.hasLeftChild() else 0\n\t\t\t\td_right=helper(tree.right, d+1) if tree.hasRightChild() else 0\n\t\t\t\treturn max(d_left, d_right)\n\n\t\treturn helper(self.root, 1) if not self.isEmpty() else 0", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] != -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def max_depth(node):\n if not node:\n return 0\n return max(max_depth(node.left), max_depth(node.right)) + 1", "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def max_depth(self): # DirObj.max_depth\n md=self.depth\n if len(self.subdirs.keys()):\n for name, entry in self.subdirs.iteritems():\n if not entry.deleted:\n td = entry.max_depth()\n if td > md:\n md=td\n return md\n elif len(self.files.keys()):\n return md + 1\n else:\n return md", "def depth(self):\n L, R = 0,0\n if self.left:\n L = self.left.depth()\n if self.right:\n R = self.right.depth()\n\n return 1 + max(L, R)", "def get_max_depth_node(nodes):\n curr = nodes[0]\n for i in range(0, len(nodes)):\n if nodes[i].depth > curr.depth:\n curr = nodes[i]\n return curr", "def depth(self, d=0):\n d1 = 0\n d2 = 0\n if self.leftChild:\n d1 = max(self.leftChild.depth(d + 1), d)\n if self.rightChild:\n d2 = max(self.rightChild.depth(d + 1), d)\n return max(d1, d2, d)", "def find_depth_tree(root):\n if root is not None:\n max_depth = 0\n if root.branches is None:\n return 1\n else:\n for value in root.branches.values():\n max_depth = max(max_depth, DecisionTree.find_depth_tree(value))\n return 1 + max_depth\n else:\n 
return 1", "def depth(self):\n if self.children is None:\n return 1\n\n max_depth = 0\n for child in self.children:\n if child is None:\n return 1\n child_depth = child.depth()\n if child_depth > max_depth:\n max_depth = child_depth\n\n return max_depth+1", "def depth(self):\n result = 0\n if self.val is None:\n return result\n return max(self.left.depth(), self.right.depth()) + 1", "def depth(self):\n ch = self.children\n return 0 if not ch else 1 + max([c.depth for c in ch])", "def __get_max_depth(self, conf):\n return conf[self.conf_item.get_max_depth()]", "def max_recursion_depth(self) -> ConfigNodePropertyInteger:\n return self._max_recursion_depth", "def max_depth(root):\n # basic case\n if root is None:\n return 0\n\n # breadth-first traversal\n queue = collections.deque([root])\n depth = 0\n while queue:\n queue_size = len(queue)\n for i in range(queue_size):\n curr = queue.popleft()\n if curr.left is not None:\n queue.append(curr.left)\n if curr.right is not None:\n queue.append(curr.right)\n depth += 1\n\n return depth", "def _tree_depth(self):\n return self._flat_data._tree_depth()", "def test_MaxDepth_SimpleTree(self):\n\n root = TreeNode(0)\n root.addLeft(1)\n root.addRight(5)\n root.left.addLeft(2)\n root.left.addRight(3)\n root.left.right.addRight(4)\n root.right.addRight(6)\n\n self.assertEqual(findMaxDepthDFS(root),3)", "def get_depth(self, current, n):\n if current is not None:\n return max(self.get_depth(current.left, n + 1), self.get_depth(current.right, n + 1))\n else:\n return n", "def deep_max(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[-1]\r\n return node.keys[-1] if node.keys else None", "def treeLevel(root):\n\n if not root:\n return 0\n else:\n return 1+max(treeLevel(root.left),treeLevel(root.right))", "def depth(self) -> int:\n if len(self.children) == 0:\n return 0\n\n # Regresar la altura máxima de sus hijos más uno\n return max([n.depth() for n in self.children]) + 1", "def depth(x):\n return max(int(x * depth_multiplier), 8)", "def get_max_depth_val():\n data = SUNRGBDTrainDataset(True)\n return max([data[0][i][-1].flatten().item() for i in range(len(data))])", "def depth(self):\n left_depth = self.left.depth() if self.left is not None else 0\n right_depth = self.right.depth() if self.right is not None else 0\n return max(left_depth, right_depth) + 1", "def find_maximum_value(self):\n if self.root: \n self.max_val = self.root.value\n else:\n return 'No tree found'\n def inner(root):\n if root.left:\n inner(root.left)\n\n if root.right:\n inner(root.right)\n\n if self.max_val < root.value:\n self.max_val = root.value\n\n inner(self.root)\n return self.max_val", "def get_depth(self):\n if self.root is None:\n return 0\n else:\n node_queue = list()\n node_queue.append(self.root)\n depth = 0\n while len(node_queue):\n q_len = len(node_queue)\n while q_len:\n q_node = node_queue.pop(0)\n q_len = q_len - 1\n if q_node.left is not None:\n node_queue.append(q_node.left)\n if q_node.right is not None:\n node_queue.append(q_node.right)\n depth = depth + 1\n return depth", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def height(root:Node) -> int:\n current = root.left\n depth = 0\n maxdepth = [0]\n #track the value and whether it has a branchpoint or not (bool)\n seen = dict()\n\n #do the left side first, then the right\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n 
seen.update({current.val:True})\n else:\n seen.update({current.val:False})\n depth +=1\n maxdepth.append(depth)\n if current.left is not None:\n current = current.left\n elif current.right is not None:\n current = current.right\n else:\n current = None\n\n print(' maxdepth left so far is {}'.format(maxdepth))\n\n current = root.right\n depth = 0\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n seen.update({current.val: True})\n else:\n seen.update({current.val: False})\n depth +=1\n maxdepth.append(depth)\n if current.right is not None:\n current = current.right\n elif current.left is not None:\n current = current.left\n else:\n current = None\n print(' maxdepth right so far is {}'.format(maxdepth))\n\n return max(maxdepth)", "def maximum_value(self):\n if not self.root:\n return \"the tree is empty!\"\n\n max_val = self.root.value\n\n def _max_value(node):\n nonlocal max_val\n if not node:\n return\n if node.value > max_val:\n max_val = node.value\n\n _max_value(node.left)\n _max_value(node.right)\n _max_value(self.root)\n return max_val", "def max_level(self):\n return self.__max", "def calculate_tree_height(tree):\n max_height = 0\n for i in tree.values():\n if i.is_leaf():\n path = i.path_to_root()\n if len(path) > max_height:\n max_height = len(path)\n\n return max_height", "def minDepth(self, root: TreeNode) -> int:\n return self.bfs(root)", "def depth(self):\n if self.size == 0:\n return 0\n return int(math.log(self.size, 2)) + 1", "def max_tree_id(self) -> int:\n\n return max(self.tree_ids) if len(self.tree_ids)>0 else 0", "def _height1(self): #works but n^2 time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def depth(self):\n if self.parent is None:\n return 0\n else:\n return self.parent.depth() + 1", "def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def _find_max(self, root):\n while root.right:\n root = root.right\n return root", "def depth(self):\n if not self.root:\n return None\n else:\n return self.root.balance_number", "def DEFAULT_MAX_DEPTH(self): # real signature unknown; restored from __doc__\n pass", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def DEFAULT_MAX_DEPTH_DIFF(self): # real signature unknown; restored from __doc__\n pass", "def depth(self) -> int:\n depth = 0\n node = self\n while node:\n depth += 1\n node = node.parent\n return depth", "def depth(self) -> int:\n return self.__depth", "def peek_max(self):\n if self.root:\n return self.root.max().value\n raise ValueError(\"cannot perform peek_max on an empty tree\")", "def max(self):\n return self._max(self.root)", "def option_max_depth(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxDepth/')))", "def helper(root):\n if not root or not root.children: return 0\n \n if len(root.children) == 1:\n depth = 1 + helper(root.children[0])\n self.diameter = max(self.diameter, depth)\n return depth\n \n else:\n depths = [1+helper(child) for child in root.children]\n max1, max2 = 0, 0\n for depth in depths:\n if depth >= max1:\n max1, max2 = depth, max1\n elif depth < max1 and depth > max2:\n max2 = depth\n self.diameter = max(self.diameter, max1+max2)\n return max1", "def find_max(self) -> TreeNode:\n node = 
self.root\n while True:\n if not node.right:\n return node\n node = node.right", "def get_max_dmag_from_depth(depth):\n return 2.5 * np.log10(depth)", "def most_visited_child(self):\n return max(self.children, key=lambda c: c.explore_count)", "def depth(self):\n \n return self._depth", "def heightTree(root):\n try:\n if (root is None):\n return -1\n else:\n return 1 + max(heightTree(root['left']), heightTree(root['right']))\n except Exception as exp:\n error.reraise(exp, 'RBT:heightTree')", "def tree_depth(tree):\r\n if(tree==None):\r\n return 0\r\n elif(left(tree)!=None):\r\n return 1+tree_depth(left(tree))\r\n elif(right(tree)!=None):\r\n return 1+tree_depth(right(tree))\r\n else:\r\n return 0", "def depth(self):\n return 0", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def _calc_tree_depth(refinement_tree):\n if not refinement_tree:\n return 0\n\n depth = 0\n for k, v in refinement_tree.items():\n d = _calc_tree_depth(v)\n if d > depth:\n depth = d\n\n return 1 + depth", "def max_recursion_depth(self, max_recursion_depth: ConfigNodePropertyInteger):\n\n self._max_recursion_depth = max_recursion_depth", "def reset_max_depth(self) -> None:\n # The max depth is now calculated on the fly, so this is a no-op.\n pass", "def min_depth(t):\n if is_leaf(t):\n return 0\n h = float('inf')\n for b in branches(t):\n # Still works fine!\n h = min(h, 1 + min_depth(b))\n return h", "def find_max(self):\n return self.root and self.root.find_max()", "def get_tree_depth(attributes):\n depth = 0\n for attribute in attributes:\n if attribute == -1:\n depth = depth + 1\n return depth", "def depth(self, node):\n\n if not node:\n return 0\n else:\n l_depth = self.depth(node.left)\n r_depth = self.depth(node.right)\n\n if l_depth > r_depth:\n return l_depth + 1\n else:\n return r_depth + 1", "def _get_new_max(self, insert=True):\n right = 1\n left = 1\n if self._root:\n if self._root._rkid:\n right = self._new_depth(self._root._rkid, 2)\n if self._root._lkid:\n left = self._new_depth(self._root._lkid, 2)\n self._rbal = right\n self._lbal = left\n if insert:\n if right > left:\n if right > self._max_depth:\n self._max_depth = right\n elif left > self._max_depth:\n self._max_depth = left\n else:\n if right > left:\n if right < self._max_depth:\n self._max_depth = right\n elif left < self._max_depth:\n self._max_depth = left", "def max_levels(self):\n\n return self._max_levels", "def diameterOfBinaryTree(self, root):\n self.max_length = 0\n def maxDepth(root):\n if not root:\n return 0\n left_branch = maxDepth(root.left)\n right_branch = maxDepth(root.right)\n self.max_length = max(self.max_length, left_branch + right_branch)\n return max(left_branch, right_branch) + 1\n maxDepth(root)\n return self.max_length", "def get_height_iterative_WithNestedClass(self):\n class NodeMeta:\n def __init__(self, node, depth):\n self.node = node\n self.depth = depth\n def __repr__(self):\n return \"Node: {} Depth: {}\".format(self.node, self.depth)\n\n max_so_far = 0\n nodes_queue = deque()\n nodes_queue.append(NodeMeta(self.root, 0))\n while nodes_queue:\n curr = nodes_queue.popleft()\n if not curr.node:\n continue\n max_so_far = max(max_so_far, curr.depth)\n for node in [curr.node.left, curr.node.right]:\n nodes_queue.append(NodeMeta(node, curr.depth + 1))\n return max_so_far", "def height(t: Tree):\n if len(t.children) == 0:\n return 1\n else:\n return 1 + max([height(c) for c in t.children])", "def findmaxnode(self):\n if not self._rightchild:\n return self\n return 
self._rightchild.findmaxnode()", "def depth(self, p):\n if self.is root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def get_height_iterative(self):\n max_so_far = 0\n nodes_queue = deque()\n nodes_queue.append((self.root, 0))\n while nodes_queue:\n node, depth = nodes_queue.popleft()\n max_so_far = max(max_so_far, depth)\n if node.left:\n nodes_queue.append((node.left, depth + 1))\n if node.right:\n nodes_queue.append((node.right, depth + 1))\n return max_so_far", "def find_max(self):\n return max(self.nodes, key=int)", "def height(self):\n if self.children == []:\n return 1 \n else:\n arr = []\n for child in self.children:\n result = 1 + child.height()\n arr.append(result)\n return max(arr)", "def depth(self,p):\n return 0 if self.is_root(p) else 1 + self.depth(self.parent(p))", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))", "def get_min_depth(l_k):\n return max(l_k.values())", "def get_max(self):\n if self.root is None: # BC1\n return float('-inf')\n\n current = self.root\n while current.right is not None: # Traverse like a linked-list\n current = current.right\n\n return current.key", "def max_value(tree):\n max_utility = float(\"-inf\")\n \n if (is_terminal(tree)):\n return tree\n else:\n #options = []\n for node in tree:\n #options.append(max_value(node))\n max_utility = max(max_utility, min_value(node))\n return max_utility", "def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1", "def depth(self, node):\n if node is self.root:\n return 0\n return nx.shortest_path_length(self.graph, self.root, node)", "def max(self):\n return self.root.rightmost", "def _parse_tree_height(sent):\n children = list(sent._.children)\n if not children:\n return 0\n else:\n return max(_parse_tree_height(child) for child in children) + 1", "def get_recursion_depth(self):\n str_depth_input = self.entries[\"ent_recursion_depth\"].get()\n if str_depth_input == '':\n return None # default of fractal class while drawing in None\n # draws the base curve instead\n return int(str_depth_input)", "def min_depth(node):\n if not node:\n return 0\n elif (not node.left) and (not node.right):\n # found leaf\n return 1\n elif not node.left:\n # if the root has only 1 child, this prevents the minimum depth from\n # equaling zero\n return min_depth(node.right) + 1\n elif not node.right:\n return min_depth(node.left) + 1\n return min(min_depth(node.left), min_depth(node.right)) + 1", "def max_node(self):\n node = self.root\n while node.right is not None:\n node = node.right\n return _BSTNode(node.item)" ]
[ "0.88220024", "0.8809473", "0.8737049", "0.8678075", "0.8640208", "0.84385777", "0.84369", "0.82397825", "0.8233229", "0.8227665", "0.82105106", "0.8154944", "0.8108934", "0.80783707", "0.8075165", "0.7958314", "0.7903902", "0.7895258", "0.78915644", "0.7854622", "0.7788676", "0.7767831", "0.77410656", "0.77398944", "0.76884675", "0.76144445", "0.7589923", "0.7551153", "0.75507694", "0.7513232", "0.7438804", "0.7429544", "0.7413355", "0.7306157", "0.7293628", "0.72874516", "0.7257623", "0.7185808", "0.71414405", "0.70702434", "0.70657206", "0.7038106", "0.7036921", "0.7026808", "0.70174205", "0.70134604", "0.70100844", "0.7008146", "0.69628227", "0.6953819", "0.69497025", "0.6946104", "0.6941636", "0.69357586", "0.69302124", "0.69180393", "0.6914341", "0.68991417", "0.6891467", "0.6851921", "0.68518305", "0.68402225", "0.683019", "0.6794715", "0.679198", "0.67806745", "0.67772305", "0.6774671", "0.6760778", "0.6740075", "0.67332864", "0.6721035", "0.6720996", "0.6708847", "0.6691022", "0.6663535", "0.66628987", "0.6649139", "0.6640063", "0.660504", "0.66037023", "0.65885997", "0.65830183", "0.65799594", "0.65601236", "0.6555657", "0.6543032", "0.65420043", "0.65420043", "0.65420043", "0.6539874", "0.65350854", "0.65138656", "0.64939475", "0.64829314", "0.6473694", "0.6464908", "0.6460901", "0.64217603", "0.6409444" ]
0.8212468
10
Returns the total number of nodes in the tree
def numNodes(self):
    res = 0
    for n in self.iternodes():
        res += 1
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n return self.__tree.node_count", "def count_nodes(self):\n if self.children is None:\n return 0\n\n total_count = 0\n for child in self.children:\n if child is None:\n return 0\n child_count = child.count_nodes()\n total_count = total_count + child_count\n\n return total_count+1", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def node_count(self):\n return self._root.count()", "def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt", "def numNodes(T):\r\n n = 1\r\n if T.isLeaf:\r\n return n\r\n for i in range(len(T.child)):\r\n n += numNodes(T.child[i])\r\n return n", "def get_tree_size(self, node):\n\n # If the tree has not been created yet.\n if node == None:\n return 0\n n_nodes = 1\n for child in node.children:\n n_nodes += self.get_tree_size(node.children[child])\n return n_nodes", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def countNodes(self, root):\n\n\n if not root:\n return 0\n\n return 1+self.countNodes(root.left)+self.countNodes(root.right)", "def size(self) -> int:\n #binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper count nodes\n return self.size_helper(self.root)", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def count(self):\n\t\treturn len(list(self.nodes))", "def tree_size(self) -> int:\n Q = Queue()\n count = 0\n Q.put(self.root)\n while not Q.empty():\n node = Q.get()\n count += 1\n for child in node.children.values():\n Q.put(child)\n return count", "def get_tree_size(cur):\n sql = \"\"\"\n SELECT\n COUNT(*)\n FROM\n nodes;\n \"\"\"\n cur.execute(sql)\n result = cur.fetchone()\n return result['count']", "def number_of_nodes(self) -> int:\n return self.graph.number_of_nodes()", "def node_count(self):\n return self._node_count", "def get_num_nodes(self):\n return len(self._nodes)", "def get_num_nodes(self):\n return len(self._nodes)", "def number_of_nodes(self):\n return int(self._data['number_of_nodes'])", "def num_nodes(self):\n return len(self.nodes)", "def NodesCount(self):\n return len(self.nodes)", "def getNNodesTot(self):\n nNodesTot = 0\n for iElt in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot", "def leaf_count(self) -> int:\n if self.children == []:\n return 1\n else:\n return sum([x.leaf_count() for x in self.children])", "def n_trees(self):\n return len(self.data_kd)", "def GetCount(self):\r\n\r\n if not self._anchor:\r\n # the tree is empty\r\n return 0\r\n\r\n count = self._anchor.GetChildrenCount()\r\n \r\n if not self.HasAGWFlag(TR_HIDE_ROOT):\r\n # take the root itself into account\r\n count = count + 1\r\n \r\n return count", "def size(self) -> int:\n if self.root is None: # If tree is empty\n return 0\n\n return self.size_helper(self.root)", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def _children_count(self):\n cnt = 0\n if self.left:\n 
cnt += 1\n if self.right:\n cnt += 1\n return cnt", "def node_count(self) -> int:\n return pulumi.get(self, \"node_count\")", "def Nnodes(self):\n return len(self.nodes)", "def tree_size(self):\n if self._tree_size is not None:\n return self._tree_size\n if self.is_root:\n self.arbor._setup_tree(self)\n # pass back to the arbor to avoid calculating again\n self.arbor._store_node_info(self, '_tree_size')\n else:\n self._tree_size = len(list(self[\"tree\"]))\n return self._tree_size", "def count_taxa_tree(tree_nxobj):\n\tnode_count = 0 #number of taxa in the tree\n\tfor node in tree_nxobj.preorder_node_iter():\n\t\tnode_count += 1\n\n\treturn node_count", "def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])", "def size(self):\n\t\treturn len(self.nodes)", "def num_nodes(self) -> int:\n return pulumi.get(self, \"num_nodes\")", "def count(self):\r\n return self.count_helper(self.top_node)", "def node_count(self):\n return self.process_tree.get_descendant_count() + 1", "def num_nodes(self):\n return len(self._node_reg)", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def get_nodes_pixel_count(self):\n sum_count = self.pixel_count\n for i in range(8):\n node = self.children[i]\n if node:\n sum_count += node.pixel_count\n return sum_count", "def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1", "def get_num_nodes(self):\n\n return sum(self.topology)", "def num_actual_nodes(tree):\n return (tree.n_node_samples > 0).sum()", "def n_children(self):\n ch = self.children\n return 0 if not ch else len(ch) + sum([c.n_children for c in ch])", "def num_nodes(self):\n return len(self.successors)", "def n(self):\n return sum(list(self.nodes.values()))", "def size(self):\n\n count = 0\n curr_node = self.head\n while curr_node is not None:\n curr_node = curr_node.next_node\n count += 1\n\n return count", "def count_leaves(self) -> int:\n # binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper function +=count total leaf\n return self.count_leaves_helper(self.root)", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def num_tree(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumTree(self.handle, ctypes.byref(out)))\n return out.value", "def __len__(self):\n return len(self.subtrees())", "def compute_num_nodes(graph):\n return len(graph.keys()) # return the number of nodes in the graph", "def countNodes(epr):\n result = 1\n argLst = epr.args\n for arg in argLst:\n result += countNodes(arg)\n return result", "def size(node):\n\t\tif node is None:\n\t\t\treturn 0\n\t\treturn 1+BST.size(node.left)+BST.size(node.right)", "def size(self) -> int:\n return self.root.size if not self.empty() else 0", "def count_leaf(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 0 + self.get_left().count_leaf() + self.get_right().count_leaf()\n else:\n return 0 + self.get_left().count_leaf()\n else:\n return 0 + self.get_right().count_leaf()", "def count_leaves(self) -> int:\n if self.root is None: # If tree is empty\n return 0\n\n return 
self.count_helper(self.root)", "def num_children(self):\n # TODO: Determine how many children this node has\n count = 0\n for child in self.children:\n # if child not none\n if child:\n count += 1\n return count", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n# s = 0\n# for child in T:\n# s += leaf_count(child)\n# return s\n return reduce(add, map(leaf_count, T))", "def _num_nodes(self):\n return int(self._node_map[-1])", "def size(self):\n size = 0 \n node = self.head \n while node is not None:\n size += 1 \n node = node.next_node \n return size", "def size(self):\n size = 0 \n node = self.head \n while node is not None:\n size += 1 \n node = node.next_node \n return size", "def size_helper(self, node: object) -> int:\n #current node\n count = 1\n\n # current node left\n if node.left is not None:\n count += self.size_helper(node.left)\n\n #current node right\n if node.right is not None:\n count += self.size_helper(node.right)\n\n return count", "def get_children_count(cur, node):\n sql = \"\"\"\n SELECT\n COUNT(*)\n FROM\n nodes\n WHERE\n parent=%s;\n \"\"\"\n cur.execute(sql, (str(node), ))\n result = cur.fetchone()\n return result['count']", "def leaf_count(t: Tree) -> int:\n if t.children == []:\n return 1\n else:\n return sum([leaf_count(child) for child in t.children])", "def _num_nodes(self):\n return len(self._nid2partid)", "def nodeCount(eval):\n if not isEvaluator(eval):\n return 0\n return eval.ReferencedNodes().Size()", "def size(self):\n count = 0\n if self.val is None:\n return count\n else:\n count += 1\n count += self.left.size()\n count += self.right.size()\n return count", "def __len__(self) -> int:\r\n return len(self._nodes)", "def size(self):\n count = 0\n current = self.head\n if self.head is None:\n return 0\n while current.next_node is not None:\n count += 1\n current = current.next_node\n return count + 1", "def get_num_children(self):\n return len(self.children)", "def __len__(self) -> int:\n return len(self.nodes)", "def size(self):\n if len(self.children) == 0:\n return 1\n else:\n return 1 + sum([x.size() for x in self.children])", "def size(self):\n current = self.__head\n count = 0\n while current:\n count += 1\n current = current.next_node\n return count", "def number_of_nodes(self, ntype: str = None) -> int:\n return self.num_nodes(ntype)", "def num_children(self, n):\n counter = 0\n if self.left(n):\n counter += 1\n if self.right(n):\n counter += 1\n return counter", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def size(self):\n if self.root is None:\n return 0\n return self.root.size", "def num_children(self, node):\n self._validate_node(node)\n count = 0\n if self.left(node) is not None:\n count += 1\n if self.right(node) is not None:\n count += 1\n return count", "def num_nodes(self) -> Optional[int]:\n return pulumi.get(self, \"num_nodes\")", "def node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"node_count\")", "def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten", "def num_nodes(self, ntype: str = None) -> int:\n if ntype:\n return self.num_nodes_dict[ntype]\n else:\n return self.total_number_of_nodes", "def num_nodes(self) -> Optional[int]:\n return super().num_nodes", "def num_nodes(self):\n return ((len(self.tensor_u)+1) * (len(self.tensor_v)+1) *\n (len(self.tensor_w)+1))", "def node_count(self, *n_labels):\n if not n_labels:\n return len(self._nodes)\n elif len(n_labels) == 1:\n return 
len(self._nodes_by_label.get(n_labels[0], ()))\n else:\n return sum(1 for _ in self.nodes(*n_labels))", "def __len__(self):\n return self.count_of(CUBA.NODE)", "def size(self):\r\n return self.root.size_tree", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def __len__(self) -> int:\n return 1 + sum(len(child) for child in self.children)", "def num_of_children_steps(self):\n total = 0\n for child in self.children:\n total += child.total_steps\n return total", "def size(self):\n traverse = self.head\n count = 1\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count", "def size(self):\n traverse = self.head\n count = 0\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count + 1", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n s = 0\n for child in T:\n s += leaf_count(child)\n return s\n # Can you put the else clause in one line instead?\n return functools.reduce(operator.add, map(leaf_count, T), 0)", "def count(self):\n node = self.head\n i = 0\n while node:\n i += 1\n node = node.next\n\n return i", "def count_nodes(self, term=None, labels: istr = None) -> int:" ]
[ "0.85499597", "0.8500708", "0.84604555", "0.84518975", "0.8407748", "0.83981884", "0.8378013", "0.832748", "0.83060443", "0.826005", "0.81602746", "0.81444424", "0.81200975", "0.808973", "0.80409694", "0.8024372", "0.8013639", "0.800769", "0.800769", "0.79400194", "0.7878856", "0.78758013", "0.7828869", "0.7792113", "0.7770062", "0.7760109", "0.7735914", "0.77328706", "0.77166057", "0.770595", "0.7701537", "0.76925355", "0.768392", "0.762619", "0.7623354", "0.7612661", "0.76033604", "0.75920105", "0.75867844", "0.75720984", "0.7531618", "0.7531618", "0.7515429", "0.7498199", "0.7489195", "0.74889755", "0.74870986", "0.74629396", "0.74324405", "0.7426599", "0.74216694", "0.7402957", "0.7402957", "0.7400687", "0.7382425", "0.7373396", "0.7355572", "0.73452723", "0.73272055", "0.7323368", "0.73096484", "0.73025674", "0.7301016", "0.72979623", "0.7294097", "0.7294097", "0.7293318", "0.72883004", "0.72793853", "0.7266047", "0.7237884", "0.7224858", "0.721702", "0.72169083", "0.7214091", "0.7212066", "0.72073096", "0.7205736", "0.7190853", "0.7173826", "0.7168798", "0.7158877", "0.7154225", "0.7153514", "0.7144649", "0.71435684", "0.7135849", "0.7135165", "0.7093757", "0.7092908", "0.7088354", "0.7079106", "0.7075693", "0.70631784", "0.70607316", "0.70537215", "0.70506436", "0.7035441", "0.70354", "0.70327944" ]
0.83307356
7
Returns all nodes in the tree
def nodes(self):
    return [n for n in self.iternodes()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_nodes(self):\n return self._get_all_nodes()", "def get_nodes(self):\n all_nodes = [] \n if not self._root is None:\n all_nodes.append(self._root)\n i = 0\n while i < len(all_nodes):\n for node in all_nodes[i]._children:\n all_nodes.append(node)\n i += 1 \n return all_nodes", "def all_nodes(tree: tree_mod.Tree):\n if isinstance(tree, tree_mod.Tree):\n return tree_mod.Tree(tree.label(), [all_nodes(child) for child in tree])\n else:\n return tree_mod.Tree(tree, [])", "def get_nodes(self) -> Iterable[RTreeNode[T]]:\n yield from self._get_nodes(self.root)", "def getNodes(self):\n return self.__allNodes", "def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes", "def nodes(self):\n return self._get_tree_queryset()", "def get_nodes(self):\n\n nodes = []\n\n if not self.node:\n return nodes\n \n nodes.extend(self.node.left.get_nodes())\n nodes.append(self.node.vp)\n nodes.extend(self.node.right.get_nodes())\n\n return nodes", "def nodes(self):\n root = self.root()\n queue = Queue()\n queue.enqueue(root)\n return self._breadth_first(queue, elements=False)", "def nodes(self):\r\n return (node.content for node in self.traverse())", "def get_nodes(self):\n return [node for node in self._nodes.itervalues()]", "def get_nodes(self):\n return [node for node in self._nodes.itervalues()]", "def get_rootnodes(self) -> List[RootNode]:\n\t\treturn sorted(self.root_nodes, key=lambda x: x.name.lower())", "def nodes(self):\n return self._nodes", "def nodes(self):\n return self._nodes", "def nodes(self):\n return self._nodes", "def nodes(self):\n return self.__nodes", "def all_nodes(self, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'all')", "def nodes (self):\n return self.__nodes", "def get_nodes(self):\n pass", "def findall_nodes(self):\n\n nodes = []\n for n in self.nodes:\n nodes += n.findall_forward()\n\n # Make sure list only contains every element once\n nodes = dict((k,1) for k in nodes)\n self.nodes = list(nodes.keys())\n self.connect_backwards()", "def get_node_list(self):\n return []", "def leaf_nodes(self):\n return self.nodes.filter(lft=models.F('rght') - 1)", "def all_nodes(self):\n nodes = []\n for layer in self.layers:\n nodes += layer.nodes\n return nodes", "def list_of_nodes(tree):\n list_ = []\n if tree.left.is_leaf() and tree.right.is_leaf():\n list_.append(tree)\n return list_\n elif tree.left.is_leaf():\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_\n elif tree.right.is_leaf():\n list_.extend(list_of_nodes(tree.left))\n list_.append(tree)\n return list_\n else:\n list_.extend(list_of_nodes(tree.left))\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_", "def get_nodes(self) -> List[Node]:\n\t\treturn sorted(self.nodes, key=lambda x: x.name.lower())", "def get_nodes(self) -> List[Node]:\n\t\t# Setup a node holder\n\t\tnode_holder: NodeSubNodeHolder = NodeSubNodeHolder()\n\n\t\t# Iterate over RootNodes\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over node in each RootNode\n\t\t\tfor node in rootnode.get_nodes():\n\t\t\t\t# Add the Nodes to the node_holder\n\t\t\t\tnode_holder.add_node(node)\n\n\t\t# Return the list of nodes from node_holder\n\t\treturn node_holder.get_nodes()", "def get_nodes(self):\n\n return self._nodes", "def _tree_nodes(self):\n\n self.arbor._grow_tree(self)\n yield self\n if self.ancestors is None:\n return\n for ancestor in self.ancestors:\n for a_node in ancestor._tree_nodes:\n yield a_node", "def 
nodes(self):\n # is the current node a leaf?\n if self.is_leaf():\n return {self}\n # or does it already have leaf-entries?\n elif not self.nodes_set == set():\n return self.nodes_set\n # if not, recursively return all children\n else:\n children_nodes = {self}\n for child in self.child_nodes:\n children_nodes = children_nodes.union(self.child_nodes[child].nodes)\n # storing it for later use\n self.nodes_set = children_nodes\n return children_nodes", "def get_leaf_nodes(self):\n pass", "def getNodes(self):\n return [ node for node in sorted(self._nodes.values()) ]", "def _all_subnodes(self):\n return self.__dict__.values()", "def nodes(self):\n return list(self.keys())", "def nodes(self):\n return list(self.keys())", "def nodes(self):\n return self.graph.nodes", "def get_nodes(self, tree):\n if tree.name == '':\n tree.name = 'N' + str(self.n_nodes)\n self.n_nodes += 1\n self.nodes[tree.name] = SEMTreeNode('node')\n else:\n self.nodes[tree.name] = SEMTreeNode('leaf')\n\n if tree.up is not None:\n name_p = tree.up.name # parent\n name_c = tree.name # children\n dist = tree.dist\n\n self.nodes[name_p].add_dist(name_c, dist)\n self.nodes[name_c].add_dist(name_p, dist)\n\n for node in tree.children:\n self.get_nodes(node)\n return", "def nodes(self):\n return list(self.node_dict.keys())", "def get_ordered_nodes(self):\n nodes = []\n self.build_nodes_list(self.root, nodes)\n return nodes", "def nodes( self, data = False ):\n return self._G.nodes(data = data)", "def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node", "def nodes(self):\n return list(self._nodes_dict.values())", "def get_root_nodes(self):\n\n selector = \"forest\"\n desc_uids = self[selector, \"desc_uid\"]\n rids = np.where(desc_uids == -1)[0]\n for rid in rids:\n yield self.get_node(selector, rid)", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def nodes(self):\n return self._node_reg", "def getListOfNodes(self):\n return _libsbml.ASTNode_getListOfNodes(self)", "def get_nodes(self):\n\n return list(self.graph.nodes)", "def list_nodes(self):\n return self.ironic_client.node.list()", "def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()", "def nodes(self):\n return list(self._g.keys())", "def get_node_list(self):\n return self.node_list", "def nodes(self) -> NodeList:\r\n return self._nodes", "def getchildren(self):\n return self.root.getchildren()", "def get_node_list(self):\n return [[node] for node in self.graph.nodes]", "def getNodes(self, strategy = 'DEPTH-FIRST'):\n nodes = []\n queue = [self.root]\n while len(queue) > 0:\n node = queue.pop()\n nodes.append(node)\n # if strategy.upper().startswith('DEPTH'):\n if not node.isLeaf():\n queue.extend(node.children)\n return nodes", "def nodes(self):\n data = list(self._nodes.values())\n data.sort()\n return data", "def leaf_nodes(self):\n deps = set([\n item for sublist in self.edges.values() for item in sublist\n ]) # Now contains all nodes that contain dependencies.\n return (x for x in self.nodes if x not in deps) # Generator that\n # contains all nodes *without* any dependencies (leaf nodes)", "def nodes(self):\n return self.dict.keys()", "def s_all_descendants(node):\r\n if len(node.children)==0:\r\n return []\r\n else:\r\n children = node.children[:]\r\n for child in node.children:\r\n children.extend(Node.s_all_descendants(child))\r\n return children", "def getNodes(self):\n return self.graph.keys()", "def get_nodes(self):\n return list(map(lambda x: x[0], self.__nodes))", "def 
all_descendants(self):\r\n return Node.s_all_descendants(self)", "def nodes(self):\n new = self.copy()\n new._filter = [\"nodes\"]\n return new", "def _get_nodes(self, selector):\r\n arr = []\r\n def traverse(cont):\r\n children = cont.get_children()\r\n for n in xrange(len(children)):\r\n child = children[n]\r\n if child.node_type == selector:\r\n arr.append(child)\r\n elif child.node_type != 'Shape':\r\n traverse(child)\r\n traverse(self)\r\n return arr", "def nodeItems(self):\n nodes = list()\n for item in self.items():\n if isinstance(item, NodeItem):\n nodes.append(item)\n return nodes", "def get_nodes():\n with session_for_read() as session:\n res = session.query(\n model.Node\n ).order_by(\n model.Node.started_at.desc()\n )\n return [model.Node(uuid=entry.uuid, version_id=entry.version_id,\n state=entry.state, started_at=entry.started_at,\n finished_at=entry.finished_at, error=entry.error,\n manage_boot=entry.manage_boot)\n for entry in res.all()]", "def subtrees(self):\n return list(iter(self))", "def nodes(self):\n return list(self.__graph.keys())", "def roots(self):\n for node in self:\n if not node.incoming:\n yield node", "def get_child_nodes(node):\r\n return list(iter_child_nodes(node))", "def read_all_trees(self, **kwargs):\n return self.read_single_tree(-1, halonum=None, **kwargs)", "def get_leaf_nodes(self):\n leaf_nodes = []\n for i in range(8):\n node = self.children[i]\n if node:\n if node.is_leaf():\n leaf_nodes.append(node)\n else:\n leaf_nodes.extend(node.get_leaf_nodes())\n return leaf_nodes", "def get_recursed_tree(self, root_nodes):\n nodes = []\n for n in root_nodes:\n nodes.append({\n \"id\": n.pk,\n \"label\": n.title,\n \"slug\": n.slug,\n \"view_url\": reverse('sveedocuments:page-details', args=[n.slug]),\n \"children\": self.get_recursed_tree(n.get_children())\n })\n return nodes", "def get_nodes(self, names):\n nodes = []\n for name in names:\n node = self.get_node(name, prevent_error=True)\n if node == None:\n if verbose:\n print('Warning: could not find a TreeNode named {}.'.format(name))\n else:\n nodes.append(node)\n return nodes", "def getList(self):\r\n node = self.root\r\n list = []\r\n return self.inOrderTraverse(node, list)", "def get_leaf_nodes(self, selector=None):\n\n if selector is None:\n if self.is_root:\n selector = \"forest\"\n else:\n selector = \"tree\"\n\n uids = self[selector, \"uid\"]\n desc_uids = self[selector, \"desc_uid\"]\n lids = np.where(~np.in1d(uids, desc_uids))[0]\n for lid in lids:\n yield self.get_node(selector, lid)", "def nodes(self):\n nodes = list()\n for node in self._nodes:\n if isinstance(node, Graph):\n nodes += node.nodes\n else:\n nodes.append(node)\n return nodes", "def get_leafs(self) -> list:\n return self._leafs[:]", "def list_nodes(self):\n return self.datanodes.keys()", "def iter_nodes(self):", "def nodes(self):\n\n return list(set(self._graph.keys() + [x for x in itertools.chain.from_iterable(self._graph.values())]))", "def __getNodeChildren(self, node, level):\n if level <= 0:\n return []\n\n allNodes = []\n\n # Add all nodes one level below the current node\n parents = self.__graph.successors(node)\n for subNode in parents:\n subNodes = self.__getNodeChildren(subNode, level - 1)\n allNodes.append(subNode)\n allNodes.extend(subNodes)\n\n return list(set(allNodes))", "def get_nodes_on_level(self, level):\n nodes = [self.root]\n for i in range(level):\n children_nodes = []\n while nodes:\n node = nodes[0]\n children_nodes.append(node.left if node else None)\n children_nodes.append(node.right if node else 
None)\n nodes.remove(node)\n nodes = children_nodes\n return nodes", "def get_tree(self):\n tn, td = self.__get_graph(self.path)\n tree = [[os.sep, -1, os.sep]]\n tree.extend(self.__get_node(0, tn, td, os.sep))\n return tree", "def list_all(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be iterated\")\n return list()\n\n nodes = list()\n for node in self.graph.nodes():\n if node == self.NONE_PACKAGE:\n continue\n nodes.append(node)\n return nodes", "def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )", "def get_nodes(self):\n return_set = set()\n for value in self._name:\n return_set.add(value)\n return return_set", "def nodes( self, data=False):\n return self.node.items() if data else self.node.keys()", "def get_children(self):\n return []", "def nodes(self):\n return self.source_net.nodes()", "def iter_all(self):\n for i in range(self.num_nodes):\n self.iter_node(i)", "def subtrees(self):\n yield from subtrees(self)", "def get_all_children(self):\n return tuple(self.children)", "def roots(self):\n return {\n nodeObj for nodeId, nodeObj\n in self.nodes.items()\n if nodeId not in self.childNodes}", "def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n", "def all_nodes_as_iterable(self, include_metadata: bool = False) -> Generator:\n if include_metadata:\n return [\n (self._names.get_name(i), self._meta.get_node(self._names.get_name(i)))\n for i in self._nk_graph.iterNodes()\n ]\n return [self._names.get_name(i) for i in self._nk_graph.iterNodes()]", "def traverse(self):\n return self.root.traverse()", "def in_order(self):\n try:\n if not self.root:\n return \"the tree is empty!\"\n else:\n output = []\n\n def order_tree(node):\n if node.left:\n order_tree(node.left)\n nonlocal output\n output += [node.value]\n if node.right:\n order_tree(node.right)\n return output\n return order_tree(self.root)\n except:\n print(\"something went wrong please try again\")", "def nodeset(self):\n return self._nodeset", "def get_roots(self, connection=None):\n\n connection = connection or self.engine.connect()\n\n return connection.execute(\n select(\n [self.nodes.c.title, self.nodes.c.id.label('descendant')]\n ).where(\n self.nodes.c.id.notin_(\n select([self.paths.c.descendant]).where(self.paths.c.depth > 0)\n )\n )\n )" ]
[ "0.8067358", "0.805089", "0.80140465", "0.7930633", "0.7844008", "0.7699735", "0.766375", "0.7651623", "0.7554823", "0.74022615", "0.73905694", "0.73905694", "0.72834", "0.7274381", "0.7274381", "0.7274381", "0.72736555", "0.7273303", "0.7249022", "0.7241052", "0.7209985", "0.72043943", "0.7185233", "0.7167908", "0.713096", "0.7124315", "0.71182954", "0.706873", "0.70681727", "0.70666975", "0.70385456", "0.7030591", "0.69656587", "0.6965533", "0.6965533", "0.69611186", "0.6944809", "0.69428104", "0.69273263", "0.69209003", "0.69171184", "0.6912675", "0.69079435", "0.6903806", "0.6891795", "0.6882336", "0.6878817", "0.6866486", "0.6813663", "0.68111587", "0.67879695", "0.6787517", "0.6783607", "0.67762387", "0.67478454", "0.673479", "0.67346567", "0.6717018", "0.66970634", "0.6685898", "0.6668053", "0.6626671", "0.6618331", "0.6615139", "0.6597248", "0.6596446", "0.6591846", "0.6589729", "0.6582381", "0.6581269", "0.65589315", "0.6533974", "0.6531116", "0.65272254", "0.651893", "0.65180707", "0.6517785", "0.65116715", "0.6506096", "0.6490039", "0.6478454", "0.6471631", "0.64553964", "0.64543295", "0.6452144", "0.64450616", "0.64321125", "0.64112926", "0.6408266", "0.6387287", "0.63834053", "0.6375618", "0.63640314", "0.6363435", "0.6352276", "0.63445634", "0.63423115", "0.6335653", "0.6328312", "0.63273734" ]
0.7894537
4
Pretty prints the tree
def pprint(self,indent=0,node=None):
    if node == None:
        node = self.root
    if node == None:
        print_indent(indent)
        print "[empty tree]"
        return
    if node.type == 'v':
        print_indent(indent)
        print node.value
    elif node.type == 's':
        for (val,c) in node.children.iteritems():
            print_indent(indent)
            print "-",self.keys[node.feature],"=",val,":"
            self.pprint(indent+1,c)
    elif node.type == 'i':
        print_indent(indent)
        print self.keys[node.feature],"<=",node.value,":"
        self.pprint(indent+1,node.children[0])
        print_indent(indent)
        print self.keys[node.feature],">",node.value,":"
        self.pprint(indent+1,node.children[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def print_tree(self):\n return \"\"", "def printTree(self):\n print(printTreeF(self, 0, self))", "def pretty_print(self):\n return self.tree.pretty_print()", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def print_tree(self):\n\t\tself.root.print_recursive(0)", "def pretty_print(self):\n print(self.root)\n self.__pretty_print__(self.root, depth=1)", "def print_tree(self, parser=None):\n for pre, _, node in RenderTree(self):\n print(pre + node._self_string(parser))", "def printTree(self):\n\t\tprint 'Tree:'\n\t\tprint self.root.toString(0)\n\t\tprint '\\n'", "def print_tree(node):\n print tree(node)", "def print_tree(self):\n print(_Node.__print_tree(self))", "def print_tree(self):\n out = \"\"\n for i in range(self.level):\n out += ' |'\n out += '___'\n out += str(self.action)\n if self.action is None:\n print \"None\"\n else:\n print out\n for child in self.children:\n child.print_tree()", "def print_tree(tree, indent=0):\n for c in tree.children:\n print \" \" * indent, \"-->\", c.name\n \n if c.children != []:\n print_tree(c, indent+1)", "def print_tree(self):\n stack = [(self.root, 0, 0)] # (node, child no., tabs)\n ntabs = 0\n while len(stack):\n n, i, tabs = stack.pop()\n if len(n.branch):\n if i>=1 and i==len(n.children)-1:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': >' + str(n.branch[i-1]))\n else:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': <=' + str(n.branch[i]))\n stack.append((n, i+1, tabs))\n if i<len(n.children):\n stack.append((n.children[i], 0, tabs+1))\n else:\n avg = np.dot(n.probabilities[:,0], n.probabilities[:,1])\n print(tabs*'\\t' + 'Label: ' + str(avg) + '\\n')", "def print_tree(self, prefix=\"\"):\n print(\"%s%s\" % (prefix, self.node_label()))\n if self.left:\n self.left.print_tree(prefix + \" \")\n if self.right:\n self.right.print_tree(prefix + \" \")", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def print_tree(self):\n self.__print_node(self.root, 0)", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def recursifTreePrinter(tree,indent):\n listOfBranches = tree.GetListOfBranches()\n if len(listOfBranches) > 0: # Width informations\n maxCharName = max([len(branch.GetName()) \\\n for branch in listOfBranches])\n maxCharTitle = max([len(branch.GetTitle()) \\\n for branch in listOfBranches])\n dic = { \\\n \"nameWidth\":maxCharName+2, \\\n \"titleWidth\":maxCharTitle+4, \\\n \"memoryWidth\":1}\n for branch in listOfBranches: # Print loop\n rec = \\\n [branch.GetName(), \\\n \"\\\"\"+branch.GetTitle()+\"\\\"\", \\\n str(branch.GetTotBytes())]\n write(TREE_TEMPLATE.format(*rec,**dic),indent,end=\"\\n\")\n recursifTreePrinter(branch,indent+2)", "def pprint_nodes(subtrees):\n def indent(s,type=1):\n x = s.split(\"\\n\")\n r = \"+-%s\\n\"%x[0]\n for a in x[1:]:\n if a==\"\": continue\n if type==1:\n r += \"| %s\\n\"%a\n else:\n r += \" %s\\n\"%a\n return r\n if len(subtrees)==0: return \"\"\n f=\"\";\n for a in subtrees[:-1]:\n f += indent(a)\n f += indent(subtrees[-1],2)\n return f", "def print_tree(self):\n 
recur_print = self.recur_print(tree.root, '')[:-1]\n return recur_print", "def print_tree(t, indent=0):\n print(' ' * indent + str(t.root))\n for b in t.branches:\n print_tree(b, indent + 1)", "def print_tree(tree, indent=''):\n\n for branch in tree:\n if type(branch) == list and branch != []:\n print_tree(branch, indent + ' ')\n else:\n if branch != []:\n print(indent + str(branch))", "def print_tree(self, tree, nodes):\n\t\tprint(self.display(tree, nodes, '', True, ''))", "def printTree(self):\n if self.left:\n self.left.printTree()\n print(self.data)\n if self.right:\n self.right.printTree()", "def printTree(self):\r\n print(self.letter)\r\n if self.left:\r\n self.left.printTree()\r\n if self.right:\r\n self.right.printTree()", "def print_tree(tree, depth=0):\n print('+','--'*depth,tree[0])\n if isinstance(tree[1], str):\n print('|',' '*depth,'->',tree[1])\n return\n if isinstance(tree[1],Terminal):\n print('|',' '*depth,'->',repr(tree[1]))\n return\n for subtree in tree[1]:\n print_tree(subtree, depth+1)", "def print_tree(t, indent=0):\n print(' ' * indent + str(label(t)))\n for b in branches(t):\n print_tree(b, indent + 1)", "def print_tree(tree):\n if not tree:\n print None\n return\n \n if tree.children:\n print 'Directory hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))\n print 'Contents:'\n for name, subtree in tree.children.iteritems():\n print\n print name\n print_tree(subtree)\n \n else:\n print 'File hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))", "def tree_print(clf, X):\n tlevel = _tree_rprint('', clf, X.columns, clf.classes_)\n print('<',end='')\n for i in range(3*tlevel - 2):\n print('-',end='')\n print('>')\n print('Tree Depth: ',tlevel)", "def print_tree(t, indent=0, end='\\n'):\n if isinstance(t, Leaf):\n print(t, end='')\n else:\n s = '(' + t.tag + ' '\n indent += len(s)\n print(s, end='')\n print_tree(t.branches[0], indent, '')\n for b in t.branches[1:]:\n print('\\n' + ' '*indent, end='')\n print_tree(b, indent, '')\n print(')', end=end)", "def pretty_print(tree, depth=0):\r\n if depth == 0:\r\n print('TREE')\r\n\r\n for index, split_criterion in enumerate(tree):\r\n sub_trees = tree[split_criterion]\r\n\r\n # Print the current node: split criterion\r\n print('|\\t' * depth, end='')\r\n print('+-- [SPLIT: x{0} = {1} {2}]'.format(split_criterion[0], split_criterion[1], split_criterion[2]))\r\n\r\n # Print the children\r\n if type(sub_trees) is dict:\r\n pretty_print(sub_trees, depth + 1)\r\n else:\r\n print('|\\t' * (depth + 1), end='')\r\n print('+-- [LABEL = {0}]'.format(sub_trees))", "def print_tree(t):\r\n if (t==None):\r\n return \r\n else:\r\n print_tree(left(t))\r\n print(value(t),end=\" \")\r\n print_tree(right(t))", "def print_tree(tree, pref=\"\"):\r\n leaf = \"|_____> \"\r\n top = \"|_______\"\r\n son1 = \"| \"\r\n son2 = \" \"\r\n width = len(top)\r\n\r\n a = \"\"\r\n if len(tree) == 3:\r\n if (pref == \"\"):\r\n a += pref + str(tree[0]) + \"\\n\"\r\n else:\r\n a += pref[:-width] + top + str(tree[0]) + \"\\n\"\r\n a += print_tree(tree[1], pref + son1)\r\n a += print_tree(tree[2], pref + son2)\r\n return a\r\n\r\n else:\r\n return (pref[:-width] + leaf + str(tree) + \"\\n\")", "def display_tree(self, tree_node, spacing=\"\"):\n if tree_node is None:\n return\n else:\n print(spacing + str(tree_node.val))\n spacing += \" \"\n self.display_tree(tree_node.left, spacing)\n self.display_tree(tree_node.right, spacing)", "def print_tree(node):\r\n if node is None:\r\n return\r\n print_tree(node.left)\r\n print node.key\r\n 
print_tree(node.right)", "def printTree(self, tree, str):\n\n\t\tif type(tree) == dict:\n\t\t\tfor item in list(tree.values())[0].keys():\n\t\t\t\t\tprint(\"%s %s = %s \" % (str, list(tree.keys())[0], item))\n\t\t\t\t\tself.printTree(list(tree.values())[0][item], str + \"\\t\")\n\t\telse:\n\t\t\tprint(\"%s -> %s = %s\" % (str, self.targetName, tree))", "def tree(self, depth_index=0):\r\n print(self.tree_str(depth_index))", "def _print_tree(self, tree, current_depth=0):\n if 'surv' in tree:\n self._print_with_depth(tree['times'], current_depth)\n return\n self._print_with_depth(\n \"{0} > {1}\".format(self.column_names[tree['feature']],\n tree['threshold']),\n current_depth)\n self._print_tree(tree['left'], current_depth + 1)\n self._print_tree(tree['right'], current_depth + 1)", "def __repr__(self):\n return show_tree(self, lambda node: node.name,\n lambda node: node.children)", "def print_tree(tree, indent=0, use_symbols=False):\n if use_symbols:\n if indent == 0:\n print_tree_symbol(tree, indent)\n indent += 1\n\n for c in tree.children:\n print_tree_symbol(c, indent)\n\n try:\n if c.children:\n print_tree(c, indent + 1, use_symbols)\n except:\n pass\n else:\n for c in tree.children:\n print(\" \" * indent, \"-->\", c.name)\n\n try:\n if c.children:\n print_tree(c, indent + 1)\n except:\n pass", "def pprint(self, indent: str = \"\"):\n\n from os import linesep\n\n res = self.__str__() + linesep\n child_indent = f\"{indent} \"\n\n pos = -1\n for x in self.children:\n pos += 1\n if pos == len(self.children) - 1:\n res += f\"{child_indent}└── {x.pprint(child_indent)}\"\n else:\n res += f\"{child_indent}├── {x.pprint(child_indent)}\"\n return res", "def print_tree(root):\n queue = [(root, [\"1\"])]\n while queue:\n this, depth = queue.pop(0)\n if isinstance(this, int):\n reprr = \"L %i\" % this\n else:\n reprr = str(this.attribute)\n for key, child in this.children.items():\n queue.append((child, depth + [\"%s\" % key]))\n print \"%s: %s\" % (\".\".join(depth), reprr)", "def format_asciitree(self):\n import asciitree\n\n def child_iter(tree):\n return tree.subtrees()\n\n def text_str(tree):\n return ' %s%s %s' % (tree.label, tree.label_suffix,\n tree.token or '')\n return asciitree.draw_tree(self, child_iter=child_iter,\n text_str=text_str)", "def pformat(self, tree):\n return str(self.to_tree_text_block(tree))", "def prettify(tree, indent=0):\n for key, value in six.iteritems(tree):\n if key == FILE_MARKER:\n if value:\n print((' ' * indent + str(value)))\n else:\n print((' ' * indent + str(key)))\n if isinstance(value, dict):\n prettify(value, indent+1)\n else:\n print((' ' * (indent+1) + str(value)))", "def pretty_tree(x, kids, show):\n (MID, END, CONT, LAST, ROOT) = (\"|-- \", \"`-- \", \"| \", \" \", \"\")\n\n def rec(obj, indent, sym):\n line = indent + sym + show(obj)\n obj_kids = kids(obj)\n if len(obj_kids) == 0:\n return line\n else:\n if sym == MID:\n next_indent = indent + CONT\n elif sym == ROOT:\n next_indent = indent + ROOT\n else:\n next_indent = indent + LAST\n chars = [MID] * (len(obj_kids) - 1) + [END]\n lines = [rec(kid, next_indent, sym) for kid, sym in zip(obj_kids, chars)]\n return \"\\n\".join([line] + lines)\n\n return rec(x, \"\", ROOT)", "def print_tree(self, use_short_ids=True):\r\n def short_id(node):\r\n return node.short_id\r\n def id(node):\r\n return node.data.id\r\n\r\n node_fn = short_id if use_short_ids else id\r\n self._logger.debug(\"deps = {\")\r\n for node in self.nodes:\r\n self._logger.debug(\r\n \"\"\" \"%s\": {\"num\": %d, \"children\": 
[%s]},\"\"\" % (\r\n node_fn(node),\r\n node.data.num_sources,\r\n ','.join(['\"%s\"' % node_fn(child) for child in node.children]))\r\n )\r\n self._logger.debug('}')\r\n self._logger.debug('')", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def format_tree(text):\n return \"\"\"{}\n {}\n\n\"\"\".format(\n COMMENT, textwrap.indent(text, \" \" * 4).strip()\n )", "def print_tree(node, val='', tabs=0):\n align = get_tabs(tabs)\n if isinstance(node, Leaf):\n print(align + str(val))\n print(get_tabs(tabs), str(node))\n return\n print(align + str(val))\n print(align + str(node))\n print_tree(node.true_branch, True, tabs + 1)\n print_tree(node.false_branch, False, tabs + 1)", "def __repr__(self):\n return self.displayTree(0)", "def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)", "def print_tree( root, level ):\n\n if not root.isleaf():\n print level*\"==\" + \"==> \", str(root), \"pointers\", len(root.pointers)\n for p in root.pointers:\n print_tree ( p, level+1 )\n else:\n print level*\"==\" + \"==> \", \n for t in root.tuples:\n print str(t), \n print \"\"", "def visualize_tree(root):\n _visualize_tree(root, [], 0, '-')", "def print_phpsyntax_tree(tree):\n import queue\n q = queue.Queue()\n indent = 10\n\n q.put(tree)\n while q.not_empty:\n item = q.get()\n s = \"\"\n for c in item.children:\n s = s + \" \" * indent + c.name\n q.put(c)\n print(s)\n indent -= 1", "def draw_tree(self):\n\n print \"--- \" + str(self.name)\n \n def draw_child_tree(current, depth):\n \n for c in current.children:\n print depth * \" \" + \"|-- \" + str(c.name)\n if hasattr(c, 'children'):\n draw_child_tree(c, depth + 1)\n \n draw_child_tree(self, 1)\n \n return", "def dump_tree(self) -> str:\n return utils.dump_tree(self._tree)", "def _print_structure(self):\n if self._isthisapropertree() is False:\n print(\"ERROR: this is not a proper Binary Search Tree. 
++++++++++\")\n outstr = str(self._element) + \" (hgt=\" + str(self._height) + \")[\"\n if self._leftchild is not None:\n outstr = outstr + \"left: \" + str(self._leftchild._element)\n else:\n outstr = outstr + \"left: *\"\n if self._rightchild is not None:\n outstr += \"; right: \" + str(self._rightchild._element) + \"]\"\n else:\n outstr = outstr + \"; right: *]\"\n if self._parent is not None:\n outstr = outstr + \" -- parent: \" + str(self._parent._element)\n else:\n outstr = outstr + \" -- parent: *\"\n print(outstr)\n if self._leftchild is not None:\n self._leftchild._print_structure()\n if self._rightchild is not None:\n self._rightchild._print_structure()", "def print_tree(self,root_key='',offset=''):\n itm = self._root\n if root_key:\n itm = self.get_data(root_key)\n tstr = os.linesep \n try: #if isinstance(itm,dict):\n for k in itm.keys():\n x_str = self.print_tree(root_key+'.'+k,offset+' ')\n tstr = tstr+offset+'{}: {}'.format(k,x_str)+os.linesep\n except:\n try: #elif isinstance(itm,list):\n for i,x in enumerate(itm):\n x_str = self.print_tree(root_key+'.'+str(i),offset+' ')\n tstr = tstr+offset+'{}: {}'.format(i,x_str)+os.linesep\n except:\n return '{}'.format(itm)\n return tstr", "def printTree(self):\n print self.storeTree.movies", "def display(self, tree, level = 0):\n\t\tresult = \"\"\n\t\tfor name, node in tree.soon:\n\t\t\tresult += \" \"*level+repr(node)+\"\\n\"\n\t\t\tresult += self.display(tree.getSoon(name),level + 1)\n\t\treturn result", "def tree():\n nobv.visual_tree()", "def print_node_tree(node, level=0):\n str_builder = []\n if node:\n str_builder.append(print_node_tree(node.right, level + 1))\n str_builder.append(\"| \" * level)\n str_builder.append(\n ''.join([str(node.value), \" - \", str(node.level), \"\\n\"]))\n str_builder.append(print_node_tree(node.left, level + 1))\n return ''.join(str_builder)", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, 
node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def tree(node):\n subtrees = []\n for arg in node.args:\n subtrees.append(tree(arg))\n s = print_node(node)+pprint_nodes(subtrees)\n return s", "def printTree(rootNode, level = 0):\n \n if rootNode:\n print(\" \" * level, rootNode.split, \"CLASS:\", rootNode.data)\n printTree(rootNode.lesser, level + 3)\n printTree(rootNode.greater, level + 3)", "def treeview(data,style='unicode'):\n\t# note that dumping to YAML is a great alternative\n\tif style=='unicode': \n\t\t# protect against TeeMultiplexer here because it cannot print unicode to the log file\n\t\tdo_swap_stdout = sys.stdout.__class__.__name__=='TeeMultiplexer'\n\t\tdo_swap_stderr = sys.stderr.__class__.__name__=='TeeMultiplexer'\n\t\tif do_swap_stdout: \n\t\t\thold_stdout = sys.stdout\n\t\t\t#! assume fd1 is the original stream\n\t\t\tsys.stdout = sys.stdout.fd1\n\t\tif do_swap_stderr: \n\t\t\thold_stderr = sys.stderr\n\t\t\t#! assume fd1 is the original stream\n\t\t\tsys.stderr = sys.stderr.fd1\n\t\t# show the tree here\n\t\tasciitree(data)\n\t\t# swap back\n\t\tif do_swap_stderr: sys.stderr = hold_stderr\n\t\tif do_swap_stdout: sys.stdout = hold_stdout\n\telif style=='json': return print(json.dumps(data))\n\telif style=='pprint': \n\t\timport pprint\n\t\treturn pprint.pprint(data)\n\telse: raise Exception('invalid style %s'%style)", "def tree(ctx):\n root_cmd = _build_command_tree(ctx.find_root().command)\n _print_tree(root_cmd)", "def repr_tree(self, node: Node, indent: int) -> str:\n out = \"// \"\n if indent != 0:\n # not First\n out += ' ' * indent + '-> '\n\n out += node.name\n\n if not isinstance(node, Device):\n # still more nodes exist under this node\n for ds in node.ds:\n out += '\\n'\n out += self.repr_tree(ds.ds, indent + 2)\n\n return out", "def print_tree(node, depth=1):\n for child in node:\n print(\" \" * depth + child.get_name())\n print_tree(child, depth+1)", "def print_tree(self, root=None, level=0):\n if not root:\n root = self.root\n\n for lv in range(level):\n print ' ',\n try:\n print root.get_properties()\n except Exception as ex:\n print 'data: %s' % repr(root.get_properties())\n\n query = 'START s=node(%s)\\n' % root._id +\\\n 'MATCH (s)-[r]->(c)\\n' + \\\n 'RETURN c'\n records = neo4j.CypherQuery(self.db_handler, query).execute()\n\n nodes = [record.values[0] for record in records.data]\n for node in nodes:\n self.print_tree(node, level + 1)", "def print_tree(account, level=0):\r\n \"\"\" In the example output below, \"GE\" is the root account, \"Jet Engines\"\r\n and \"Appliances\" are first-degree ChildAccounts, and \"DoD Contracts\"\r\n and \"Washing Machines\" are second-degree ChildAccounts.\r\n\r\n > print_tree(general_electric)\r\n GE (Manufacturing, R&D): Daniel Testperson\r\n Jet Engines (Manufacturing, R&D, Aerospace): Daniel Testperson\r\n DoD Contracts (Defense, R&D, Aerospace): William Testperson\r\n Appliances (Manufacturing, Consumer Goods): Janet Testperson\r\n Washing Machines (Consumer Goods): Janet Testperson\r\n \"\"\"\r\n markets_output = \"\"\r\n # work a little magic to properly format the names of the market segments\r\n # specifically strip off the leading and trailing quotes and add a\r\n 
# separating comma\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n\r\n # print a row to console\r\n print(\"{arrow}> {ac_name} ({markets}): {rep}\"\r\n .format(arrow=2*level*\"-\",\r\n ac_name=account.name,\r\n markets=markets_output[:-2],\r\n rep=account.get_sales_rep()))\r\n\r\n # recursively call print on the children (if any) Base Case: no children\r\n for child in account.get_children():\r\n print_tree(child, level=level+1)", "def __repr__(self, depth=0):\n indent = 2 * self.depth * ' '\n if self.is_leaf:\n return indent + \"Leaf({}, {}, {})\".format(self.centre, self.size, self.points)\n else:\n s = indent + \"Node({}, {}, [\\n\".format(self.centre, self.size)\n for child in self.children:\n s += child.__repr__(depth + 1) + ',\\n'\n s += indent + '])'\n return s", "def print_bi_tree(self):\n\n to_print = [self]\n # current = None\n\n while to_print:\n current = to_print.pop(0)\n if current:\n print(f'\\t{current.data}')\n to_print.append(current.left)\n to_print.append(current.right)", "def __repr__(self):\n lines = []\n nodes = [(self.root, 0)]\n while nodes:\n node, indent = nodes.pop()\n name = str(node) if node else 'None'\n lines.append(' ' * indent + name)\n if node:\n nodes.append((node.child[True], indent + 1))\n nodes.append((node.child[False], indent + 1))\n\n return os.linesep.join(lines)", "def __str__(self) -> str:\n\n if not self.root:\n return 'Empty RB Tree'\n\n root, bfs_queue, height = self.root, queue.SimpleQueue(), self.root.subtree_height()\n track = {i: [] for i in range(height + 1)}\n bfs_queue.put((root, 0, root.parent))\n\n while bfs_queue:\n n = bfs_queue.get()\n if n[1] > height:\n break\n track[n[1]].append(n)\n if n[0] is None:\n bfs_queue.put((None, n[1] + 1, None))\n bfs_queue.put((None, n[1] + 1, None))\n continue\n bfs_queue.put((None, n[1] + 1, None) if not n[0].left else (n[0].left, n[1] + 1, n[0]))\n bfs_queue.put((None, n[1] + 1, None) if not n[0].right else (n[0].right, n[1] + 1, n[0]))\n\n spaces = 12 * (2 ** (height))\n ans = '\\n' + '\\t\\tVisual Level Order Traversal of RBtree'.center(spaces) + '\\n\\n'\n for i in range(height):\n ans += f\"Level {i + 1}: \"\n for n in track[i]:\n space = int(round(spaces / (2 ** i)))\n if not n[0]:\n ans += ' ' * space\n continue\n ans += \"{} ({})\".format(n[0], n[2].value if n[2] else None).center(space, \" \")\n ans += '\\n'\n return ans", "def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")", "def print_tree(self, feature_names, class_names, show_details=True):\n self.tree_.print_tree(feature_names, class_names, show_details)", "def print_tree(self):\n tree_list = self.traverse_tree(self.root, 0, [])\n depth = self.get_depth(self.root, 0)\n\n for i in range(depth - 1):\n for j in range(len(tree_list[i])):\n if tree_list[i][j] is None:\n tree_list[i + 1].insert(2 * j, None)\n tree_list[i + 1].insert(2 * j + 1, None)\n\n tree_matrix = [['|' for i in range(2 ** depth - 1)] for j in range(depth)]\n for i in 
range(depth):\n for j in range(len(tree_list[i])):\n if tree_list[i][j] is not None:\n tree_matrix[i][2 ** (depth - i - 1) - 1 + j * 2 ** (depth - i)] = tree_list[i][j]\n return tree_matrix", "def show_tree(self):\n G, vertex_dict = self.tree().graph()\n root = self.tree().root()\n vertical_list = []\n horizontal_list = []\n no_component_list = []\n for i, xi in vertex_dict.items():\n if xi.is_equal(root):\n root_index = i\n if self.is_component(xi):\n if xi.type() == \"II\":\n vertical_list.append(i)\n else:\n horizontal_list.append(i)\n print(i, \": \", xi)\n else:\n no_component_list.append(i)\n vertex_colors = {'red': vertical_list, 'blue': horizontal_list,\n 'grey': no_component_list}\n G.show(vertex_colors=vertex_colors, tree_root=root_index, layout='tree')", "def recursive_print(root: Node, depth=0):\n if not root:\n return\n print(\n (\" \" * depth)\n + f\"({root.resource.order}, exec={root.resource.execution_ms:.3f}, \"\n + f\"ttfb={root.resource.time_to_first_byte_ms}, delay={root.resource.fetch_delay_ms:.3f}, \"\n + f\"size={root.resource.size} B, {ResourceType(root.resource.type).name}, {root.resource.url})\"\n )\n for next_node in root.children:\n recursive_print(next_node, depth + 1)", "def view_tree(expr):\n print (srepr(expr))", "def show_tree(obj,d=0):\n print \"%s%s\" % (\"-\"*d,obj.__class__.__name__)\n if 'get_children' in dir(obj):\n for a in obj.get_children(): show_tree(a,d+1)", "def print_tree(self, paths=None, color=True):\n if not paths:\n paths = ('/', )\n is_first = True\n ns_color = COLOR_YELLOW if color else ''\n end_color = COLOR_NORM if color else ''\n for root_mount_id, namespaces in sorted(self.ns_for_root_id.items()):\n if is_first:\n is_first = False\n else:\n print(\"\")\n assert namespaces\n if len(namespaces) >= 2:\n root_ns = self.items[root_mount_id].mount_ns\n print(\"Namespaces {0}{2}{1} starting with {0}{3}{1}\".format(\n ns_color,\n end_color,\n ', '.join(sorted((format_ns(ns) for ns in namespaces))),\n self.format_ns_with_processes(root_ns),\n ))\n else:\n root_ns = list(namespaces)[0]\n print(\"{0}Namespace {2}{1}\".format(\n ns_color,\n end_color,\n self.format_ns_with_processes(root_ns),\n ))\n self.print_tree_entry(root_mount_id, root_ns, paths, color)", "def printTree(tree, fromNode=None, printDirectory = False):\r\n if fromNode == None:\r\n fromNode = tree.root\r\n print fromNode.name\r\n tree.printChildrenOfNode(fromNode, printDirectory)", "def print_tree(node, spacing=\"\"):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n print (spacing + \"Predict\", node.predictions)\n return\n\n # Print the question at this node\n print(spacing + node.question.text())\n\n # Call this function recursively on the true branch\n print (spacing + '--> True:')\n print_tree(node.trueBranch, spacing + \" \")\n\n # Call this function recursively on the false branch\n print (spacing + '--> False:')\n print_tree(node.falseBranch, spacing + \" \")", "def debug_node(fdt, node, depth, path):\n depth += 1\n path = path + node.get_name() + b'/'\n print()\n print(colored(\"Tree:\", 'cyan'), \"-> \", colored(path.decode('ascii'), 'green'), '{')\n for key in node.keys():\n print(colored(\"Node:\", 'cyan'), \"-> \", \" \" * depth, key, \"=\", colored(node[key], 'yellow'))\n for leaf in node.get_children():\n debug_node(fdt, leaf, depth, path)\n print(colored(\"Tree:\", 'cyan'), \"-> \", \" \" * depth, \"};\")", "def tree_string(self, indent=0): # pragma: no cover\r\n return \"\"", "def display_viz(self, width=60, label_max_len=3):\n output = ''\n 
last_children = [(self, width)] # Nodes to be added next loop\n for i in range(self.depth + 1):\n depth_output = ''\n depth_children = []\n for (node, subtree_width) in last_children:\n label = ' ' if node is None else str(node.label)[:label_max_len]\n this_output = label.center(subtree_width)\n this_children = [] # Children from this item\n cum_width = 0 # Cumulative character-width of all subtrees\n cum_cols = 0 # Cumulative maximum node-width of all subtrees\n # If no children, propogate the empty spaces below terminal\n if not node or not node.children:\n cum_cols += 1\n cum_width += subtree_width\n this_children.append((None, subtree_width))\n # If children, fill-in this_output with '_' to first/last child label\n else:\n children_cols = [c.n_cols for c in node.children]\n total_cols = sum(children_cols)\n for child, child_cols in zip(node.children, children_cols):\n # Convert each child's 'cols' into character spacing\n cum_cols += child_cols\n cum_ratio = cum_cols / total_cols\n target_width = math.ceil(cum_ratio * subtree_width) - cum_width\n remaining_width = subtree_width - cum_width\n child_width = min(target_width, remaining_width)\n # Add record and update tracked values\n this_children.append((child, child_width))\n cum_width += child_width\n # Add lines to the output\n start_padding = this_children[0][1] // 2 - 1 # Midpoint of first child\n end_padding = subtree_width - (this_children[-1][1] // 2) # ..of last child\n with_line = ''\n for i, v in enumerate(this_output):\n with_line += '_' if (i > start_padding and i < end_padding and v == ' ') else v\n this_output = with_line\n depth_output += this_output\n depth_children += this_children\n last_children = depth_children\n if last_children:\n depth_output += '\\n'\n output += depth_output\n return output", "def printSubmissionTree(self, padding=\"\"):\r\n print padding + \"Author: %s\" %self.authorId\r\n print padding + \"Content: %s\" %self.content\r\n\r\n if self.comments:\r\n print padding + \"Children:\" \r\n for c in self.comments:\r\n c.printSubmissionTree(padding+\" \")", "def repr_tree(tree, viz, current_node, rec_depth, color_map, parameters):\r\n for child in tree.children:\r\n if child.operator is None:\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if child.label is None:\r\n viz.node(this_trans_id, \"tau\", style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(child, color_map)\r\n viz.node(this_trans_id, str(child), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n condition_wo_operator = child.operator == pt_operator.Operator.XOR and len(\r\n child.children) == 1 and child.children[0].operator is None\r\n if condition_wo_operator:\r\n childchild = child.children[0]\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if childchild.label is None:\r\n viz.node(this_trans_id, str(childchild), style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(childchild, color_map)\r\n viz.node(this_trans_id, str(childchild), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n viz.attr('node', shape='circle', fixedsize='true', width=\"0.6\",\r\n fontsize=\"14\")\r\n op_node_identifier = str(uuid.uuid4())\r\n node_color = get_color(child, color_map)\r\n viz.node(op_node_identifier, str(child.operator), color=node_color, fontcolor=node_color)\r\n 
viz.edge(current_node, op_node_identifier)\r\n viz = repr_tree(child, viz, op_node_identifier, rec_depth + 1, color_map, parameters)\r\n return viz", "def get_tree_string(self, node):\n string = \"\"\n for child in sorted(node.children):\n string += node.depth * \"\\t\"\n if node.depth > 0:\n string += \"|\"\n string += node.feature + \"=\" + child\n if node.children[child].is_leaf:\n string += \":\" + node.children[child].pred + \"\\n\"\n else:\n string += \"\\n\" + self.get_tree_string(node.children[child])\n\n return string", "def pretty_print(self):\n for dtr in self.dtrs:\n dtr.pretty_print(indent=2)", "def print_tree_symbol(c, indent=1):\n if isinstance(c, Selector):\n print(\" \" * indent, \"--?\"),\n elif isinstance(c, (Sequence, Iterator)):\n print(\" \" * indent, \"-->\"),\n elif isinstance(c, ParallelOne):\n print(\" \" * indent, \"==?\"),\n elif isinstance(c, ParallelAll):\n print(\" \" * indent, \"==>\"),\n elif isinstance(c, Loop):\n print(\" \" * indent, \"<->\"),\n elif isinstance(c, Invert):\n print(\" \" * indent, \"--!\"),\n else:\n print(\" \" * indent, \"--|\"),\n print(c.name)", "def show_as_tree(\n self,\n *,\n format_func: typing.Callable[[HierarchicalCategory], str] = str,\n maxdepth: typing.Union[None, int] = None,\n root: typing.Union[None, HierarchicalCategory, str] = None,\n ) -> str:\n if root is None:\n top_level_nodes = (node for node in self.values() if not node.parents)\n else:\n if not isinstance(root, HierarchicalCategory):\n root = self[root]\n top_level_nodes = [root]\n return \"\\n\".join(\n (\n self._show_subtree(\n node=top_level_node, format_func=format_func, maxdepth=maxdepth\n )\n )\n for top_level_node in top_level_nodes\n )", "def PrintTree(self,num=0):\n self.ResetTarget()\n self.PrintTreeInt(num)\n return self.target", "def showOrdered(self):\n pybtlib.showTree.restype = None\n pybtlib.showTree.argtypes = [ctypes.POINTER(Tree)]\n return pybtlib.showTree(ctypes.byref(self))" ]
[ "0.85880834", "0.8403361", "0.81731063", "0.81423867", "0.813626", "0.8090559", "0.8077962", "0.80575883", "0.8041075", "0.7977933", "0.79050046", "0.7841601", "0.78335243", "0.7790531", "0.77501893", "0.77443475", "0.7728508", "0.7723257", "0.7723172", "0.7723172", "0.7718285", "0.7685349", "0.7683175", "0.75958353", "0.7575059", "0.756061", "0.75220615", "0.75103784", "0.7479931", "0.74740845", "0.7465253", "0.74635726", "0.7423612", "0.74104935", "0.7392815", "0.7378553", "0.7368171", "0.734961", "0.73444146", "0.73161566", "0.73143387", "0.7304793", "0.73036194", "0.72951394", "0.72908473", "0.7254495", "0.72521526", "0.7202906", "0.718342", "0.71703875", "0.71375656", "0.71375585", "0.7136191", "0.7134874", "0.7120847", "0.7120291", "0.7105455", "0.71053815", "0.7080518", "0.7075252", "0.70632774", "0.705907", "0.70577174", "0.7043624", "0.7014542", "0.6998702", "0.6997184", "0.6990842", "0.69824344", "0.6977204", "0.6961617", "0.695439", "0.69388515", "0.6935289", "0.69135666", "0.69089556", "0.6877596", "0.6866419", "0.6841373", "0.6836037", "0.6818717", "0.68002725", "0.677576", "0.67741984", "0.67673004", "0.67547643", "0.67500514", "0.6735006", "0.6731092", "0.67285544", "0.672297", "0.66979057", "0.66942596", "0.6691293", "0.6689613", "0.6680884", "0.66768396", "0.6666033", "0.666566", "0.6663172" ]
0.7580208
24
Can overload this to choose different features
def feature_subset(self,node,db,labels,ids): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature():\n pass", "def support(self):", "def feat():\n pass", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_overflow\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_datetime\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_errors\", \"feature\"), flags=TE)", "def __init__(self, features=None):\n self.features = features", "def findFeatures(self):\n\t\tpass", "def __init__(self, features=None, **kwargs):\n super(FeatureIO, self).__init__(**kwargs)\n self.features = features", "def features(self, state, action, next_state):\n raise NotImplementedError", "def __init__(self, features, plot_design, feature_description_class):\n super().__init__(features, plot_design, feature_description_class)", "def get_features(self, request, **kwargs):\n raise NotImplementedError()", "def __call__(self, *args, **kwargs):\n self.features = dict((k, v()) for k, v in self.features.items())\n return self.features", "def check_supported_features(self):", "def __init__(self, **kwargs):\n FeatureDefinition.__init__(self)\n\n nbTypes = self._getTypeNumber(kwargs)\n \n print(\"BETTER FEATURES\")\n \n \n block_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!\n (\"xywh\", Pipeline([\n ('selector', NodeTransformerXYWH_v2()),\n #v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling\n ('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling\n ])\n )\n , (\"neighbors\", Pipeline([\n ('selector', NodeTransformerNeighbors()),\n #v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling\n ('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling\n ])\n )\n , (\"1hot\", Pipeline([\n ('1hot', Node1HotFeatures()) #does the 1-hot encoding directly\n ])\n )\n ])\n grid_line_transformer = GridLine_NodeTransformer_v2()\n \n self._node_transformer = TransformerListByType([block_transformer, grid_line_transformer]) \n \n edge_BB_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!\n (\"1hot\", Pipeline([\n ('1hot', Edge1HotFeatures(PageNumberSimpleSequenciality()))\n ])\n )\n , (\"boolean\", Pipeline([\n ('boolean', EdgeBooleanFeatures_v2())\n ])\n )\n , (\"numerical\", Pipeline([\n ('selector', EdgeNumericalSelector()),\n #v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling\n ('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling\n ])\n )\n ] )\n edge_BL_transformer = DU_ABPTableRG.Block2GridLine_EdgeTransformer()\n edge_LL_transformer = DU_ABPTableRG.GridLine2GridLine_EdgeTransformer()\n self._edge_transformer = TransformerListByType([edge_BB_transformer,\n edge_BL_transformer,\n edge_BL_transformer, # useless but required\n edge_LL_transformer \n ])\n \n self.tfidfNodeTextVectorizer = None #tdifNodeTextVectorizer", "def __init__(self, type_infeatures=None, type_outfeatures=None):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n self._format_default_functions()\n self.set_functions(type_infeatures, type_outfeatures)\n ## Check descriptormodel\n 
self._assert_correctness()", "def __init__(self, *args, **kwargs):\n if len(args) > len(self.features):\n raise PydmrsTypeError(\"{} takes {} arguments, but {} were given\".format(type(self).__name__,\n len(self.features),\n len(args)))\n for i, value in enumerate(args):\n setattr(self, self.features[i], value)\n for feature, value in kwargs.items():\n setattr(self, feature, value)\n for feature in self.features:\n if not hasattr(self, feature):\n setattr(self, feature, None)", "def supported_features(self):\n return SUPPORT_FLAGS_HEATER", "def supported_features(self):\n if self._is_spotify_sroom:\n return SUPPORT_RAUMFELD_SPOTIFY\n return super().supported_features", "def extractFeatures(self, datum):\n abstract", "def supported_features(self):\n return MEURAL_SUPPORT", "def extract_features(self, *args, **kwargs):\n return self(*args, **kwargs)", "def supports(self, x):\n return True", "def simple_descriptor(patch):\n feature = []\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n return feature", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def advanced_features(self):\n return self._advanced_features", "def extract_feature(self, article) :\n pass", "def __init__(self, feature, assertion=NoAssertion):\n self.feature = feature\n self.assertion = assertion", "def parse_features(self, skip=...):\n ...", "def parse_features(self, skip=...):\n ...", "def __init__(self):\n \n self.csv_features = {} # Create dictionary to load the CSV features\n self.meta_features = [] # Create list to load the metadata features", "def __call__(self, feature):\n return self.is_enabled(feature)", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_steppers'] = True", "def supported_features(self):\n return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE", "def get_style1_features(self):\n return self.style1_features", "def f_supports(self, data):\n return True", "def basic(self):\n pass", "def supported_features(self):\n return SUPPORT_ARLO", "def __init__(self, feature, threshold, hardStump, checkGreaterThanThreshold = None, beta = None):\n self.feature = feature\n self.threshold = threshold\n self.hardStump = hardStump\n self.checkGreaterThanThreshold = checkGreaterThanThreshold\n self.beta = beta\n self.enabled = True", "def load_features(self, features):\n pass\n # self.features = features", "def function(self):\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def get_all_features(self) :\n raise NotImplementedError", "def _create_features_dropdown_callbacks(self, **kwargs):\n raise NotImplementedError", "def supported_features(self):\n if self._slave_mode and self._features:\n return self._features\n\n if self._playing_localfile or self._playing_spotify or self._playing_webplaylist or self._playing_mass:\n if self._state in [STATE_PLAYING, STATE_PAUSED]:\n self._features = \\\n SUPPORT_SELECT_SOURCE | SUPPORT_SELECT_SOUND_MODE | 
SUPPORT_PLAY_MEDIA | SUPPORT_GROUPING | SUPPORT_BROWSE_MEDIA | \\\n SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | \\\n SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_PAUSE | \\\n SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | SUPPORT_SHUFFLE_SET | SUPPORT_REPEAT_SET \n if not self._playing_mass_radio:\n self._features |= SUPPORT_SEEK\n else:\n self._features = \\\n SUPPORT_SELECT_SOURCE | SUPPORT_SELECT_SOUND_MODE | SUPPORT_PLAY_MEDIA | SUPPORT_GROUPING | SUPPORT_BROWSE_MEDIA | \\\n SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | \\\n SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_PAUSE | \\\n SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | SUPPORT_SHUFFLE_SET | SUPPORT_REPEAT_SET\n\n elif self._playing_stream or self._playing_mediabrowser:\n self._features = \\\n SUPPORT_SELECT_SOURCE | SUPPORT_SELECT_SOUND_MODE | SUPPORT_PLAY_MEDIA | SUPPORT_GROUPING | SUPPORT_BROWSE_MEDIA | \\\n SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | \\\n SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_PAUSE\n if self._state in [STATE_PLAYING, STATE_PAUSED] and (self._playing_mediabrowser):\n self._features |= SUPPORT_SEEK\n\n elif self._playing_liveinput:\n self._features = \\\n SUPPORT_SELECT_SOURCE | SUPPORT_SELECT_SOUND_MODE | SUPPORT_PLAY_MEDIA | SUPPORT_GROUPING | SUPPORT_BROWSE_MEDIA | \\\n SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | \\\n SUPPORT_STOP | SUPPORT_PLAY\n\n return self._features", "def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def regular(self):", "def enter_feature(self, feature):\n print feature.what", "def __init__(self, value, inherited_from):\n super(Feature, self).__init__()\n self.inherited_from = inherited_from", "def _extract_feature(self,f):\n if callable(f): \n return f()\n elif type(f) == tuple:\n return f[0](*list(f[1:]))", "def add_features(self, obj, annotation):\n if annotation['problem']:\n obj.add(folia.Feature, subset='problem', cls=annotation['problem'])\n if annotation['pos']:\n obj.add(folia.Feature, subset='pos', cls=annotation['pos'])", "def __init__(self,\n features,\n plot_design,\n feature_description_class,\n categorical_features,\n feature_descriptions,\n feature_mapping,\n categorical_suffix=\"_categorical\"\n ):\n self.categorical_columns = categorical_features\n self.feature_descriptions = feature_descriptions\n self.feature_mapping = feature_mapping\n self.categorical_suffix = categorical_suffix\n super().__init__(features, plot_design, feature_description_class)", "def features_selection(x_train, y_train,x_val,x_test,model,feature_list):\n n_features = x_train.shape[1]\n print(\"n_features original: \",n_features)\n if model == 'LR':\n estimator = LogisticRegression(random_state = 442, penalty = 'elasticnet', solver= 'saga',l1_ratio=0.5)\n if model == 'SVM':\n estimator = svm.LinearSVC(class_weight = 'balanced', random_state = 442)\n if model == 'SGD':\n estimator = SGDClassifier(class_weight = 'balanced', random_state = 442)\n if model == 'ADA':\n estimator = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, class_weight = 'balanced'),random_state = 442)\n if model == 'RF':\n estimator = RandomForestClassifier(random_state=442, class_weight = 'balanced')\n if model == 'GBT':\n estimator = GradientBoostingClassifier(random_state = 442)\n if model == 'XGBT':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = XGBClassifier(seed = 442,eval_metric = 
'auc', scale_pos_weight = ratio)\n if model == 'LightGB':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = lgb.LGBMClassifier(seed = 442, scale_pos_weight = ratio)\n\n print(\"Searching RFE\")\n classifier = RFE(estimator=estimator, step=1)\n model = Pipeline([('classifier', classifier)])\n parameters = {'classifier__n_features_to_select': [int(n_features*0.25),int(n_features*0.5),int(n_features*0.75),n_features]}\n grid = GridSearchCV(model, parameters, cv=3)\n grid.fit(x_train, y_train)\n num_features = grid.best_params_\n num_features = re.sub(r'[^\\d]','',str(num_features))\n print(\"Optimal number of features\",num_features)\n\n print(\"SelectKBest\")\n selector = SelectKBest(f_classif, k=int(num_features)) #we pass the \"optimal number of features\" discovered in the previous pass\n selector.fit(x_train, y_train)\n x_train = selector.transform(x_train).astype('float32')\n x_val = selector.transform(x_val).astype('float32')\n x_test = selector.transform(x_test).astype('float32')\n feature_list = [feature_list[i] for i in selector.get_support(indices=True)]\n return x_train, x_val, x_test,feature_list, num_features", "def __contains__(self, feature):\n return feature == 'cvarsort' or feature in self.features", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def getFeatures(self, state, action, thisAgent):\n util.raiseNotDefined()", "def special_features(self):\r\n return self._special_features", "def matches(self, feature):\n pass", "def detect(self, features):\n pass # TODO", "def generateFeatures(self, data):\n pass", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def _get_feature_support(self):\r\n res = super(AcquirerbKash, self)._get_feature_support()\r\n res['fees'].append('bkash')\r\n return res", "def __init__(self, categorical_features: List[str],\n numerical_features: List[str]) -> None:\n self.categorical_features = categorical_features\n self.numerical_features = numerical_features\n self.pipeline = None", "def produce_features(self, chip):\n raise NotImplementedError(\"produce_features() not implemented\")", "def _derived_features(self):\n for created_feature, creator in self.feature_creators.items():\n self.parameters[created_feature] = creator(self.parameters)", "def features(self, features):\n\n self._features = features", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def __call__(self):\n raise NotImplementedError", "def advanced_features(self, advanced_features):\n\n self._advanced_features = advanced_features", "def getFeatures(self, gameState, action):\n # features = util.Counter()\n # successor = self.getSuccessor(gameState, action)\n # features['successorScore'] = self.getScore(successor)\n # return features\n if self.isOffensive:\n return self.getOffensiveFeatures(gameState, action)\n else:\n return self.getDefensiveFeatures(gameState, action)", "def use(self):", "def __new__(cls, component, *args, **kwargs):\n if cls.dependencies:\n for dependency in cls.dependencies:\n component.load_feature(dependency, 
*args, **kwargs)\n return super(Feature, cls).__new__(cls)", "def exposes_features(self):\n return self._features_op is not None", "def __call__(self, **kwargs):\n raise NotImplementedError", "def __gia(self, *args, **kwargs):\n pass", "def __init__(self):\n # Calls super constructor:\n super().__init__()\n\n # This list holds all the information for computing each 'group' of features:\n self.features_group_list = []", "def train(self):\n\t\traise NotImplementedError", "def __init__(self, opts = None):\n if opts is not None:\n self.opts = opts\n else:\n self.opts = Options()\n \n # Initialize dictionary of features\n self.init_features()", "def flavors(self, **kwargs):\n raise NotImplementedError", "def supported_features(self):\n return self._supported_features", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features", "def feature(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___Feature]:", "def __init__(self, features, labels, bigdl_type=\"float\"):\n self.feature = features[0]\n self.features = features\n self.label = labels[0]\n self.bigdl_type = bigdl_type\n self.labels = labels", "def __call__(self, # pylint: disable=arguments-differ, useless-super-delegation\n features, labels, params=None, config=None):\n return super(BaseModel, self).__call__(features, labels, params, config)", "def __contains__(self, feature):\n return feature in self.features", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def make_feature_layers(self, config):\n raise NotImplementedError" ]
[ "0.72120637", "0.6650957", "0.6531752", "0.63173395", "0.630279", "0.62784165", "0.62421846", "0.62280315", "0.6188825", "0.6120786", "0.61044574", "0.6051126", "0.5929795", "0.5882283", "0.5840704", "0.57487196", "0.5733037", "0.5721487", "0.57165647", "0.56840765", "0.5680112", "0.56228626", "0.561563", "0.561563", "0.561563", "0.561563", "0.5607525", "0.558441", "0.55811983", "0.5568737", "0.5568737", "0.5566728", "0.5527336", "0.5495683", "0.5490016", "0.5489854", "0.54795283", "0.54771644", "0.5475689", "0.5474593", "0.5463767", "0.5463094", "0.54614764", "0.54614764", "0.54614764", "0.54553545", "0.54553545", "0.54553545", "0.54522", "0.54498756", "0.5445925", "0.54407424", "0.5438757", "0.5436072", "0.5435552", "0.5431249", "0.5426393", "0.5419394", "0.5417115", "0.5413539", "0.5411056", "0.5410949", "0.5410949", "0.5410949", "0.5410949", "0.5407457", "0.54002315", "0.53985584", "0.5392632", "0.53892946", "0.5384526", "0.5384526", "0.53748506", "0.5368316", "0.5363147", "0.5360792", "0.53601986", "0.53542596", "0.53542596", "0.53542596", "0.5351691", "0.53476477", "0.534435", "0.5344012", "0.5339178", "0.53386134", "0.5338588", "0.5325874", "0.5318906", "0.53122145", "0.5308974", "0.5306958", "0.5305092", "0.5300096", "0.5298758", "0.5294533", "0.52916354", "0.5287731", "0.5283563", "0.5281939", "0.5275353" ]
0.0
-1
Given an indexed database, greedily and recursively learns the split value for the subtree of the indicated node. Return value is the number of mistakes made by the decision tree. Missing values are handled properly as indicating a 'don't care' value that gets passed down to both sides of the tree.
def greedy_learn(self,node,db,labels,ids): if node.depth >= self.maxdepth or len(ids) <= self.minexamples: #terminate recursion node.pick_best_label(db,labels,ids) err = misclassification_error([labels[id] for id in ids]) if err > 0: print "Reached a leaf and had to make some sacrifices, cost",err print " depth",node.depth print " labels",[labels[id] for id in ids] return err features = self.feature_subset(node,db,labels,ids) cost = node.pick_best_split(db,labels,ids,features) #do a split if node.type == 'v': #base case: no misclassifications """ if cost>0: print "greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero" print "cost=",cost,"misclassification=",misclassification_error([labels[id] for id in ids]) print "# of ids:",len(ids) for i in ids: print "id",i,",", for k in range(db.numFeatures()): if db[k,i] != None: print k,"=",db[k,i],",", print "label",labels[i] raw_input() """ return 0 elif node.type == 's': #print "Picked feature",node.feature,"split" #do a discrete split node.children = dict() #select sub-indices Eids = defaultdict(list) noneids = [] for id in ids: v = db[node.feature,id] if v is None: #item doesn't exist, it's a missing value noneids.append(id) else: Eids[v].append(id) #print " split sizes:",[len(x) for x in Eids.values()] #print " None ids:",len(noneids) ids = None errors = 0 for v,vids in Eids.iteritems(): #recurse c = DecisionTreeNode(node) #print "Recursing on value",v #print " ids:",vids errors += self.greedy_learn(c,db,labels,vids+noneids) node.children[v] = c if c.depth > self.deepest: self.deepest = c.depth print "Decision tree learner: Reached node with depth",self.deepest return errors else: #do an inequality split assert node.type == 'i' #print "Picked feature",node.feature,"inequality value",node.value,"cost",cost leftids = [] rightids = [] for id in ids: if db[node.feature,id] is not None: if db[node.feature,id] <= node.value: leftids.append(id) else: rightids.append(id) else: leftids.append(id) rightids.append(id) if len(rightids) == len(ids) or len(leftids) == len(ids): #due to missing values, this split is useless errors = misclassification_error([labels[id] for id in ids]) print "useless split on feature",node.feature,"value",node.value,"misclassification error",errors print "Left size",len(leftids),"right size",len(rightids) raw_input() node.pick_best_label(db,labels,ids) return errors #clear memory associated with ids list del ids[:] ids = None #print "Left size",len(leftids),"right size",len(rightids) c1 = DecisionTreeNode(node) c2 = DecisionTreeNode(node) #left side errors = self.greedy_learn(c1,db,labels,leftids) #right side errors += self.greedy_learn(c2,db,labels,rightids) #restore index node.children = {0:c1,1:c2} if c1.depth > self.deepest: self.deepest = c1.depth print "Decision tree learner: Reached node with depth",self.deepest return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", "def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = 
self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n 
self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def data_split(df, best_feature, info_gain_dict, dt_dict,\r\n curr_node, depth, continous = False):\r\n \r\n depth -= 1\r\n # decrease the depth count\r\n no_data = False\r\n # default flag for data check\r\n match_threshold_df = df[df[best_feature] == info_gain_dict[best_feature][0]]\r\n # subset the data if threshold is matched\r\n if not len(match_threshold_df):\r\n # no more data points\r\n no_data = True\r\n match_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n \r\n mismatch_threshold_df = df[df[best_feature] != info_gain_dict[best_feature][0]]\r\n # subset the data if there is a mismatch\r\n if not len(mismatch_threshold_df):\r\n # if no more data points\r\n no_data = True\r\n mismatch_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n decision_tree(match_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"equal\", depth=depth, no_data = no_data)\r\n # function call to grow tree on the left side\r\n decision_tree(mismatch_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"not_equal\", depth=depth, no_data = no_data)\r\n # function call to grow the tree on the right side\r", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true 
branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)", "def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertAlmostEqual(self.decision_tree.get_root_node().node_split.criterion_value,\n 2. 
* -0.3 * math.log2(0.3) - 0.4 * math.log2(0.4))", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def leafScore(self) :\n return 0", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def split_count(self) -> int:\n return int(self.graph_tuple_stats.split_count or 0)", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def 
testTreeF(node, test):\n total = len(test)\n success = 0\n for d in test:\n i = searchTreeF(node, d)\n if i == d[-1]:\n success += 1\n return success / total", "def get_next_split ( self, feature_matrix: np.ndarray, target_array: np.ndarray, tree_split: TreeSplits):\n # If only 1 y value, make a leaf node\n if len ( set ( target_array ) ) == 1:\n tree_split.updateTreeValues (\n feature_column = None,\n feature_value = None,\n node_type = None,\n nodes = {},\n children = target_array,\n )\n return tree_split\n\n # Get the presplit entropy\n presplit_entropy = self.evaluate_function ( target_array )\n\n column_values = {}\n for k, v in self.map_column_node_type.items():\n # If there's only one value in feature matrix \"X\", set the split value to infinity\n if len ( set ( feature_matrix [ :, k ] ) ) == 1:\n value = np.inf\n split = None\n class_ratios = 1\n elif v == \"continuous\":\n # Get the best possible continuous split for the column\n split, value, class_ratios = self.get_optimal_continuous_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n else:\n # Get the split value for the discrete column\n value, class_ratios = self.get_optimal_discrete_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n split = None\n\n column_values [ k ] = ( split, value, class_ratios )\n\n # Get the column with the largest gain ratio\n col_idx_with_min_value = max (\n column_values,\n key = lambda x: ( presplit_entropy - column_values.get ( x ) [ 1 ] )\n / column_values.get ( x ) [ 2 ],\n )\n\n # If stopping criteria are met or all splits are infinite, terminate the process\n if (\n self.early_stopping_comparison (\n column_values.get ( col_idx_with_min_value ) [ 1 ], self.early_stopping_value\n )\n ) or not np.isfinite ( column_values.get ( col_idx_with_min_value ) [ 1 ] ):\n self.get_terminal_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n node = tree_split,\n feature_matrix = feature_matrix ,\n target_array = target_array,\n )\n return tree_split\n\n # If the best split is continuous, add a continuous node\n if self.map_column_node_type.get ( col_idx_with_min_value ) == \"continuous\":\n return self.get_continuous_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [col_idx_with_min_value ] [ 0 ],\n feature_matrix = feature_matrix,\n target_array = target_array,\n node = tree_split,\n )\n\n # Otherwise, add a discrete node.\n else:\n return self.get_discrete_node (\n feature_matrix = feature_matrix,\n target_array = target_array,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n feature_column = col_idx_with_min_value,\n node = tree_split,\n )\n # End get_next_split", "def test_gain_ratio(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def test_gain_ratio(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n 
min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def _fetch(tree, impurity_crit, dataSet, saved):\n\t\tif tree.cut_off is None:\n\t\t\treturn len(dataSet)*impurity_crit(dataSet), 1\n\n\t\telse:\n\t\t\tD1, D2 = DecisionTree._binarySplit(dataSet, *tree.cut_off)\n\t\t\tleft_impurity, left_leaves = DecisionTree._fetch(tree.left, impurity_crit, D1, saved)\n\t\t\tright_impurity, right_leaves = DecisionTree._fetch(tree.right, impurity_crit, D2, saved)\n\n\t\t\t# find node and set\n\t\t\tsaved.setdefault('node',[]).append(tree)\n\t\t\tsaved.setdefault('set', []).append(dataSet)\n\t\t\t# calculate g(t) for current TreeNode\n\t\t\tg = (len(dataSet)*impurity_crit(dataSet)-left_impurity-right_impurity) / \\\n\t\t\t\t(left_leaves + right_leaves - 1)\n\t\t\tsaved.setdefault('G',[]).append(g)\n\t\t\t\n\t\treturn left_impurity+right_impurity, left_leaves+right_leaves", "def test_depth_returns_correct_value_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.depth() == 3", "def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n 
self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def findLeftContext(tree, start, ignore):\t\n nrOfClosingBrs = 0\n nrOfOpeningBrs = 0\n firstPass = True\n for currentIndex in range(start-1,-1,-1):\n if tree[currentIndex].symbol in ignore:\n continue\n elif tree[currentIndex].symbol == \"[\":\n if not firstPass:\n nrOfOpeningBrs = nrOfOpeningBrs + 1\n elif tree[currentIndex].symbol == \"]\":\n nrOfClosingBrs = nrOfClosingBrs + 1\n elif nrOfClosingBrs == nrOfOpeningBrs:\n return(tree[currentIndex])\n firstPass = False\n return(emptyModule())", "def test_tree_4_nodes_left_unbalanced_return_1(balanced_3_nodes):\n balanced_3_nodes.insert(8)\n assert balanced_3_nodes.balance() == 1", "def lmbd(self, lamb):\n\t n = self.nodes\n\n\t \t# The top_k_nodes is a list of all nodes in descending\n\t # order of influence\n\t top_k_nodes = self.top_k(self.nodes)\n\t for i in range(n):\n\t\t\tself.deactivate_all()\n\t\t\tinitially_active = top_k_nodes[:i]\n\n\t\t\ttotal_contrib = i + 1\n\t\t\tfor node in initially_active:\n\t\t\t\ttotal_contrib += self.v(node)\n\n\t\t\tcoverage = total_contrib*1.00/n\n\t\t\tif coverage >= lamb:\n\t\t\t\treturn top_k_nodes[:i]", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev 
node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))", "def test_contains_returns_false_on_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.contains(25) is False", "def create_subtree(self, criterion):\n\n def _get_values_to_split(splits_values):\n values_to_split = {}\n for split_index, split_values in enumerate(splits_values):\n for value in split_values:\n values_to_split[value] = split_index\n return values_to_split\n\n def _get_splits_samples_indices(num_splits, separation_attrib_index, values_to_split,\n valid_samples_indices, samples):\n splits_samples_indices = [[] for _ in range(num_splits)]\n for sample_index in valid_samples_indices:\n sample_value_in_split_attrib = samples[sample_index][separation_attrib_index]\n try:\n splits_samples_indices[values_to_split[\n sample_value_in_split_attrib]].append(sample_index)\n except KeyError:\n print('Should not get here. 
Sample {} has value {} at attribute # {}, '\n 'but this value is unknown to the decision tree.'.format(\n sample_index,\n sample_value_in_split_attrib,\n separation_attrib_index))\n sys.exit(1)\n return splits_samples_indices\n\n def _get_numeric_splits_samples_indices(separation_attrib_index, mid_point,\n valid_samples_indices, samples):\n splits_samples_indices = [[], []]\n for sample_index in valid_samples_indices:\n sample_value_in_split_attrib = samples[sample_index][separation_attrib_index]\n if sample_value_in_split_attrib <= mid_point:\n splits_samples_indices[0].append(sample_index)\n else:\n splits_samples_indices[1].append(sample_index)\n return splits_samples_indices\n\n def _has_multiple_nominal_values(values_num_samples):\n return sum(num_samples > 0 for num_samples in values_num_samples) > 1\n\n def _has_multiple_numeric_values(valid_samples_indices, sample, attrib_index):\n values_seen = set()\n for sample_index in valid_samples_indices:\n sample_value = sample[sample_index][attrib_index]\n if sample_value not in values_seen:\n if values_seen:\n return True\n values_seen.add(sample_value)\n return False\n\n def _has_enough_samples_in_second_largest_class(class_index_num_samples,\n most_common_int_class):\n second_largest = max(num_samples\n for class_index, num_samples in enumerate(class_index_num_samples)\n if class_index != most_common_int_class)\n return second_largest >= MIN_SAMPLES_SECOND_LARGEST_CLASS\n\n\n # Is it time to stop growing subtrees?\n if (self.max_depth_remaining <= 0\n or self.num_valid_samples < self._min_samples_per_node\n or self.number_non_empty_classes == 1\n or (USE_MIN_SAMPLES_SECOND_LARGEST_CLASS\n and not _has_enough_samples_in_second_largest_class(\n self.class_index_num_samples,\n self.most_common_int_class))):\n return None\n\n # If a valid attribute has only one value, it should be marked as invalid from this node on.\n num_valid_nominal_attributes = 0\n for attrib_index, is_valid_nominal_attribute in enumerate(self.valid_nominal_attribute):\n if not is_valid_nominal_attribute:\n continue\n if (not _has_multiple_nominal_values(\n self.contingency_tables[attrib_index].values_num_samples)):\n self.valid_nominal_attribute[attrib_index] = False\n else:\n num_valid_nominal_attributes += 1\n\n num_valid_numeric_attributes = 0\n for attrib_index in range(len(self.valid_numeric_attribute)):\n if not self.valid_numeric_attribute[attrib_index]:\n continue\n if not _has_multiple_numeric_values(self.valid_samples_indices,\n self.curr_dataset.samples,\n attrib_index):\n self.valid_numeric_attribute[attrib_index] = False\n else:\n num_valid_numeric_attributes += 1\n\n # If there are no valid attributes, this node should be a leaf.\n if not num_valid_nominal_attributes and not num_valid_numeric_attributes:\n return None\n\n if self._use_stop_conditions:\n num_valid_attributes = sum(self.curr_dataset.valid_numeric_attribute)\n # Attributes which are valid (`True`) in `new_valid_nominal_attribute` and invalid\n # (`False`) in `new_valid_nominal_attribute_incl_chi_sq_test` should not be used to\n # split at this node, but could be used to split in descendant nodes.\n new_valid_nominal_attribute = self.valid_nominal_attribute[:]\n new_valid_nominal_attribute_incl_chi_sq_test = self.valid_nominal_attribute[:]\n for (attrib_index,\n is_valid_nominal_attribute) in enumerate(self.valid_nominal_attribute):\n if is_valid_nominal_attribute:\n (is_valid_num_samples,\n is_valid_chi_sq_and_num_samples) = (self._is_attribute_valid(\n attrib_index,\n 
min_allowed_in_two_largest=MIN_SAMPLES_IN_SECOND_MOST_FREQUENT_VALUE))\n if is_valid_chi_sq_and_num_samples:\n num_valid_attributes += 1\n elif is_valid_num_samples:\n new_valid_nominal_attribute_incl_chi_sq_test[attrib_index] = False\n else:\n new_valid_nominal_attribute[attrib_index] = False\n new_valid_nominal_attribute_incl_chi_sq_test[attrib_index] = False\n self.valid_nominal_attribute = new_valid_nominal_attribute_incl_chi_sq_test\n if num_valid_attributes == 0:\n return None\n\n # Get best split. Note that self is the current TreeNode.\n best_split = criterion.select_best_attribute_and_split(self)\n\n if math.isinf(best_split.criterion_value):\n # Stop condition when there is no valid attribute with more than one value (then\n # best_split.criterion_value is default, which is +- inf).\n return None\n\n if self.curr_dataset.valid_numeric_attribute[best_split.attrib_index]:\n # NUMERIC ATTRIBUTE\n last_left_value = list(best_split.splits_values[0])[0]\n first_right_value = list(best_split.splits_values[1])[0]\n mid_point = 0.5 * (last_left_value + first_right_value)\n splits_samples_indices = _get_numeric_splits_samples_indices(\n best_split.attrib_index,\n mid_point,\n self.valid_samples_indices,\n self.curr_dataset.samples)\n # Save this node's split information.\n self.node_split = NodeSplit(best_split,\n None,\n mid_point)\n\n else:\n # NOMINAL ATTRIBUTE\n\n # Calculate a list containing the inverse information of best_split.splits_values: here,\n # given a value, we want to know to which split it belongs\n values_to_split = _get_values_to_split(best_split.splits_values)\n\n splits_samples_indices = _get_splits_samples_indices(len(best_split.splits_values),\n best_split.attrib_index,\n values_to_split,\n self.valid_samples_indices,\n self.curr_dataset.samples)\n # Save this node's split information.\n self.node_split = NodeSplit(best_split, values_to_split, None)\n\n # Create subtrees\n self.is_leaf = False\n if self._use_stop_conditions:\n # Any attribute that has enough samples in the second most frequent value could pass the\n # chi-square test in a descendant node, thus we don't send the information of chi-square\n # test to child nodes.\n old_valid_nominal_attribute = self.valid_nominal_attribute[:]\n self.valid_nominal_attribute = new_valid_nominal_attribute\n else:\n old_valid_nominal_attribute = self.valid_nominal_attribute\n for curr_split_samples_indices in splits_samples_indices:\n self.nodes.append(TreeNode(self.curr_dataset,\n curr_split_samples_indices,\n self.valid_nominal_attribute[:],\n self.valid_numeric_attribute[:],\n self.max_depth_remaining - 1,\n self._min_samples_per_node,\n self._use_stop_conditions,\n self._max_p_value_chi_sq))\n self.nodes[-1].create_subtree(criterion)\n self.valid_nominal_attribute = old_valid_nominal_attribute", "def _information_gain(self, y, X_column, split_thersh):\n # parent E\n parent_entropy = entropy(y)\n # generate split\n left_idxs, right_idxs = self._split(X_column, split_thersh)\n\n if len(left_idxs) == 0 or len(right_idxs) == 0:\n return 0\n # weighted avg child E\n n = len(y)\n n_left_samples, n_right_samples = len(left_idxs), len(right_idxs)\n entropy_left, entropy_right = entropy(y[left_idxs]), entropy(y[right_idxs])\n child_entropy = (n_left_samples/n) * entropy_left + (n_right_samples/n) * entropy_right\n\n # return IG\n ig = parent_entropy - child_entropy\n return ig", "def calcGiniSplitByColumn(self, data, structure, colIName):\n colIndex, giniSplit = structure[colIName]['index'], 0\n for value in 
structure[colIName][\"values\"]:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data)\n giniSplit += self.calcDataGini(newData, structure) * p\n return round(giniSplit, 3)", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. 
class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def count_half_nodes(self):\r\n queue = [self]\r\n half_nodes = 0\r\n half = False\r\n while queue:\r\n curr_node = queue.pop(0)\r\n if curr_node.left:\r\n queue.append(curr_node.left)\r\n half = not half\r\n if curr_node.right:\r\n queue.append(curr_node.right)\r\n half = not half\r\n if half:\r\n half_nodes += 1\r\n half = not half \r\n return half_nodes", "def dfs_search(board):\n goalcount = 0\n fringe = deque([])\n count = 0\n fringe.append(board)\n while(True):\n if len(fringe) is 0:\n print(\"Empty Fringe\")\n return\n n = fringe.pop()\n # print(n)\n goalcount = goalcount + 1\n if n.goal_test():\n print goalcount\n print count\n return\n column = n.get_next_unassigned_var()\n for val in n.choices[column]:\n count = count+1\n child = nQueens(copy.deepcopy(n.state), copy.deepcopy(n.choices), copy.deepcopy(n.n), n)\n child.assign(column, val)\n fringe.append(child)", "def split(self, node):\n # check node was not already split\n if node.has_children:\n raise SplitError(\"BUG: node was already split\")\n\n # early stopping (only if enough nodes already)\n if self.n_clusters >= self.min_leaves:\n # make a leaf if too small to split\n if node.size <= 2 * self.min_leaf_size:\n return None, None\n # special case: make a leaf if too deep already\n if len(node.name) > self.max_depth:\n # int(node.name, 2) is too big to be represented as a long (int64)\n # if len(node.name > 62)\n sys.stderr.write('# WARNING: early stopping too deep branch'\n ' {}\\n'.format(node.name))\n sys.stderr.flush()\n return None, None\n\n # bi-partition the node's samples\n if self.split_type == \"kmeans\":\n left, right = self._split_kmeans(node)\n else:\n left, right = self._split_threshold(node)\n\n # check if we have two leaves or none\n if (left is None and right is not None) or (left is not None and right is None):\n raise SplitError(\n \"BUG: both children should be simultaneously\"\n \"either None or not\")\n\n # check the post-conditions\n if left is None or right is None:\n # node is a leaf\n if node.has_children:\n raise SplitError(\"BUG: leaf node marked with (empty) children\")\n # check if it must have been split instead of being a leaf\n if node.size > self.max_leaf_size:\n # force the split\n left, right = self._split_forced(node)\n msg = 'WARNING: forced to split a must-split node that was'\n msg += ' too big to be a leaf ({0} > max_leaf_size={1})\\n'\n sys.stderr.write(msg.format(node.size, self.max_leaf_size))\n if self.n_clusters < self.min_leaves:\n 
# force the split\n left, right = self._split_forced(node)\n msg = 'WARNING: forced to split a must-split node that had'\n msg += ' not enough clusters ({0} < min_leaves={1})\\n'\n sys.stderr.write(msg.format(self.n_clusters, self.min_leaves))\n\n # finalize the split\n if node.has_children:\n # update the labels of right child only (left keeps the same)\n self.labels[right.ids] = self.n_clusters\n self.n_clusters += 1\n\n return left, right", "def eval_tree(tree: GPTree, dataset: Iterable) -> list:\n results = []\n for data in zip(*dataset):\n try:\n output = tree.compute_tree(data[0])\n results.append(\n 0 if output == data[1] else 1\n ) # right or wrong, but no error.\n except Exception:\n results.append(2) # Fails to run.\n\n return results", "def test_tree_4_nodes_right_unbalanced_return_1(balanced_3_nodes):\n balanced_3_nodes.insert(13)\n assert balanced_3_nodes.balance() == -1", "def split(self, thresh=0):\n\n new_tree_bounds = []\n new_tree_ids = []\n\n self.contains_null = False\n\n for qi, quad in enumerate(self.tree):\n\n left, bottom, right, top = quad.bounds\n xcenter = left + (right - left) / 2.0\n ycenter = top - (top - bottom) / 2.0\n\n quad_id = self.tree_ids[qi]\n\n for id_, bbox in zip(\n [1, 3, 0, 2],\n [\n (left, ycenter, xcenter, top),\n (xcenter, ycenter, right, top),\n (left, bottom, xcenter, ycenter),\n (xcenter, bottom, right, ycenter),\n ],\n ):\n\n id_list = list(self.sindex.intersection(bbox))\n\n if id_list:\n\n if len(id_list) > thresh:\n\n new_tree_bounds.append(bbox)\n new_tree_ids.append(quad_id + str(id_))\n\n else:\n self.contains_null = True\n\n else:\n self.contains_null = True\n\n self.tree_bounds = new_tree_bounds\n self.tree_ids = new_tree_ids\n\n return self", "def test_contains_returns_true_on_tree_with_value_left(bst_all_to_left):\n assert bst_all_to_left.contains(3) is True\n assert bst_all_to_left.contains(1) is True\n assert bst_all_to_left.contains(2) is True", "def num_leaves(tree):\n return ((tree.n_node_samples > 0) & (tree.feature == INVALID_VALUE)).sum()", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def buildTreePandas(rows, res, min_ppl = None, maxDepth=None, scoref=entropy, depth=0):\n minimum_ppl = deepcopy(min_ppl)\n num_ppl = len(rows)\n \n if min_ppl is not None and num_ppl <= min_ppl:\n #Extra protection to stop the recursion\n return decisionNode(results=__uniqueCountsPandas(rows, res)) \n if num_ppl==0: \n return decisionNode( )\n newDepth = depth + 1\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth):\n #print \"Hooray I got here.\"\n return decisionNode(results=__uniqueCountsPandas(rows, res))\n current_score=scoref(rows, resCol = res)\n # Set up some variables to track the best criteria\n best_gain=0.0\n best_criteria=None\n best_sets=None\n \n featColumns=rows.columns.tolist()\n featColumns.remove(res)\n for col in featColumns:\n # Generate the list of different values in\n # this column\n column_values=rows.loc[:,col].unique()\n # Now try dividing the rows up for each value\n # in this 
column\n copy = rows.sort(columns = col)\n for value in column_values:\n (set1,set2)=__dividePandas(copy,col,value)\n # Information gain\n p=float(len(set1))/len(rows)\n gain=current_score-p*scoref(set1, resCol = res)-(1-p)*scoref(set2, resCol = res)\n size_min = 0 if minimum_ppl is None else minimum_ppl - 1\n if gain>best_gain and len(set1)>size_min and len(set2)>size_min:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Create the subbranches\n if best_gain>0:\n trueBranch=buildTreePandas(best_sets[0], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n falseBranch=buildTreePandas(best_sets[1], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCountsPandas(rows, res))", "def best_cat_brute_force_split(self, ind, dep):\n split = Split(None, None, None, None, 0)\n all_dep = np.unique(dep.arr)\n for i, ind_var in enumerate(ind):\n ind_var = ind_var.deep_copy()\n unique = np.unique(ind_var.arr)\n\n freq = {}\n if dep.weights is None:\n for col in unique:\n counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)\n freq[col] = cl.defaultdict(int)\n freq[col].update(np.transpose(counts))\n else:\n for col in unique:\n counts = np.unique(np.compress(ind_var.arr == col, dep.arr), return_counts=True)\n freq[col] = cl.defaultdict(int)\n for dep_v in all_dep:\n freq[col][dep_v] = dep.weights[(ind_var.arr == col) * (dep.arr == dep_v)].sum()\n\n if len(list(ind_var.possible_groupings())) == 0:\n split.invalid_reason = InvalidSplitReason.PURE_NODE\n\n choice, highest_p_join, split_chi, dof = None, None, None, None\n for comb in ind_var.all_combinations():\n freqs = [ sum( [ cl.Counter(freq[key]) for key in c ], cl.Counter()) for c in comb ]\n\n if sum([ (sum(x.values()) < self.min_child_node_size) for x in freqs ] ) > 0:\n continue\n keys = set(sum([ list(f.keys()) for f in freqs ], []))\n\n n_ij = np.array(\n [ [ col.get(k, 0) for k in keys ] for col in freqs ]\n )\n\n chi, p_split, dof = chisquare(n_ij, dep.weights is not None)\n\n if (choice is None or p_split < highest_p_join or (p_split == highest_p_join and chi > split_chi)) and p_split < self.alpha_merge:\n choice, highest_p_join, split_chi = comb, p_split, chi\n\n temp_split = Split(i, choice, split_chi, highest_p_join, dof, split_name=ind_var.name)\n better_split = (not split.valid() or p_split < split.p or (p_split == split.p and chi > split.score)) and choice is not None\n if better_split: split, temp_split = temp_split, split\n\n if split.valid() and choice is not None:\n chi_threshold = self.split_threshold * split.score\n\n if temp_split.valid() and temp_split.score >= chi_threshold:\n for sur in temp_split.surrogates:\n if sur.column_id != i and sur.score >= chi_threshold:\n split.surrogates.append(sur)\n\n temp_split.surrogates = []\n split.surrogates.append(temp_split)\n\n split.sub_split_values(ind[split.column_id].metadata)\n\n return split", "def _split_forced(self, node):\n # compute the split\n _vec = 0\n sorted_idxs = np.argsort(self.E[node.ids, _vec]).squeeze()\n n = len(sorted_idxs) // 2\n _lids = node.ids[sorted_idxs[:n]]\n _rids = node.ids[sorted_idxs[n:]]\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # register the split\n node.has_children = True\n node.thresh = np.median(self.E[node.ids, _vec]) # arbitrary\n # Note: median would not 
ensure equal size (because of duplicate values)\n left = SpectralNode(_lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(_rids, _vec, score=_sr, name=node.name + \"1\")\n\n return left, right", "def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)", "def test_get_tree_count_query(self): # noqa\n # from datetime import datetime\n self.session.commit()\n\n # Get tree by for cycle\n self.start_query_counter()\n self.assertEqual(0, len(self.stmts))\n # startTime = datetime.now()\n self.model.get_tree(self.session)\n # delta = datetime.now() - startTime\n # print(\"Get tree: {!s:>26}\".format(delta))\n self.assertEqual(1, len(self.stmts))\n self.stop_query_counter()", "def calcGiniSplitBySplitValue(self, data, structure, colIndex, splitValue):\n dataBellow = list(filter(lambda x: float(x[colIndex]) <= splitValue, data))\n dataAbove = list(filter(lambda x: float(x[colIndex]) > splitValue, data))\n giniSplit = (len(dataBellow) / len(data)) * self.calcDataGini(dataBellow, structure) +\\\n (len(dataAbove) / len(data)) * self.calcDataGini(dataAbove, structure)\n return round(giniSplit, 3)", "def validate_tree(self, treenum, tree, halonum=None):\n # Check size\n if treenum == -1:\n nhalos = self.totnhalos\n halonum = None\n else:\n if halonum is not None:\n nhalos = 1\n else:\n nhalos = self.nhalos_per_tree[treenum]\n fields = list(tree.keys())\n for k in fields:\n assert (len(tree[k]) == nhalos)\n # Check fields\n for k in self.raw_fields:\n assert (k in fields)\n # Don't check tree indices for a single halo\n if halonum is not None:\n return\n # For all trees get list of local nhalos for every halo\n if treenum == -1:\n treenum_arr = self.treenum_arr\n nhalos = self.nhalos_per_tree[treenum_arr]\n # Check that halos are within tree\n for k in halo_fields:\n assert ((tree[k] < nhalos).all())\n # Check FOF central exists and all subs in one snapshot\n central = tree['FirstHaloInFOFgroup']\n assert ((central >= 0).all())\n if treenum == -1:\n central = self.get_total_index(treenum_arr, central)\n assert ((tree['SnapNum'] == tree['SnapNum'][central]).all())\n # Check that progenitors/descendants are back/forward in time\n descend = tree['Descendant'].astype('i4')\n has_descend = (descend >= 0)\n not_descend = np.logical_not(has_descend)\n # Not strictly True\n # assert ((tree['SnapNum'][not_descend] ==\n # (len(self.scale_factors) - 1)).all())\n if treenum == -1:\n descend = self.get_total_index(treenum_arr, descend)\n assert ((tree['SnapNum'][descend[has_descend]] >\n tree['SnapNum'][has_descend]).all())\n # Check progenitors are back in time\n descend[not_descend] = np.where(not_descend)[0]\n progen1 = tree['FirstProgenitor']\n progen2 = tree['NextProgenitor']\n has_progen1 = (progen1 >= 0)\n has_progen2 = (progen2 >= 0)\n if treenum == -1:\n progen1 = self.get_total_index(treenum_arr, progen1)\n progen2 = self.get_total_index(treenum_arr, progen2)\n assert ((tree['SnapNum'][progen1[has_progen1]] <=\n tree['SnapNum'][descend[has_progen1]]).all())\n assert ((tree['SnapNum'][progen2[has_progen2]] <=\n tree['SnapNum'][descend[has_progen2]]).all())", "def performBacktrackSearch(self, rootNode, node):\r\n \r\n print (\"-- proc --\", node.state.assignment)\r\n \r\n #check if we have 
reached goal state\r\n if node.state.checkGoalState():\r\n print (\"reached goal state\")\r\n return True\r\n \r\n else:\r\n \r\n #check if there is a case of early failure\r\n #if node.state.forwardCheck(): \r\n if node.state.arcConsistency():\r\n \r\n #find an unassigned variable \r\n variable = node.state.selectUnassignedVariable()\r\n \r\n #for all values in the domain\r\n for value in node.state.orderDomainValues():\r\n \r\n #check if constraints are satisfied\r\n if CSP.checkConstraints(node.state.assignment,\r\n variable, value):\r\n \r\n #create child node\r\n childNode = Node(State(node.state.assignment, \r\n node.state.possibleValues, variable, value))\r\n \r\n node.addChild(childNode)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, childNode)\r\n \r\n result = self.performBacktrackSearch(rootNode, childNode)\r\n if result == True:\r\n return True\r\n return False", "def failed_leaves(self) -> list[int] | list[str]:\n if not self._is_analyzed:\n raise ValueError(\n \"It appears the PF image has not been analyzed yet. Use .analyze() first.\"\n )\n failing_sets = Enumerable(self.mlc_meas).where(lambda m: not all(m.passed))\n if not self.separate_leaves:\n return failing_sets.select(lambda m: m.leaf_num).distinct().to_list()\n else:\n return (\n failing_sets.select_many(\n lambda m: [\n m.full_leaf_nums[idx]\n for idx, passed in enumerate(m.passed)\n if not passed\n ]\n )\n .distinct()\n .to_list()\n )", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([2]), set([0, 1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:1, 1:1, 2:0})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.48)", "def check_db_matches():\n FIRST_RUN = False\n #ALL_FILE = \"all_queries_big\"\n #DB_FILE = \"all_dbs_big\"\n ALL_FILE = \"all_queries\"\n DB_FILE = \"all_dbs\"\n START_FROM = \"number\"\n ALL_NUM = \"all_num_from_new\"\n ALL_NUM = \"all_num_from_4_5_full_17\"\n\n ALL_FIXED_q = \"all_fixed_queries\" + str(17)\n ALL_FIXED_dbs = \"all_fixed_dbs\" + str(17)\n biggest = 20\n max_db_size = 20\n all_queries = {}\n db = [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}]\n found = [0] * biggest\n ret_val = []\n if FIRST_RUN:\n #raw_input(\"are you sure you want to rewrite the db?!\")\n storage_main = FileStorage(INDEX_DIR_CODE)\n ix_main = storage_main.open_index()\n try:\n \"\"\"\n with open(START_FROM, \"rb\") as file_h:\n (curr_db, count, db_sizes) = pickle.load(file_h)\n with open(ALL_FIXED_q, \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(ALL_FIXED_dbs, \"rb\") as file_h:\n db = pickle.load(file_h)\n print len(all_queries.keys())\n print \"Real size\", [len(e.keys()) for e in db]\n print \"left\", db_sizes\n print curr_db, count\n \"\"\"\n with open(START_FROM, \"rb\") as file_h:\n (curr_db, count, db_sizes) = pickle.load(file_h)\n print \"read\", curr_db, count\n with open(ALL_FILE+str(curr_db - 1), \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(DB_FILE+str(curr_db - 1), \"rb\") as file_h:\n db = pickle.load(file_h)\n print \"Real size\", [len(e.keys()) for e in db]\n 
except:\n curr_db = 0\n count = 0\n db_sizes = [2 ** i for i in range(1, biggest + 1)]\n new_count = 0\n print \"start reading posts\"\n q_db = POSTS_DB.find({}, timeout=False)\n print \"done reading posts\"\n print \"start with\", curr_db\n for question in q_db:\n if curr_db == max_db_size:\n print \"break\"\n break\n new_count += 1\n if new_count < count:\n continue\n if db_sizes[curr_db] % 1000 == 0:\n print \"BUILD:\", curr_db, \"I'm Alive, more\", db_sizes[curr_db], \"togo!\"\n snips = get_possible_snippets(question['Id'])\n if snips is None or len(snips) == 0:\n continue\n (db[curr_db])[question['Id']] = snips[0]\n db_sizes = db_sizes[:curr_db] + [e-1 for e in db_sizes[curr_db:]]\n if db_sizes[curr_db] == 0:\n t = time.time()\n print \"find matches for\", curr_db, \"size is\", len(db[curr_db].keys())\n for place, key in enumerate(db[curr_db].keys()):\n if place % 1000 == 0:\n print \"FIND: I'm Alive\", place\n code = db[curr_db][key][0]\n res_dict, tokens, q_scores = fast_from_code_to_question(code, ix_main)\n if all_queries.get(key, None) is None:\n all_queries[key] = (tokens, res_dict)\n curr_db += 1\n try:\n print \"saved\", time.time() - t\n with open(ALL_FILE + str(curr_db), \"wb\") as file_h:\n pickle.dump(all_queries, file_h)\n with open(DB_FILE + str(curr_db), \"wb\") as file_h:\n pickle.dump(db, file_h)\n with open(START_FROM, \"wb\") as file_h:\n pickle.dump((curr_db, new_count, db_sizes), file_h)\n except:\n print \"to much to write\"\n print \"start\", 2 ** (curr_db + 1)\n q_db.close()\n num = 0\n else:\n print \"reading files..\"\n t = time.time()\n \"\"\"with open(ALL_FILE+str(max_db_size), \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(DB_FILE+str(max_db_size), \"rb\") as file_h:\n db = pickle.load(file_h)\"\"\"\n with open(ALL_FIXED_q, \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(ALL_FIXED_dbs, \"rb\") as file_h:\n db = pickle.load(file_h)\n print \"done reading\", time.time() - t\n print [len(e.keys()) for e in db]\n\n try:\n with open(ALL_NUM, \"rb\") as file_h:\n num, found = pickle.load(file_h)\n print \"read\", num, found\n except:\n num = 0\n\n curr_num = 0\n print num, len(all_queries.keys())\n for query in all_queries.keys():\n curr_num += 1\n if curr_num < num:\n continue\n if curr_num % 1000 == 0:\n print \"MATCHES: I'M Alive!\", curr_num, query\n\n matches = get_matches(query, all_queries[query])\n flag_f = False\n for match in matches:\n if flag_f:\n break\n for i in range(len(db)):\n if match in db[i].keys() and query in db[i].keys():\n found[i] += 1\n flag_f = True\n break\n\n if curr_num - 1 > num:\n with open(ALL_NUM, \"wb\") as file_h:\n pickle.dump((curr_num, found), file_h)\n print found\n \"\"\"\n #saved in _n\n small_db = [0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 8] # 3/5\n small_db = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 4] # 4/5\n for i, val in enumerate(small_db):\n try:\n found[i] += val\n except:\n print \"shorter db\"\n\n print found\"\"\"\n for i in range(len(found) - 1):\n found[i + 1] += found[i]\n print(found)\n for place, i in enumerate([2 ** i for i in range(1, max_db_size + 1)]):\n ret_val.append(float(found[place])/i * 100)\n print ret_val", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def test_tree_2_nodes_left_unbalanced(one_t):\n one_t.insert(9)\n assert one_t.balance() == 1", "def check_db(db, nb_exps, nb_algos, nb_trials, nb_benchmarks, nb_child_exps=0):\n experiments = db.read(\"experiments\")\n assert len(experiments) == nb_exps\n assert len(db.read(\"algo\")) 
== nb_algos\n assert len(db.read(\"trials\")) == nb_trials\n assert len(db.read(\"benchmarks\")) == nb_benchmarks\n\n # Check we have expected number of child experiments.\n exp_map = {exp[\"_id\"]: exp for exp in experiments}\n assert len(exp_map) == nb_exps\n child_exps = []\n for exp in experiments:\n parent = exp[\"refers\"][\"parent_id\"]\n if parent is not None:\n assert parent in exp_map\n child_exps.append(exp)\n assert len(child_exps) == nb_child_exps", "def split_at(self, i):\n\n if i in self.forced:\n inconsistent_generation()\n\n assert not self.is_exhausted\n\n key = self.values[i]\n\n child = TreeNode(\n bit_lengths=self.bit_lengths[i + 1 :],\n values=self.values[i + 1 :],\n transition=self.transition,\n )\n self.transition = Branch(bit_length=self.bit_lengths[i], children={key: child})\n if self.__forced is not None:\n child.__forced = {j - i - 1 for j in self.__forced if j > i}\n self.__forced = {j for j in self.__forced if j < i}\n child.check_exhausted()\n del self.values[i:]\n del self.bit_lengths[i:]\n assert len(self.values) == len(self.bit_lengths) == i", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = [True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... 
branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * 
child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def n_trees(self):\n return len(self.data_kd)", "def test_tree_2_nodes_right_unbalanced(one_t):\n one_t.insert(11)\n assert one_t.balance() == -1", "def checkTree(self, root):\n\t\tif len(self.graph.nodes()) > 100: # TODO why this arbitrary limit on the size of the tree?\n\t\t\tNJTree.logger.debug(\"Too big graph detected and arbitrarly split\")\n\t\t\tself.OK = \"false\"\n\t\t\treturn self.OK\n\t\t# check species of root node neighbors\n\t\tedge = root[1]\n# \t\tloss = root[2]\n\t\tset_species = set([])\n\t\tspecies = []\n# \t\tedge_weight = self.graph[edge[0]][edge[1]]['homology_weight']\n\t\t# TODO what is the point of having twice the results?\n\t\tset_species.add(self.rootedTree.node[edge[0]]['species'])\n\t\tset_species.add(self.rootedTree.node[edge[1]]['species'])\n\t\tspecies.append(self.rootedTree.node[edge[0]]['species'])\n\t\tspecies.append(self.rootedTree.node[edge[1]]['species'])\n\t\tif len(species) == 1 or len(set_species) == 1 and (self.mrca not in set_species):\n\t\t\t# all leaf nodes get added to orphan pile\n\t\t\t# NJTree.logger.debug(\"Orphan in checkTree still happens\")\n\t\t\tself.OK = \"orphan\"\n\t\t\treturn self.OK\n\t\tif len(species) == 2:\n\t\t\tif self.mrca in species:\n\t\t\t\tself.OK = \"false\"\n\t\t\t\treturn self.OK\n\t\t\telse:\n\t\t\t\tself.OK = \"true\"\n\t\t\t\treturn self.OK\n\t\t# should add a verification that there are no more than 2 species", "def depth_breadth_eval_function(individual, test_data, truth_data, name=None):\r\n # Compute the string representation of the individual\r\n string_rep = str(individual)\r\n # Generate array for appearances of ARG0\r\n data_appearances = [match.start() for match in re.finditer('ARG0', string_rep)]\r\n # Computed by counting occurrences of GTMOEPDataPair per primitive\r\n # Let's make this dynamic in the future by querying all functions\r\n max_breadth = 3.0\r\n # Tradeoff formula\r\n return individual.height - len(data_appearances)/(max_breadth**individual.height + 1.0)", "def test_balance_known_tree_2(known_bst):\n known_bst[0].insert_non_balance(12)\n assert known_bst[0].balance() == known_bst[2] - 1", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n 
splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def split(root, Dk, maxDepth, minRows, currDepth):\n \n left, right = root['branches']\n del(root['branches'])\n \n# if not left and not right:\n# return\n \n # Check if the node is a leaf\n if not len(left): \n root['left'] = root['right'] = getLeafClass(right)\n return\n elif not len(right):\n root['left'] = root['right'] = getLeafClass(left)\n return\n \n # Check for max depth\n if(currDepth >= maxDepth):\n root['left'], root['right'] = getLeafClass(left), getLeafClass(right)\n return\n \n # Process left branch\n if(len(left) <= minRows):\n root['left'] = getLeafClass(left)\n else:\n root['left'] = findNextSplit(left, Dk)\n split(root['left'], Dk, maxDepth, minRows, currDepth + 1)\n \n # Process right branch\n if(len(right) <= minRows):\n root['right'] = getLeafClass(right)\n else:\n root['right'] = findNextSplit(right, Dk)\n split(root['right'], Dk, maxDepth, minRows, currDepth + 1)", "def height_helper(self, node: object) -> int:\n #current node == a leaf\n if self.is_leaf(node):\n return 0\n\n #current node == a single child\n if node.left is not None and node.right is None:\n return 1 + self.height_helper(node.left)\n if node.left is None and node.right is not None:\n return 1 + self.height_helper(node.right)\n\n #node ==2 child leaf\n if self.height_helper(node.left) > self.height_helper(node.right):\n return 1 + self.height_helper(node.left)\n else:\n return 1 + self.height_helper(node.right)", "def test_balance_left_tree(bst_all_to_left):\n assert bst_all_to_left.balance() == -1", "def classify(observations, tree, dataMissing=False):\n\n def classifyWithoutMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n #if v >= tree.value: branch = tree.trueBranch\n #else: branch = 
tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithoutMissingData(observations, branch)\n\n\n def classifyWithMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n if v == None:\n tr = classifyWithMissingData(observations, tree.trueBranch)\n fr = classifyWithMissingData(observations, tree.falseBranch)\n tcount = sum(tr.values())\n fcount = sum(fr.values())\n tw = float(tcount)/(tcount + fcount)\n fw = float(fcount)/(tcount + fcount)\n result = collections.defaultdict(int) # Problem description: http://blog.ludovf.net/python-collections-defaultdict/\n for k, v in tr.items(): result[k] += v*tw\n for k, v in fr.items(): result[k] += v*fw\n return dict(result)\n else:\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n # if v >= tree.value: branch = tree.trueBranch\n # else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithMissingData(observations, branch)\n\n # function body\n if dataMissing:\n return classifyWithMissingData(observations, tree)\n else:\n return classifyWithoutMissingData(observations, tree)", "def treeThreat():\n\tglobal treeThreatList\n\ttreeThreatList = []\n\tcount = 0\n\tfor name in threatList:\n\t\tif name in treeList:\n\t\t\tcount += 1\n\t\t\ttreeThreatList.append(name)\n\treturn count", "def ski(right, down):\n matrix = read_map_into_matrix()\n outer_selector = (i % down == 0 for i in range(len(matrix)))\n\n column = 1\n trees = 0\n\n for row in compress(matrix, outer_selector):\n inner_selector = (i == column - 1 for i in range(column))\n\n col = next(compress(row, inner_selector))\n\n if col == \"#\":\n trees += 1\n\n column += right\n\n return trees", "def solve_part1(input):\n\n def is_tree(cell):\n return 1 if cell == \"#\" else 0\n\n def get_cell(row, pos):\n return row[pos % len(row)]\n\n pos = 0\n trees = 0\n for row in input:\n trees += is_tree(get_cell(row, pos))\n pos += 3\n return trees", "def treeCheck(file):\n\tglobal treeList\n\ttreeList = []\n\twith open(file) as f:\n\t\tfor line in f.readlines():\n\t\t\tentry = line.split(',')\n\t\t\tif \"Tree\" in entry[4]:\n\t\t\t\ttreeList.append(entry[2])\n\treturn len(treeList)", "def search(state, cut_value, game, prune=True):\n print (game.step)\n f = open(\"search_tree_\" + str(game.step) + \".txt\", 'w')\n\n def max_value(state, alpha, beta, depth, node):\n start = time.time()\n if game.cutoff(state, depth):\n return game.evaluate(state), None\n val = -inf\n action = None\n pre_val = game.evaluate(state)\n print (\"pre \" + str(pre_val))\n for a, s in game.successors(state):\n #print (str(a))\n cur_val = game.evaluate(s)\n #print (str(a) + ':' + str(cur_val))\n node_child = Search_node(node, a, cur_val)\n node.add_child(node_child)\n if cur_val > pre_val + cut_value:\n v, _ = min_value(s, alpha, beta, depth + 1, node_child)\n f.write(\"a: \" + str(a) + \"; v: \" + str(v) + \"; depth:\" + \\\n str(depth) + \"; alpha:\" + str(alpha) + \"; beta:\" + str(beta) \\\n + \" \\n\")\n else:\n v = cur_val\n if v > val:\n val = v\n action = a\n if prune:\n if v >= beta:\n return v, a\n alpha = max(alpha, v)\n end = time.time()\n print(\"max t:\" + str(end - start))\n return val, action\n\n def min_value(state, alpha, beta, depth, node):\n if game.cutoff(state, depth):\n return game.evaluate(state), None\n val = inf\n action = None\n pre_val = game.evaluate(state)\n print 
(\"min pre \" + str(pre_val))\n for a, s in game.successors(state):\n cur_val = game.evaluate(s)\n node_child = Search_node(node, a, cur_val)\n node.add_child(node_child)\n if cur_val < pre_val - cut_value:\n v, _ = max_value(s, alpha, beta, depth + 1, node_child)\n # f.write(\"a: \" + str(a) + \"; v: \" + str(v) + \"; depth:\" + \\\n # str(depth) + \"; alpha:\" + str(alpha) + \"; beta:\" + str(beta) + \" \\n\")\n else:\n v = cur_val\n if v < val:\n val = v\n action = a\n if prune:\n if v <= alpha:\n return v, a\n beta = min(beta, v)\n return val, action\n\n root_node = Search_node(None, None, 0)\n\n _, action = max_value(state, -inf, inf, 0, root_node)\n root_node.print_tree()\n f.close()\n return action", "def k_fold_tree(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n test = temp.pop(i)\n\n test_labels = list(test['Labels'])\n\n train = pd.concat(temp)\n\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n\n test_predictions = predict_data(test, model)\n train_predictions = predict_data(train, model)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)", "def analyze_path(dbase: database.WikiDatabase) -> Tuple[List[int], List[int], List[List[Tuple[int, ...]]]]:\r\n\tarticle1 = dbase.get_random_page()\r\n\tarticle2 = dbase.get_random_page()\r\n\tpaths = pathfinder.bidirectional_BFS(dbase, article1, article2)\r\n\tpaths_reversed = pathfinder.bidirectional_BFS(dbase, article2, article1)\r\n\treturn [article1, article2], [article2, article1], [paths, paths_reversed]", "def __traverse_tree(self, node, sample_instance):\n if node.is_leaf:\n return node.predicted_class\n split = node.integer_splitting_rule\n feature = node.feature_index_split\n\n # left node gets assigned to data that is less than the integer\n # splitting rule within that feature\n if sample_instance[feature] < split:\n prediction = self.__traverse_tree(node.left_child,\n sample_instance)\n else:\n prediction = self.__traverse_tree(node.right_child,\n sample_instance)\n return prediction", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def calcGainRatioSplitByColumn(self, data, structure, colIName):\n splitInfo, colIndex = 0, structure[colIName]['index']\n for value in structure[colIName]['values']:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data) if len(newData) != 0 else 1\n splitInfo += (-1) * p * log2(p)\n splitInfo = 1 if splitInfo == 0 else splitInfo\n return round(self.calcInfoGainByColumnSplit(data, structure, colIName) / splitInfo, 3)", "def test_contains_returns_true_on_tree_with_value_right(bst_right_balance):\n assert bst_right_balance.contains(6) is True\n assert bst_right_balance.contains(2) is True", "def test_balanced_tree_with_3_nodes_childens_values(balanced_3_nodes):\n assert balanced_3_nodes.root.right.value == 15\n assert balanced_3_nodes.root.left.value == 5", "def implement_SQLtree(X, y, labels, k, 
alpha_range, criterion, d, cur):\r\n\r\n kf = KFold(n_splits=k)\r\n min_avg_error = math.inf\r\n best_alpha = None\r\n avg_time_build_tree = 0\r\n for alpha in alpha_range:\r\n total_error = 0\r\n for train_idx, test_idx in kf.split(X):\r\n X_train, X_test = X[train_idx], X[test_idx]\r\n y_train, y_test = y[train_idx], y[test_idx]\r\n SQL_train = SQLTree(criterion, X_train, y_train, labels, \"articles\", cur)\r\n SQL_test = SQLTree(criterion, X_test, y_test, labels, \"articles\", cur)\r\n start = time.time()\r\n SQL_train_tree = SQL_train.build_tree(SQL_train.root, 0, d, SQL_train.labels, None, \"\")\r\n test_all(SQL_train)\r\n end = time.time()\r\n SQL_train.is_valid(SQL_train_tree)\r\n period = end - start\r\n pruned_SQLtree, pruned_alpha, error = SQL_train.prune_tree(SQL_train_tree, alpha, SQL_test.X, SQL_test.y,\r\n SQL_test.labels)\r\n total_error += error\r\n avg_time_build_tree += period\r\n avg_error = total_error / k\r\n if avg_error < min_avg_error:\r\n min_avg_error = avg_error\r\n best_alpha = alpha\r\n avg_time_build_tree = avg_time_build_tree / (k * len(alpha_range))\r\n\r\n return min_avg_error, best_alpha, avg_time_build_tree", "def is_height_balanced(self):\n\n return self.is_height_balanced_helper(self.root)[0]", "def findRightContext(tree,start,ignore):\n nrOfClosingBrs = 0\n nrOfOpeningBrs = 0\n firstPass = True\n if start+1 < len(tree):\n for currentIndex in range(start+1,len(tree)):\n if tree[currentIndex].symbol in ignore:\n continue\n elif tree[currentIndex].symbol == \"]\":\n if firstPass:\n return(emptyModule())\n else:\n nrOfClosingBrs = nrOfClosingBrs + 1\n elif tree[currentIndex].symbol == \"[\":\n nrOfOpeningBrs = nrOfClosingBrs + 1\n elif nrOfOpeningBrs == nrOfClosingBrs:\n return tree[currentIndex]\n firstPass = False\n else:\n return(emptyModule())", "def test_balance_known_tree(known_bst):\n assert known_bst[0].balance() == 0", "def test_contains_returns_true_on_tree_with_value(bst_balanced):\n assert bst_balanced.contains(6) is True\n assert bst_balanced.contains(3) is True", "def test_value_not_in_tree_returns_false(balanced_7_nodes):\n assert not balanced_7_nodes.contains(4)", "def test_evaluate_hierarchical(backend):\n # skip test for dask backend if dask is not installed\n if backend == \"dask\" and not _check_soft_dependencies(\"dask\", severity=\"none\"):\n return None\n\n y = _make_hierarchical(\n random_state=0, hierarchy_levels=(2, 2), min_timepoints=12, max_timepoints=12\n )\n X = _make_hierarchical(\n random_state=42, hierarchy_levels=(2, 2), min_timepoints=12, max_timepoints=12\n )\n y = y.sort_index()\n X = X.sort_index()\n\n forecaster = DirectReductionForecaster(LinearRegression())\n cv = SlidingWindowSplitter()\n scoring = MeanAbsolutePercentageError(symmetric=True)\n out_exog = evaluate(\n forecaster, cv, y, X=X, scoring=scoring, error_score=\"raise\", backend=backend\n )\n out_no_exog = evaluate(\n forecaster, cv, y, X=None, scoring=scoring, error_score=\"raise\", backend=backend\n )\n\n scoring_name = f\"test_{scoring.name}\"\n assert np.all(out_exog[scoring_name] != out_no_exog[scoring_name])", "def dfs(node, i, lo=0, hi=n):\n if not node: return \n mid = lo + hi >> 1\n ans[i][mid] = str(node.val)\n dfs(node.left, i+1, lo, mid) or dfs(node.right, i+1, mid+1, hi)", "def deepest(self,\r\n entrylist=None,\r\n is_string=False,\r\n abridged=False,\r\n always=False):\r\n\r\n if not always:\r\n\r\n if abridged and self.abr_maxdepth_found>0:\r\n return self.abr_maxdepth_found\r\n if not abridged and self.maxdepth_found>0:\r\n return 
self.maxdepth_found\r\n\r\n\r\n if entrylist is None:\r\n entrylist = self.default_dict['indexlist_indexes'].list\r\n maxdepth = 1\r\n\r\n for i_temp in entrylist:\r\n if not is_string:\r\n if i_temp.level() > maxdepth:\r\n maxdepth = i_temp.level()\r\n else:\r\n if abridged:\r\n if len(index_reduce(str(i_temp))) > maxdepth:\r\n maxdepth = len(index_reduce(str(i_temp)))\r\n else:\r\n if len(str(i_temp)) > maxdepth:\r\n maxdepth = len(str(i_temp))\r\n if not abridged:\r\n self.maxdepth_found = maxdepth\r\n if abridged:\r\n self.abr_maxdepth_found = maxdepth\r\n\r\n return maxdepth", "def impurity_reduction(self, xj, S):\r\n # Determine number of rows in left and right children and calculate respective impurities for parent, \r\n # left, and right \r\n if len(self.path) == 0:\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" <= \" + str(S) + \";\")\r\n n_left = self.cur.fetchone()[0]\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" > \" + str(S) + \";\")\r\n n_right = self.cur.fetchone()[0]\r\n\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \";\")\r\n I_A = float(self.cur.fetchone()[0])\r\n\r\n if n_left == 0 or n_right == 0:\r\n return 0\r\n else: \r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + xj + \" <= \" + str(S) + \";\")\r\n I_L = float(self.cur.fetchone()[0])\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + xj + \" > \" + str(S) + \";\")\r\n I_R = float(self.cur.fetchone()[0])\r\n\r\n\r\n else:\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" <= \" + str(S) + \" AND \" + \" AND \".join(self.path) + \";\")\r\n n_left = self.cur.fetchone()[0]\r\n\r\n self.cur.execute(\"SELECT COUNT(*) FROM \" + self.table_name + \" WHERE \" + xj + \" > \" + str(S) + \" AND \" + \" AND \".join(self.path) + \";\")\r\n n_right = self.cur.fetchone()[0]\r\n \r\n if n_left == 0 or n_right == 0:\r\n return 0\r\n \r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \";\")\r\n I_A = float(self.cur.fetchone()[0])\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \" AND \" + xj + \" <= \" + str(S) + \";\")\r\n I_L = float(self.cur.fetchone()[0])\r\n\r\n self.cur.execute(\"SELECT \" + self.criterion + \"(AVG(\" + self.y_name + \")) FROM \" + self.table_name + \" WHERE \" + \" AND \".join(self.path) + \" AND \" + xj + \" > \" + str(S) + \";\")\r\n I_R = float(self.cur.fetchone()[0])\r\n\r\n \r\n # Calculate change in impurity\r\n frac_left = n_left / (n_left + n_right)\r\n frac_right = n_right / (n_left + n_right)\r\n\r\n change_impurity = I_A - frac_left*I_L - frac_right*I_R\r\n \r\n return change_impurity", "def analyze_trees(fname, only_mb=False, slow_mb=False):\n cols = get_col_names(fname)\n tnums = get_tree_nums(fname)\n df = read_data(fname, cols)\n df['tree'] = make_tree_col(df, tnums)\n if slow_mb:\n df['TotalMass_mmp'] = False\n if HAVE_PBAR:\n tnums = tqdm(tnums, desc='Main Branches')\n for tn in tnums:\n mmps = main_trees(df.loc[df.tree == tn])\n mmps = np.isin(df.loc[df.tree == tn].index, mmps)\n df.loc[df.tree == tn, 'TotalMass_mmp'] = mmps\n 
else:\n df['TotalMass_mmp'] = main_trees_quick(df)\n\n df = verify_main_branches(df)\n if only_mb:\n return df.loc[df.TotalMass_mmp == 1]\n return df", "def count_binary_search_tree(self, test_cases):\n number_of_bst = []\n for i in test_cases:\n fact1 = 1\n for j in range(1, (i * 2) + 1):\n fact1 = fact1 * j\n\n fact2 = 1\n num = i + 1\n for l in range(1, num + 1):\n fact2 = fact2 * l\n\n nfact = 1\n\n for k in range(1, i + 1):\n nfact = nfact * k\n\n number_of_bst.append((fact1 // (fact2 * nfact)) % 100000007)\n return number_of_bst", "def _estimate_subtree(node: Tree.Node, data_matrix, idx, result_matrix):\n\n if node.is_leaf:\n result_matrix[idx,:] = node.item.model.estimate(data_matrix[idx,:])\n \n else:\n branches = node.item.split.pick_branches(data_matrix[idx,:])\n for b in np.unique(branches):\n _estimate_subtree(node[b], data_matrix, idx[branches==b], result_matrix)\n\n return result_matrix" ]
[ "0.55779785", "0.5408219", "0.5375125", "0.52919394", "0.5285259", "0.5265397", "0.523007", "0.5206214", "0.5131763", "0.5119118", "0.5102969", "0.50822806", "0.50254524", "0.50055486", "0.5000578", "0.498862", "0.49706918", "0.4968559", "0.49658915", "0.49245366", "0.48860818", "0.48827252", "0.4872405", "0.48620558", "0.48172337", "0.4815391", "0.4813557", "0.4813403", "0.4786351", "0.47734585", "0.4772007", "0.4767227", "0.47562397", "0.47282362", "0.47259483", "0.47232804", "0.4720387", "0.4718871", "0.47165138", "0.4715505", "0.47121015", "0.46947402", "0.46858677", "0.46837765", "0.46827394", "0.46827394", "0.46800506", "0.46729955", "0.46674237", "0.46561816", "0.46433657", "0.46389458", "0.46340764", "0.46326303", "0.4631233", "0.46205598", "0.46017432", "0.46010098", "0.46005046", "0.45865938", "0.4584149", "0.458268", "0.45785174", "0.45779487", "0.45772204", "0.45762825", "0.45737907", "0.4568489", "0.45592445", "0.4546605", "0.45371273", "0.4534256", "0.45270786", "0.45265484", "0.45202768", "0.45106015", "0.45047262", "0.4503886", "0.45030046", "0.4501645", "0.44969562", "0.4495858", "0.44956055", "0.44955218", "0.44944155", "0.4488448", "0.448666", "0.44835302", "0.4478066", "0.44704393", "0.44660747", "0.4454073", "0.4448903", "0.44474024", "0.44456798", "0.44443837", "0.44401908", "0.4438405", "0.44253016", "0.4424928" ]
0.5963019
0
Identifies the list of example indices that would follow the decision tree to node.
def identify_examples(self,db,labels,node): path = [] while node.parent != None: nkey = None for (k,c) in node.parent().children.iteritems(): if c is node: nkey = k break assert nkey != None path.append((node.parent(),nkey)) node = node.parent() path = path[::-1] nids = len(labels) ids = [] for id in xrange(nids): valid = True for n,ckey in path: f = n.feature val = featureMatrix[f,id] if val is None: #it's a None value, just continue on continue else: key = None if n.type == 'i': key = (0 if val <= n.value else 1) else: key = val if key != ckey: valid = False break if valid: ids.append(id) return ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def labeled_indices(self):\n return self._labeled_indices", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def inspectedIndex(self):\n if self.inspectedNodeIsVisible:\n return self.createIndex(0, 0, self._inspected_item)\n else:\n return self.rootIndex()", "def ligand_idxs(self):\n return self._ligand_idxs", "def get_indications(self):\n indications = np.zeros_like(self.predictions)\n for i in range(self.predictions.shape[0]):\n ind = np.where(self.predictions[i, :] - self.labels != 0.0)[0]\n indications[i, ind] = 1.0\n\n return indications", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]", "def output_node_ids(self):\n return [\n i\n for i in range(\n self.n_inputs + self.n_hidden,\n self.n_inputs + self.n_hidden + self.n_outputs,\n )\n ]", "def input_node_ids(self):\n return [i for i in range(self.n_inputs)]", "def _get_child_indices(self, current_index: int) -> List[int]:\n multiplier = current_index * 2\n left_index = multiplier + 1\n right_index = multiplier + 2\n\n return [left_index, right_index]", "def get_child_indices(idx: int):\n return 2 * idx + 1, 2 * idx + 2", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def index(self):\n return self._ll_tree.get_index()", "def _tree_field_indices(self):\n\n if self._tfi is not None:\n return self._tfi\n\n self.arbor._grow_tree(self)\n self._tfi = np.array([node.tree_id for node in self._tree_nodes])\n return self._tfi", "def get_relevant_indices(dataset, classes, target_classes):\n indices = []\n for i in range(len(dataset)):\n # Check if the label is in the target classes\n label_index = dataset[i][1] # ex: 3\n label_class = classes[label_index] # ex: 'cat'\n if label_class in target_classes:\n indices.append(i)\n return indices", "def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j", "def py_enumerate_list_index_target():\n target = [None]\n for target[0],k in enumerate(range(1,5)):\n print(target, k)", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def mainIndices(self):\n return self.i1, self.i2", "def hidden_node_ids(self):\n return [i for i in range(self.n_inputs, self.n_inputs + self.n_hidden)]", "def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def get_custom_indices():\n return [i for i, val in enumerate(all_topics) if val[2] == \"1\"]", "def childWellIndices(self):\n return self._wellIndices", "def all_node_ids(self):\n return [i for i in range(0, self.n_inputs + 
self.n_hidden + self.n_outputs)]", "def eligible_edges_with_indexes(self):\n return enumerate(self.edges)", "def indices(self, _user=None):\n return [p.index for p in self.get_active_smallvariant_cases()]", "def arg_indices(concept, mentions, toks):\r\n indices = []\r\n for i, tok in enumerate(toks):\r\n for m in mentions:\r\n if 'start' not in m:\r\n logging.warning('%s', m)\r\n if m['id'] == concept and m['start'] <= tok['start'] and tok['end'] <= m['end']:\r\n indices.append(i)\r\n break\r\n return indices", "def get_gt_hom_idxs(alt_num):\n last = -1\n hom_idxs = []\n for a in range(alt_num + 1):\n last = last + (a + 1)\n hom_idxs.append(last)\n return hom_idxs", "def _identify_determinism_check(parents_of, descendants_of , root_indices, observed_index):\n list_extract_and_union = lambda list_of_lists, indices: set().union(\n chain.from_iterable(list_of_lists[v] for v in indices))\n parents_of_observed = set(parents_of[observed_index])\n # descendants_of_roots = [self.descendants_of[v] for v in root_indices]\n # descendants_of_roots = set().union(*descendants_of_roots)\n descendants_of_roots = list_extract_and_union(descendants_of, root_indices)\n U1s = list(root_indices)\n Y = observed_index\n Xs = list(parents_of_observed.intersection(descendants_of_roots))\n return (U1s, [Y], Xs)", "def pt_index(*args):\n index = []\n x = check_pt_data(args[0])\n i = 0\n for line in args[0].Data.PTData.pt_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def get_index_list(self, relative_to, of_particles=None):\n\n # Implementation for base snapshot\n\n if self is not relative_to:\n raise RuntimeError(\"Not a descendant of the specified simulation\")\n if of_particles is None:\n of_particles = np.arange(len(self))\n\n return of_particles", "def node_ids(self):\n return [self.node_id]", "def get_valid_indices():\n return [i for i, val in enumerate(all_topics) if val[1] == \"1\"]", "def specified_unchanging_attributes(self) -> List[int]:\n indices = []\n\n for idx, (cpi, epi) in enumerate(zip(self.condition, self.effect)):\n if isinstance(epi, ProbabilityEnhancedAttribute):\n if cpi != self.cfg.classifier_wildcard and \\\n epi.does_contain(cpi):\n indices.append(idx)\n else:\n if cpi != self.cfg.classifier_wildcard and \\\n epi == self.cfg.classifier_wildcard:\n indices.append(idx)\n\n return indices", "def idx_adjacency_lists(self) -> List[List[int]]:\n result = []\n\n for intersection in self._intersection_list:\n nbs = []\n\n for nb in self.adj_dict[intersection]:\n nbs.append(self._intersection_to_idx[nb])\n\n result.append(nbs)\n\n return result", "def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)", "def reference_nodes_graph_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_graph_idx_reference", "def index_nodes(self):\n index_nodes = []\n for node in self.nodes:\n if 'indexnode' == node.get('infos').get('type'):\n 
index_nodes.append(node)\n return index_nodes", "def get_ner_hallucination_idx(self, y_true, y_pred):\n true_O = (y_true==3).astype(\"int\")\n predicted_ners = (y_pred!=3).astype(\"int\")\n hallucination_filter = np.all([true_O, predicted_ners], axis=0).astype(\"int\")\n hallucination_idx = np.nonzero(hallucination_filter)[0]\n return hallucination_idx", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def get_node_indices_and_levels(nd: np.ndarray):\n indices = []\n lvs = []\n for j in range(1, nd.shape[0]):\n if j == 1:\n indices = nd[j]\n lvs = nd[j + 1]\n elif j % 2 != 0 and j > 1:\n indices = np.append(indices, nd[j])\n elif j % 2 == 0 and j > 2:\n lvs = np.append(lvs, nd[j])\n return indices, lvs", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def originalData(self): \n self.__exampleIndices = array(list(range(0, self.__numExamples)))", "def prob_cl_indices(self, cl_idxs, cl_losses):\n log.info(\"Finding problematic structures\")\n loss_bound = np.mean(cl_losses) # Initial minimum loss\n loss_step = loss_bound / 500\n loss_bound += loss_step\n idxs = []\n while len(idxs) < 1.5 * self.refine_n_cl:\n log.info(\"Minimum cluster loss : %.4f\", loss_bound)\n cl_idxs_prob = np.concatenate(np.argwhere(cl_losses >= loss_bound))\n clusters = np.array(cl_idxs, dtype=object)[cl_idxs_prob]\n idxs = np.concatenate(clusters)\n loss_bound -= loss_step\n log.info(\"N structures included : %d\\n\", len(idxs))\n return idxs", "def FindIdxValues(X):\n data = X.select_dtypes(include=[\"float64\"])\n idx = np.argwhere(~np.isnan(data.values))\n idx[:, 1] += 4 # add ID variable columns\n StoE = pd.read_csv(\"msresist/data/MS/CPTAC/IDtoExperiment.csv\")\n assert all(StoE.iloc[:, 0] == data.columns), \"Sample labels don't match.\"\n StoE = StoE.iloc[:, 1].values\n tmt = [[StoE[idx[ii][1] - 4]] for ii in range(idx.shape[0])]\n return np.append(idx, tmt, axis=1)", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def parent_id(neuron, selected_index):\n parent_id = np.array([], dtype=int)\n for i in selected_index:\n p = neuron.parent_index[i]\n while(~np.any(selected_index == p)):\n p = neuron.parent_index[p]\n (ind,) = np.where(selected_index == p)\n parent_id = np.append(parent_id, ind)\n return parent_id", "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))", "def get_training_index():\n return list(range(0, 305))", "def isect_index(self):\n return self._lazy_isect_index()", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])", "def exercise_indexes():\n print(exercise_indexes.__doc__)\n print(\"The indexes of 'data' are:\", data.index)\n print(data, \"\\n\")\n print(\"Changing the indexes of 'data'\")\n print(data.reindex([2, 0, 1]), \"\\n\")\n print(\"Changing the indexes of 'data' randomly\")\n print(data.reindex(np.random.permutation(data.index)))", "def assert_stored_idxs(self):\n if type(self.idxs) == list:\n assert(type(self.idxs[0]) in [list, np.ndarray])\n if not self.staticneighs:\n assert(type(self.idxs[0][0]) in [list, np.ndarray])\n else:\n if '__len__' in dir(self.idxs[0]):\n if len(self.idxs[0]):\n assert(type(self.idxs[0][0]) in inttypes)\n else:\n assert(not any(self.idxs[0]))\n elif type(self.idxs) == np.ndarray:\n if 
self.staticneighs:\n assert(len(self.idxs.shape) == 2)\n else:\n assert(len(self.idxs.shape) == 3)\n# if self.ks is not None and not self.staticneighs:\n# assert(len(self.idxs) == len(self.ks))\n# else:\n# assert(len(self.idxs.shape) == 2)\n if self.staticneighs:\n assert(len(self.idxs) == len(self.iss))\n else:\n assert(len(self.idxs[0]) == len(self.iss))\n elif type(self.idxs) == slice:\n pass\n else:\n ### Probably redundant (Only testing purposes)\n# print type(self.idxs), self.idxs\n types = str(type(self.idxs))\n raise Exception(\"Not proper type in self.idxs. Type: %s.\" % types)", "def all_sampled_nodes_indexes(self) -> torch.LongTensor:\n all_sampled_nodes_indexes: _typing.Any = self.__all_sampled_nodes_indexes\n return all_sampled_nodes_indexes", "def test_label_indices_sample():\n expected = {0, 1, 2, 3}\n actual = util.get_label_indices(4, \"sample\")\n assert expected.intersection(set(actual))", "def alt_reps_idxs(self):\n\n idxs_grp = self.h5['{}/{}'.format(SETTINGS, ALT_REPS_IDXS)]\n return {name : ds[:] for name, ds in idxs_grp.items()}", "def indices(self):\n return self.index.indices", "def create_tip_index(tree):\r\n if hasattr(tree, '_tip_index'):\r\n return\r\n else:\r\n tree._tip_index = {n.Name: n for n in tree.tips()}", "def getLandmarkindices(self):\n return self.subsetindices", "def SectionIndicesConnectedToSoma(self):\n indices = []\n index = 0\n for each_section in self._section_list:\n if each_section.ParentId() == -1:\n indices.append(index)\n index += 1\n return indices", "def annihilation_list(self,other):\n if not isinstance(other,SlaterDeterminant):\n raise TypeError(\"Parameter other must be a SlaterDeterminant instance.\")\n diff = np.array(other) - np.array(self)\n if np.sum(np.abs(diff)) == 0: \n indices = []\n else:\n indices = np.where(diff==-1)[0].tolist()\n return indices", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def _get_indices_1(image_set, num_labels=2, num_protected=2):\r\n indices = [[[] for _ in range(num_protected)] for _ in range(num_labels)]\r\n for _, label, cluster, index in image_set:\r\n indices[label][cluster].append(index)\r\n\r\n return indices", "def geneIds(self):\n\t\treturn self._dataframe.index.tolist()", "def get_list_index(self):\r\n return self.n", "def narration_target(self):", "def test_label_indices_all():\n expected = [0, 1, 2]\n actual = util.get_label_indices(3, \"all\")\n assert expected == actual", "def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)", "def get_indices(self):\r\n return self._indices", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = 
np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def receptor_idxs(self):\n\n return self._receptor_idxs", "def get_index_from_well(self, well):\n pass", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def _iter_indices(self, frame, y):\n pass", "def index(self) -> int:", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def eligible_edges_with_indexes(self):\n return list(map(lambda e: (self.edges.index(e), e), self.eligible_edges))", "def print_ids(self):\n ids = [self.data[x][DATA_ID_INDEX] for x in self.index_list]\n print(ids)", "def _code_indices(self) -> Tuple[int, ...]:\n return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)", "def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()", "def indices(self):\n return range(len(self))", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def get_pulling_indices(self, weight):\n pass", "def get_unlabeled_idx(X_train, labeled_idx):\n return np.arange(X_train.shape[0])[np.logical_not(np.in1d(np.arange(X_train.shape[0]), labeled_idx))]", "def extract_test_nodes(data, num_samples, seed):\n np.random.seed(seed)\n test_indices = data.test_mask.cpu().numpy().nonzero()[0]\n node_indices = np.random.choice(test_indices, num_samples, replace=False).tolist()\n\n return node_indices", "def _forest_field_indices(self):\n return self._ffi", "def activeChildWellIndices(self):\n return self._activeWellIndices", "def demo_select_indexes():\n\n ps = tf.constant([[0.2, 0.8],\n [0.4, 0.6],\n [0.25, 0.75]])\n\n idxs = tf.constant([1, 0, 1])\n\n y = tf.gather_nd(\n ps,\n tf.transpose(tf.stack([tf.range(idxs.shape[0]), idxs]))) # [[0, 1], [1, 0], [2, 1]]\n\n with tf.Session('') as sess:\n print(sess.run(y))", "def get_data_idx(self)->list:\n return self.__data_idx", "def support_idx(self, j):\n return (j, j+self.p+1)" ]
[ "0.65589315", "0.63866156", "0.6244448", "0.6186481", "0.60354745", "0.5978617", "0.5964173", "0.5956777", "0.589274", "0.5866264", "0.58425367", "0.58398837", "0.58153033", "0.58101755", "0.58045644", "0.57759035", "0.57590544", "0.57380056", "0.57340384", "0.5718878", "0.57145804", "0.5710891", "0.5710064", "0.5708704", "0.56982917", "0.56888497", "0.56849015", "0.56807256", "0.56720734", "0.5664907", "0.5660559", "0.5657456", "0.5651805", "0.56404114", "0.56372017", "0.5633479", "0.5633352", "0.56331617", "0.56168", "0.5615726", "0.55875564", "0.5586731", "0.55798906", "0.5569111", "0.5555958", "0.55550736", "0.5543166", "0.5534975", "0.5533313", "0.5521196", "0.55184615", "0.55102646", "0.5510004", "0.5483168", "0.5482802", "0.5481627", "0.54806256", "0.54762065", "0.5466191", "0.54611546", "0.54565877", "0.5453525", "0.5446332", "0.5438052", "0.543677", "0.54294854", "0.54282075", "0.5427726", "0.54217166", "0.54158103", "0.54149705", "0.54076403", "0.54023486", "0.5398488", "0.53895277", "0.53873146", "0.5387155", "0.5385636", "0.5383938", "0.5375347", "0.53699696", "0.536442", "0.5362093", "0.53566504", "0.53547096", "0.5352686", "0.534075", "0.53347296", "0.53321797", "0.5330139", "0.5326533", "0.5324994", "0.53212947", "0.5320536", "0.5317269", "0.53160024", "0.5315687", "0.53137136", "0.53124774", "0.5310638" ]
0.68075645
0
Same as greedy learn, but with a maximum number of nodes. Rather than a DFS, this uses a priority queue that at each step splits the node with the maximum improvement in misclassification error. At most maxnodes are in the resulting tree, and the depth is limited to maxdepth. Returns the total number of misclassifications of the training set. There is a lowmemory mode when self.lowmem == True or self.lowmem == 'auto' and the number of saved ids at a node grows beyond a certain number (self.lowmem_threshold, 10m by default). In lowmemory mode, the subset of of examples at a given node is determined dynamically, which incurs a O(|D|d) cost per node, where d is the depth of the node. Overall this raises running time by a factor of approximately O(|D| log_2 |D|).
def greedy_learn_search(self,db,labels): queue = PriorityQueue() dolowmem = (self.lowmem == True) numidsets = 0 root_ids = range(len(labels)) queue.push((self.root,root_ids),len(labels)) numnodes = 1 deepest = 0 err = 0 while len(queue) > 0 and numnodes+2 <= self.maxnodes: #print "%d nodes, priority %d"%(numnodes,queue.nextkey()) nerr = queue.nextkey() (node,trainingset) = queue.pop() #print "Greedy learn",len(trainingset) if trainingset is None: trainingset = self.identify_examples(db,labels,node) if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples: #print " Hit depth or training set limit" node.pick_best_label(db,labels,trainingset) err += misclassification_error([labels[id] for id in trainingset]) continue features = self.feature_subset(node,db,labels,trainingset) cost = node.pick_best_split(db,labels,trainingset,features) numidsets -= len(trainingset) #do a split if node.type == 'v': continue elif node.type == 's': #discrete split node.children = dict() #select sub-indices Eids = defaultdict(list) noneids = [] for id in trainingset: v = db[node.feature,id] if v is None: #item doesn't exist, it's a missing value noneids.append(id) else: Eids[v].append(id) #determine whether to switch to low-memory mode if not dolowmem and self.lowmem=='auto': for v,vids in Eids.iteritems(): numidsets += len(vids)+len(noneids) if numidsets > self.lowmem_threshold: print "Decision tree learner switching to low-memory mode" dolowmem = True trainingset = None numnodes += len(Eids) #print "Split sizes",[len(v) for v in Eids.itervalues()] #print "None size",len(noneids) for v,vids in Eids.iteritems(): #print "->",len(vids),"+",len(noneids) #recurse c = DecisionTreeNode(node) node.children[v] = c err = misclassification_error([labels[id] for id in vids+noneids]) cids = (None if dolowmem else vids+noneids) queue.push((c,cids),err) if c.depth > deepest: deepest = c.depth print "Decision tree learner: Reached node with depth",deepest else: #do an inequality split assert node.type == 'i',"Got a weird type? "+str(node.type) leftids = [] rightids = [] for id in trainingset: val = db[node.feature,id] if val is not None: if val <= node.value: leftids.append(id) else: rightids.append(id) else: leftids.append(id) rightids.append(id) if len(leftids)==0 or len(rightids)==0: print "node feature "+str(node.feature)+" doesn't have a valid split value "+str(node.value) vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None] print "min,max of training set:",min(vals),max(vals) print "cost is",cost raw_input() assert len(leftids) > 0 and len(rightids) > 0 if not dolowmem and self.lowmem=='auto': numidsets += len(leftids) + len(rightids) if numidsets > self.lowmem_threshold: print "Decision tree learner switching to low-memory mode" dolowmem = True trainingset = None numnodes += 2 c1 = DecisionTreeNode(node) c2 = DecisionTreeNode(node) node.children = {0:c1,1:c2} #print "->",len(leftids) #print "->",len(rightids) err1 = misclassification_error([labels[id] for id in leftids]) err2 = misclassification_error([labels[id] for id in rightids]) if dolowmem: leftids = None rightids = None queue.push((c1,leftids),err1) queue.push((c2,rightids),err2) if c1.depth > deepest: deepest = c1.depth print "Decision tree learner: Reached node with depth",deepest #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes if len(queue) > 0: print "%d nodes remaining in queue, setting to leaves"%(len(queue),) for (node,trainingset) in queue: node.pick_best_label(db,labels,trainingset) err += misclassification_error([labels[id] for id in trainingset]) return err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def max_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max_nodes\")", "def total_max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"total_max_node_count\")", "def num_actual_nodes(tree):\n return (tree.n_node_samples > 0).sum()", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if 
c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def n_trees(self):\n return len(self.data_kd)", "def data_flow_positive_node_count_max(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)", "def get_n_leaves(clf):\n leaves = clf.tree_.children_left == -1\n leaves = np.arange(0,clf.tree_.node_count)[leaves]\n return len(leaves)", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def max_node_size(self):\n return self.max_node_capacity", "def get_n_best(self):\n pass", "def number_of_nodes(self):\n return int(self._data['number_of_nodes'])", "def prune_trivial_subtrees(self):\n num_pruned = 0\n if not self.is_leaf:\n children_classes = set()\n num_trivial_children = 0\n for child_node in self.nodes:\n num_pruned += child_node.prune_trivial_subtrees()\n if child_node.is_leaf:\n num_trivial_children += 1\n children_classes.add(child_node.most_common_int_class)\n if num_trivial_children == len(self.nodes) and len(children_classes) == 1:\n self.is_leaf = True\n num_pruned += num_trivial_children\n self.nodes = []\n return num_pruned", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def number_of_nodes(self) -> int:\n return self.graph.number_of_nodes()", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] 
!= -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def min_node_count(self) -> int:\n return pulumi.get(self, \"min_node_count\")", "def essential_node_count(self) -> int:\n return sum(\n 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS\n )", "def _num_nodes(self):\n return int(self._node_map[-1])", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def Nnodes(self):\n return len(self.nodes)", "def num_nodes(self) -> Optional[int]:\n return super().num_nodes", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n else:\n components = []\n multiplicities = []\n for x in self.irreducible_components():\n if components.count(x) == 0:\n components.append(x)\n multiplicities.append(1)\n else:\n y = components.index(x)\n multiplicities[y] = multiplicities[y]+1\n\n sizes = [ x.class_size() for x in components ]\n if NotImplemented in sizes:\n print(\"Size unknown\")\n return NotImplemented\n else:\n return prod( [binomial(sizes[i]+multiplicities[i]-1,\n multiplicities[i] ) for i in range (0,len(sizes))])", "def fixed_node_count(self) -> Optional[int]:\n return pulumi.get(self, \"fixed_node_count\")", "def getbestnumberoftrees(features: ndarray, target: ndarray, limit:int) -> 
tuple:\n\n # Defining the initial accuracy value to compare with different number of trees in training\n accuracy = 0\n accuracyList = []\n\n for n in range(1, limit+1, 1):\n # Training\n trained_model = InternalRandomForest.modeltrain(features, target, n)\n\n # Calculating the percentual accuracy of the training\n accuracy_t = accuracy_score(target, trained_model.predict(features), normalize=True)\n\n # Build accuracy array for this set of number of trees\n accuracyList.append(accuracy_t)\n\n # Verifying if the current training is better than the last one\n if accuracy_t > accuracy:\n bestNumberTrees = n\n accuracy = accuracy_t\n\n # Obtain best trained model\n best_model = InternalRandomForest.modeltrain(features, target, bestNumberTrees)\n\n return bestNumberTrees, accuracyList, best_model", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def node_impurity(self):\n\t\t\n\t\tgini = 0.0\n\t\ttotal = 0.0\n\n\t\tfor c in range(self.n_classes):\n\t\t\ttmp = self.label_count_total[c]\n\t\t\tgini += tmp * tmp\n\n\t\tgini = 1.0 - gini / (self.weighted_n_node_samples *\n\t\t\t\t\t\t\t self.weighted_n_node_samples)\n\n\t\treturn gini", "def set_nb_clusters(self):\n \n print(\"Finding the optimal number of clusters...\")\n \n sample = ro.r.matrix(self.df[self.df[\"filename\"].between(1, 4)][\"active_power\"].to_numpy())\n \n r=ro.r(\"\"\"\n check = function(matrix) {\n n_clust = fviz_nbclust(matrix, kmeans, k.max = 15)\n\n n_clust = n_clust$data\n\n max_cluster = as.numeric(n_clust$clusters[which.max(n_clust$y)])\n return(max_cluster)\n }\n \"\"\")\n\n result = r(sample)\n self.conf[\"nb_clust\"] = int(result[0])\n \n print(f\"Optimal number of clusters is {self.conf['nb_clust']}\\n\")", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1", "def ncore(self):", "def max_network_performance(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_network_performance\")", "def cross_validate(self, curr_dataset, num_folds, max_depth, min_samples_per_node,\n is_stratified=True, print_tree=False, seed=None, print_samples=False,\n use_stop_conditions=False, max_p_value_chi_sq=0.1):\n classifications = [0] * curr_dataset.num_samples\n num_correct_classifications = 0\n num_correct_classifications_wo_unkown = 0\n total_cost = 0.0\n total_cost_wo_unkown = 0.0\n classified_with_unkown_value_array = [False] * curr_dataset.num_samples\n num_unkown = 0\n unkown_value_attrib_index_array = [0] * curr_dataset.num_samples\n max_depth_per_fold = []\n num_nodes_per_fold = []\n num_valid_nominal_attributes_in_root_per_fold = []\n num_values_root_attribute_list = []\n num_trivial_splits = 0\n time_taken_pruning_per_fold = []\n num_nodes_pruned_per_fold = []\n num_correct_trivial_classifications = 0\n\n fold_count = 0\n\n sample_indices_and_classes = 
list(enumerate(curr_dataset.sample_class))\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n random.shuffle(sample_indices_and_classes)\n shuffled_sample_indices, shuffled_sample_classes = zip(*sample_indices_and_classes)\n\n if is_stratified:\n for (training_randomized_indices,\n validation_randomized_indices) in StratifiedKFold(n_splits=num_folds).split(\n shuffled_sample_indices,\n shuffled_sample_classes):\n\n training_samples_indices = [shuffled_sample_indices[index]\n for index in training_randomized_indices]\n validation_sample_indices = [shuffled_sample_indices[index]\n for index in validation_randomized_indices]\n\n if print_samples:\n print('Samples used for validation in this fold:')\n print(validation_sample_indices)\n print()\n\n ((curr_classifications,\n curr_num_correct_classifications,\n curr_num_correct_classifications_wo_unkown,\n curr_total_cost,\n curr_total_cost_wo_unkown,\n curr_classified_with_unkown_value_array,\n curr_num_unkown,\n curr_unkown_value_attrib_index_array),\n curr_max_depth,\n curr_time_taken_pruning,\n curr_num_nodes_pruned) = self.train_and_test(curr_dataset,\n training_samples_indices,\n validation_sample_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n max_depth_per_fold.append(curr_max_depth)\n num_nodes_per_fold.append(self.get_root_node().get_num_nodes())\n num_valid_nominal_attributes_in_root_per_fold.append(\n sum(self._root_node.valid_nominal_attribute))\n try:\n root_node_split_attrib = self.get_root_node().node_split.separation_attrib_index\n if curr_dataset.valid_nominal_attribute[root_node_split_attrib]:\n num_values_root_attribute_list.append(sum(\n num_samples > 0\n for num_samples in self.get_root_node().contingency_tables[\n root_node_split_attrib].values_num_samples))\n except AttributeError:\n num_trivial_splits += 1\n for curr_index, validation_sample_index in enumerate(validation_sample_indices):\n classifications[validation_sample_index] = curr_classifications[curr_index]\n classified_with_unkown_value_array[validation_sample_index] = (\n curr_classified_with_unkown_value_array[curr_index])\n unkown_value_attrib_index_array[validation_sample_index] = (\n curr_unkown_value_attrib_index_array[curr_index])\n num_correct_classifications += curr_num_correct_classifications\n num_correct_classifications_wo_unkown += curr_num_correct_classifications_wo_unkown\n total_cost += curr_total_cost\n total_cost_wo_unkown += curr_total_cost_wo_unkown\n num_unkown += curr_num_unkown\n num_correct_trivial_classifications += round(\n len(validation_sample_indices) *\n (self.get_trivial_accuracy(validation_sample_indices) / 100.0))\n\n fold_count += 1\n time_taken_pruning_per_fold.append(curr_time_taken_pruning)\n num_nodes_pruned_per_fold.append(curr_num_nodes_pruned)\n\n if print_tree:\n print()\n print('-' * 50)\n print('Fold:', fold_count)\n self.save_tree()\n else:\n for (training_samples_indices,\n validation_sample_indices) in KFold(n_splits=num_folds).split(\n shuffled_sample_indices):\n\n ((curr_classifications,\n curr_num_correct_classifications,\n curr_num_correct_classifications_wo_unkown,\n curr_total_cost,\n curr_total_cost_wo_unkown,\n curr_classified_with_unkown_value_array,\n curr_num_unkown,\n curr_unkown_value_attrib_index_array),\n curr_max_depth,\n curr_time_taken_pruning,\n curr_num_nodes_pruned) = self.train_and_test(curr_dataset,\n training_samples_indices,\n validation_sample_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n 
max_p_value_chi_sq)\n max_depth_per_fold.append(curr_max_depth)\n num_nodes_per_fold.append(self.get_root_node().get_num_nodes())\n num_valid_nominal_attributes_in_root_per_fold.append(\n sum(self._root_node.valid_nominal_attribute))\n try:\n root_node_split_attrib = self.get_root_node().node_split.separation_attrib_index\n if curr_dataset.valid_nominal_attribute[root_node_split_attrib]:\n num_values_root_attribute_list.append(sum(\n num_samples > 0\n for num_samples in self.get_root_node().contingency_tables[\n root_node_split_attrib].values_num_samples))\n except AttributeError:\n num_trivial_splits += 1\n for curr_index, validation_sample_index in enumerate(validation_sample_indices):\n classifications[validation_sample_index] = curr_classifications[curr_index]\n classified_with_unkown_value_array[validation_sample_index] = (\n curr_classified_with_unkown_value_array[curr_index])\n unkown_value_attrib_index_array[validation_sample_index] = (\n curr_unkown_value_attrib_index_array[curr_index])\n num_correct_classifications += curr_num_correct_classifications\n num_correct_classifications_wo_unkown += curr_num_correct_classifications_wo_unkown\n total_cost += curr_total_cost\n total_cost_wo_unkown += curr_total_cost_wo_unkown\n num_unkown += curr_num_unkown\n num_correct_trivial_classifications += round(\n len(validation_sample_indices) *\n (self.get_trivial_accuracy(validation_sample_indices) / 100.0))\n\n fold_count += 1\n time_taken_pruning_per_fold.append(curr_time_taken_pruning)\n num_nodes_pruned_per_fold.append(curr_num_nodes_pruned)\n\n if print_tree:\n print()\n print('-' * 50)\n print('Fold:', fold_count)\n self.save_tree()\n\n return (classifications,\n num_correct_classifications,\n num_correct_classifications_wo_unkown,\n total_cost,\n total_cost_wo_unkown,\n classified_with_unkown_value_array,\n num_unkown,\n unkown_value_attrib_index_array,\n time_taken_pruning_per_fold,\n num_nodes_pruned_per_fold,\n max_depth_per_fold,\n num_nodes_per_fold,\n num_valid_nominal_attributes_in_root_per_fold,\n num_values_root_attribute_list,\n num_trivial_splits,\n 100.0 * num_correct_trivial_classifications / curr_dataset.num_samples)", "def get_free_nodes(self):\n return len(api.node.Node.list(self.workflow.request, False))", "def num_node_groups(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"num_node_groups\")", "def complexity(self, mode='#nodes'):\n if mode == '#nodes':\n return len(self.nodes)", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. 
If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). 
I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def maxTasksAchievable(self):\n maxTasks = 0\n for w in self._workers:\n maxTasks = maxTasks + w.multitask\n return maxTasks", "def kuhn_munkres(G): # maximum profit bipartite matching in O(n^4)\n assert len(G) == len(G[0])\n n = len(G)\n mu = [None] * n # Empty matching\n mv = [None] * n\n lu = [max(row) for row in G] # Trivial labels\n lv = [0] * n\n for u0 in range(n):\n if mu[u0] is None: # Free node\n while True:\n au = [False] * n # Empty alternating tree\n av = [False] * n\n if improve_matching(G, u0, mu, mv, au, av, lu, lv):\n break\n improve_labels(G, au, av, lu, lv)\n return (mu, sum(lu) + sum(lv))", "def batch_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_node_count\")", "def num_nodes(self) -> int:\n return pulumi.get(self, \"num_nodes\")", "def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n 
greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n 
n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list", "def get_num_nodes(self):\n assert self.is_fitted_\n nthreads = _process_nthreads(self.nthreads)\n n_nodes, n_terminal = self._cpp_obj.get_n_nodes(ctypes.c_bool(self._is_extended_).value,\n ctypes.c_int(nthreads).value)\n return n_nodes, n_terminal", "def find_max(self):\n return max(self.nodes, key=int)", "def get_lcc_size(G,seed_nodes):\n\n # getting subgraph that only consists of the black_nodes\n g = nx.subgraph(G,list(seed_nodes))\n\n if g.number_of_nodes() != 0:\n # get all components \n max_CC = max(nx.connected_component_subgraphs(g), key=len)\n return len(max_CC.nodes()) # size of largest connected component\"\n\n else:\n return 0", "def count(self):\n\t\treturn len(list(self.nodes))", "def fit(self, X_train, Y_train, X_unlabeled, I_train=None):\n\n logger.info('Fitting semi-supervised classifier ...')\n\n start = time.time()\n\n # Creating a subgraph\n self.subgraph = Subgraph(X_train, Y_train, I_train)\n\n # Finding prototypes\n self._find_prototypes()\n\n # Gather current number of nodes\n current_n_nodes = self.subgraph.n_nodes\n\n for i, feature in enumerate(X_unlabeled):\n node = Node(current_n_nodes + i, 0, feature)\n\n self.subgraph.nodes.append(node)\n\n # Creating a minimum heap\n h = Heap(size=self.subgraph.n_nodes)\n\n for i in range(self.subgraph.n_nodes):\n if self.subgraph.nodes[i].status == c.PROTOTYPE:\n # If yes, it does not have predecessor nodes\n self.subgraph.nodes[i].pred = c.NIL\n\n # Its predicted label is the same as its true label\n self.subgraph.nodes[i].predicted_label = self.subgraph.nodes[i].label\n\n # Its cost equals to zero\n h.cost[i] = 0\n\n # Inserts the node into the heap\n h.insert(i)\n\n else:\n # Its cost equals to maximum possible value\n h.cost[i] = c.FLOAT_MAX\n\n while not h.is_empty():\n # Removes a node\n p = h.remove()\n\n # Appends its index to the ordered list\n self.subgraph.idx_nodes.append(p)\n\n # Gathers its cost\n self.subgraph.nodes[p].cost = h.cost[p]\n\n for q in range(self.subgraph.n_nodes):\n if p != q:\n if h.cost[p] < h.cost[q]:\n if self.pre_computed_distance:\n weight = self.pre_distances[self.subgraph.nodes[p]\n .idx][self.subgraph.nodes[q].idx]\n\n else:\n weight = self.distance_fn(self.subgraph.nodes[p].features, self.subgraph.nodes[q].features)\n\n # The current cost will be the maximum cost between the node's and its weight (arc)\n current_cost = np.maximum(h.cost[p], weight)\n\n if current_cost < h.cost[q]:\n # `q` node has `p` as its predecessor\n self.subgraph.nodes[q].pred = p\n\n # And its predicted label is the same as `p`\n self.subgraph.nodes[q].predicted_label = self.subgraph.nodes[p].predicted_label\n\n # As we may have unlabeled nodes, make sure that `q` label equals to `q` predicted label\n self.subgraph.nodes[q].label = self.subgraph.nodes[q].predicted_label\n\n # Updates the heap `q` node and the current cost\n h.update(q, current_cost)\n\n # The subgraph has been properly trained\n self.subgraph.trained = True\n\n end = time.time()\n\n train_time = end - start\n\n logger.info('Semi-supervised classifier has been 
fitted.')\n logger.info('Training time: %s seconds.', train_time)", "def num_classes():\n return NUM_CLASSES", "def num_nodes(self):\n return len(self._node_reg)", "def num_tree(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumTree(self.handle, ctypes.byref(out)))\n return out.value", "def numa_nodes(self):\n return int(self.num_numa_nodes) # type: ignore", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def fit(self, X, y=None):\n super().fit(X, y)\n cur_state = self._last_state_\n\n cur_state[\"n_clusters\"] = int(self.n_clusters)\n if cur_state[\"n_clusters\"] < 0:\n raise ValueError(\"n_clusters must be >= 0\")\n\n cur_state[\"gini_threshold\"] = float(self.gini_threshold)\n if not (0.0 <= cur_state[\"gini_threshold\"] <= 1.0):\n raise ValueError(\"gini_threshold not in [0,1]\")\n\n _postprocess_options = (\"boundary\", \"none\", \"all\")\n cur_state[\"postprocess\"] = str(self.postprocess).lower()\n if cur_state[\"postprocess\"] not in _postprocess_options:\n raise ValueError(\"postprocess should be one of %r\"%_postprocess_options)\n\n cur_state[\"compute_full_tree\"] = bool(self.compute_full_tree)\n cur_state[\"compute_all_cuts\"] = bool(self.compute_all_cuts)\n\n\n # apply the Genie++ algorithm (the fast part):\n res = internal.genie_from_mst(self._mst_dist_, self._mst_ind_,\n n_clusters=cur_state[\"n_clusters\"],\n gini_threshold=cur_state[\"gini_threshold\"],\n noise_leaves=(cur_state[\"M\"]>1),\n compute_full_tree=cur_state[\"compute_full_tree\"],\n compute_all_cuts=cur_state[\"compute_all_cuts\"])\n\n self.n_clusters_ = res[\"n_clusters\"]\n self.labels_ = res[\"labels\"]\n self._links_ = res[\"links\"]\n self._iters_ = res[\"iters\"]\n\n if self.labels_ is not None:\n self._postprocess(cur_state[\"M\"], cur_state[\"postprocess\"])\n\n if cur_state[\"compute_full_tree\"]:\n Z = internal.get_linkage_matrix(self._links_,\n self._mst_dist_, self._mst_ind_)\n self.children_ = Z[\"children\"]\n self.distances_ = Z[\"distances\"]\n self.counts_ = Z[\"counts\"]\n\n return self", "def node_count(self, *n_labels):\n if not n_labels:\n return len(self._nodes)\n elif len(n_labels) == 1:\n return len(self._nodes_by_label.get(n_labels[0], ()))\n else:\n return sum(1 for _ in self.nodes(*n_labels))", "def tree_size(self) -> int:\n Q = Queue()\n count = 0\n Q.put(self.root)\n while not Q.empty():\n node = Q.get()\n count += 1\n for child in node.children.values():\n Q.put(child)\n return count", "def num_nodes(self):\n return len(self.nodes)", "def find_optimal_model(\n self, training_set, use_aic=False, min_node=10, \n max_node=90, start_size=20, end_size=5, node_samples=4, \n check_decreasing_ll=False, missing_residues=None): \n if not self.show_warnings:\n warning_list = warnings.filters[:]\n warnings.filterwarnings('ignore', category=TorusDBNWarning)\n \n if missing_residues is not None:\n missing_residues = read_missing_residues(missing_residues)\n \n self.seq_list, self.mismask_list = self._create_sequence_and_mismask(\n training_set, missing_residues) \n \n max_position = 0\n start_res = start_size\n avg_full_LL = []\n \n IC_array = [[]*n for n in xrange(node_samples + 2)]\n \n # Decrease size resolution until threshold (end_size)\n while start_size >= end_size:\n # Loop over node sizes\n for i in xrange(min_node, max_node + 1, start_size):\n \n # Continues if at the maximum node size from the previous resolution \n if (len(IC_array[0]) > 0 and i == 
IC_array[0][max_position]) or i <= 0:\n continue\n\n # Add node-size value to header\n IC_array[0].append(i)\n IC_cum = 0\n \n if start_res == start_size:\n avg_full_LL.append(0)\n \n for j in xrange(1, node_samples + 1):\n self.info(\"Training with node size = %d (sample %d)\" % (i, j))\n self.model.create_dbn(hidden_node_size=i)\n IC = self._train(use_aic)\n IC_array[j].append(IC)\n IC_cum += IC\n \n if (check_decreasing_ll):\n # Save forward likelihoods in order to infer if it is decreasing\n hmm_ll_calculator = LikelihoodInfEngineHMM(\n dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)\n ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list)\n avg_full_LL[-1] = avg_full_LL[-1] + ll_full/self._get_observation_count()\n \n # Calculate mean IC for each node-size and add to array\n IC_array[node_samples + 1].append(IC_cum / node_samples)\n \n # Check if log-likelihood is decreasing \n if (len(avg_full_LL) > 1) and (avg_full_LL[-1] < avg_full_LL[-2]) and \\\n (start_res == start_size) and check_decreasing_ll:\n self.info(\"Log-likelihood is decreasing. There is no reason to test higher node sizes.\")\n break\n \n # Column number for maximum IC value\n max_position = IC_array[node_samples + 1].index(max(IC_array[node_samples + 1])) \n self.info(\"Optimal node size: %s\\n\" % (IC_array[0][max_position]))\n \n # Update resolution\n start_size = start_size / 2\n \n # Update node limits\n min_node = IC_array[0][max_position] - start_size\n max_node = IC_array[0][max_position] + start_size\n \n IC_max_node = IC_array[0][max_position]\n \n # Final train to the optimal model\n dbn_list = []\n IC_list = []\n \n for j in xrange(node_samples):\n self.model.create_dbn(hidden_node_size=IC_max_node)\n IC = self._train(use_aic)\n IC_list.append(IC)\n dbn_list.append(self.model.dbn)\n \n IC_max = max(IC_list)\n self.model.dbn = dbn_list[IC_list.index(IC_max)]\n \n self.info(\"Optimal Model:\\nHidden node size = %s\\nIC = %s\\n\" % (IC_max_node, IC_max)) \n \n if not self.show_warnings:\n warnings.filters = warning_list \n return IC_max_node, IC_max", "def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")", "def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")", "def count_matrix_largest(self, effective=False):\n return self.count_matrix(connected_set=0, effective=effective)", "def number_of_nodes(self, ntype: str = None) -> int:\n return self.num_nodes(ntype)", "def max_depth(self) -> int:\n return 0", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = 
classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def get_tree_size(self, node):\n\n # If the tree has not been created yet.\n if node == None:\n return 0\n n_nodes = 1\n for child in node.children:\n n_nodes += self.get_tree_size(node.children[child])\n return n_nodes", "def count_dead_node(self):\n count = 0\n for node in self.node:\n if node.energy < 0:\n count += 1\n return count", "def num_leaves(tree):\n return ((tree.n_node_samples > 0) & (tree.feature == INVALID_VALUE)).sum()", "def numprocesses(self):\n info = self.info()\n return info['max_processes']", "def nb_errors_nb(self, input_data, target):\n input_data_resize = input_data.view(2000, 1, 14, 14)\n number_output = self(input_data_resize)\n number_output = number_output.view(1000, 2, 10)\n predicted_classes = number_output.argmax(2)\n predictions = predicted_classes[:, 0] <= predicted_classes[:, 1]\n target_labels = target\n nb_errors = torch.sum(predictions.type(torch.LongTensor) != target_labels)\n return float(nb_errors) * 100 / input_data.size(0)", "def monitor_milp_nodes(model):\n nodecnt = model.cbGet(GRB.Callback.MIP_NODCNT)\n if nodecnt > MILPSolver.params.BRANCH_THRESHOLD:\n MILPSolver.status = SolveResult.BRANCH_THRESHOLD\n model.terminate()", "def get_num_nodes(self):\n return len(self._nodes)", "def get_num_nodes(self):\n return len(self._nodes)", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def number_of_files_per_node(files, number_of_nodes):\n\n files_per_node = float(len(files))/float(number_of_nodes)\n if files_per_node > 0.:\n return int(math.floor(files_per_node))\n else:\n return int(math.ceil(files_per_node))", "def num_nodes(self) -> Optional[int]:\n return pulumi.get(self, \"num_nodes\")", "def get_optimal_rounds(dtrain, param):\n num_round = 1000\n bst = xgb.cv(param, dtrain, num_round, nfold=10,\n metrics={'logloss', 'auc'}, seed=0,\n callbacks=[xgb.callback.print_evaluation(show_stdv=True),\n xgb.callback.early_stop(10)])\n return len(bst)-1", "def snmpqosqos_sch_node_regulated_count(self) :\n\t\ttry :\n\t\t\treturn 
self._snmpqosqos_sch_node_regulated_count\n\t\texcept Exception as e:\n\t\t\traise e", "def __init__(self, capacity):\n assert isinstance(capacity, int)\n if capacity <= 0:\n raise ValueError(\n 'Sum tree capacity should be positive. Got: {}'.format(capacity))\n\n self.nodes = []\n self.depth = int(np.ceil(np.log2(capacity)))\n self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx\n self.high_idx = capacity + self.low_idx\n self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.\n self.capacity = capacity\n\n self.highest_set = 0\n\n self.max_recorded_priority = 1.0", "def max_pool_size(self) -> ConfigNodePropertyInteger:\n return self._max_pool_size", "def _num_nodes(self):\n return len(self._nid2partid)", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def modularity():\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def max_cardinality():\r\n #create a list containing the number of each vertex involvement.\r\n array = []\r\n for i in adj:\r\n array += [i[0],i[1]]\r\n\r\n #compute the degree by counting the involment\r\n degree = Counter(array).most_common()\r\n\r\n #retrieve the degree only\r\n degree_ = [ i[1] for i in degree]\r\n\r\n degree_ = np.array(degree_)\r\n \r\n max_m = None\r\n \r\n #check if m is valid\r\n for i in range(degree[0][1]+2)[2:]:\r\n \r\n #valid if there are at least m vertex with degree equals to at least m-1 \r\n if i < len(np.where(degree_>=i-1)[0]):\r\n max_m = i\r\n else:\r\n break\r\n max_m += 1\r\n print(f'maximum possible clique cardinality :{max_m}')\r\n return max_m", "def prune_path(clf, X, y, max_n_leaves=10, n_iter=10,\n test_size=0.1, random_state=None, n_jobs=1):\n \n\n from sklearn.base import clone\n from sklearn.cross_validation import StratifiedShuffleSplit,ShuffleSplit\n from sklearn.metrics import roc_auc_score,mean_squared_error\n from multiprocessing.dummy import Pool as ThreadPool\n from itertools import repeat\n import pandas as pd\n #import copy\n \n #classification score\n def my_auc(estimator, X, y):\n y_score = estimator.predict_proba(X)[:,1] # You could also use the binary predict, but probabilities should give you a more realistic score.\n return roc_auc_score(y, y_score)\n \n #regression score\n def my_nmse(estimator, X, y):\n y_pre = estimator.predict(X) # You could also use the binary predict, but probabilities should give you a more realistic score.\n return -mean_squared_error(y, y_pre)\n \n\n if len(np.unique(y)) == 2: \n scoring_fuc = my_auc\n \n else:\n scoring_fuc = my_nmse\n \n def multip_run(fuction,task_zip,n_jobs = 1):\n\n #Multi-process Run\n\n pool = ThreadPool(processes=n_jobs)\n results = pool.starmap(fuction, task_zip)\n pool.close()\n pool.join()\n return results \n\n def OneFoldCut(clf,X_train, y_train,X_test,y_test,max_n_leaves):\n estimator = clone(clf)\n \n fitted = estimator.fit(X_train, y_train)\n \n if max_n_leaves < get_n_leaves(fitted):\n n_leaves = max_n_leaves\n \n else:\n n_leaves = get_n_leaves(fitted)\n \n print('###### Iters true start leaves is %d #######' % n_leaves)\n \n #cut_num = list(range(2,n_leaves, 1))\n cut_num = list(range(n_leaves-1,1,-1))\n #n = len(cut_num)\n loc_indexs = []\n loc_scores = []\n for i in cut_num:\n #clf1 = copy.deepcopy(fitted)\n #clf1 = clone(fitted)\n #clf1.prune(i)\n 
fitted.prune(i)\n onescore = scoring_fuc(fitted,X_test,y_test)\n #onescore = scoring_fuc(clf1,X_test,y_test)\n loc_scores.append(onescore)\n loc_indexs.append(i)\n \n S = pd.DataFrame(loc_scores,index=loc_indexs)\n\n return S\n\n\n #scores = list()\n if len(np.unique(y)) == 2: \n kf = StratifiedShuffleSplit(y,\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n else:\n kf = ShuffleSplit(len(y),\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n \n X_trains = [X[tr] for tr,ts in kf]\n y_trains = [y[tr] for tr,ts in kf]\n \n X_tests = [X[ts] for tr,ts in kf]\n y_tests = [y[ts] for tr,ts in kf]\n \n task_zip = zip(repeat(clf),\n X_trains,\n y_trains,\n X_tests,\n y_tests,\n repeat(max_n_leaves))\n \n scores = multip_run(OneFoldCut,task_zip,n_jobs = n_jobs)\n \n df = pd.concat(scores,axis=1)\n df.columns = range(len(df.columns))\n\n return df #zip(*scores)", "def run_inference(self,isMax = 1,findZ = 0):\r\n# st=time.time()\r\n self.make_connected()\r\n self.nop = 0 # number of operations\r\n T=CliqueTree(self,isMax,findZ)\r\n if isMax == 0:\r\n self.marg_clique_tree = T\r\n elif isMax==1:\r\n self.MAP_clique_tree = T\r\n# print time.time()-st'=\r\n self.nop += T.nop\r\n M=[]\r\n for i in self.g.nodes(): # assuming nodes are labeled 0..N-1\r\n for s,data in T.nodes_iter(data=True):\r\n f = data['fac']\r\n if i in f.var:\r\n if isMax==0:\r\n dummy = f.Marginalize(scipy.setdiff1d(f.var,i))\r\n if findZ == 0:\r\n dummy.val = dummy.val/sum(dummy.val)\r\n else:\r\n dummy = f.MaxMarginalize(scipy.setdiff1d(f.var,i))\r\n self.nop += scipy.prod(f.card)\r\n M.append(dummy)\r\n break\r\n# print time.time()-st\r\n return M", "def min_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min_node_count\")", "def node_count(self) -> int:\n return pulumi.get(self, \"node_count\")", "def calcNumberOfMajorityClassRows(self, data, structure):\n maxCount, classIndex = 0, structure['class']['index']\n for value in structure['class']['values']:\n newData = list(filter(lambda y: y[classIndex] == value, data))\n if len(newData) >= maxCount:\n maxCount = len(newData)\n return maxCount", "def find_nonexceed(trainy, train_tree_node_ID, pred_tree_node_ID, thres):\n \n npred = pred_tree_node_ID.shape[0]\n out = np.zeros((npred, thres.shape[0]))*np.nan\n for i in prange(pred_tree_node_ID.shape[0]):\n for j in prange(thres.shape[0]):\n idxs = np.where(train_tree_node_ID == pred_tree_node_ID[i, :])[0]\n sample = trainy[idxs]\n out[i, j] = (sample < thres[j]).sum() / float(sample.shape[0])\n return out" ]
[ "0.6156422", "0.6155967", "0.59827256", "0.5911448", "0.5853999", "0.583166", "0.5827011", "0.5772355", "0.5760408", "0.570417", "0.5690565", "0.5642428", "0.55889016", "0.556948", "0.5526726", "0.5512847", "0.5484053", "0.54723763", "0.54671174", "0.5442169", "0.54306436", "0.54249865", "0.54195684", "0.54076916", "0.5398807", "0.5395521", "0.53886503", "0.5386159", "0.5379833", "0.53706694", "0.5364482", "0.53596795", "0.5338462", "0.5303687", "0.53020746", "0.52964956", "0.5283708", "0.5258031", "0.5245907", "0.5243828", "0.5234427", "0.5228542", "0.5216902", "0.5214565", "0.52111", "0.52017194", "0.5197478", "0.5192105", "0.51906997", "0.5190176", "0.518958", "0.51797146", "0.516165", "0.51607025", "0.5159052", "0.51584744", "0.51389337", "0.5137795", "0.5137325", "0.5136019", "0.51352674", "0.5132464", "0.51270515", "0.5120067", "0.51186997", "0.51174223", "0.51117593", "0.5107439", "0.5107439", "0.5102459", "0.5100052", "0.5098897", "0.50969005", "0.50955534", "0.509545", "0.50948024", "0.50946516", "0.509027", "0.508577", "0.5075877", "0.5075877", "0.5075487", "0.5070735", "0.5069273", "0.50634146", "0.5061162", "0.5059809", "0.504753", "0.50470996", "0.5045348", "0.5045348", "0.50387216", "0.5036986", "0.5032529", "0.50322425", "0.50322366", "0.503058", "0.5030186", "0.502718", "0.50248766" ]
0.62972176
0
Initializes the list. If entries is given, this initializes the entries of the list. If memoized = True, any lazily evaluated entries are saved after their first evaluation.
def __init__(self,entries=None,memoized=False):
    if entries is not None:
        self.entries = entries[:]
    else:
        self.entries = []
    self.memoized = memoized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, contents=()):\n self._data = [self._Item(k, v) for k,v in contents] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def init_all_entries(self) -> bool:\n raise NotImplementedError", "def __init__(self, owner, entries=None):\n\n self.owner = owner\n # self.entries = EntriesDict({})\n self.entries = EntriesDict(self)\n\n if entries is None:\n return\n\n # self.add_entries(entries)", "def __init__(self, list_of_entry_names, screen_size, width=100, height=30, x_offset=0, y_offset=0):\n self.entry_names = list_of_entry_names\n self.screen_size = screen_size\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.main_list = ListObj(list_of_entry_names, screen_size, width=width, height=height, x_offset=x_offset,\n y_offset=y_offset)\n self.entry_value_map = dict()\n for _ in list_of_entry_names:\n self.entry_value_map[_] = 0\n self.values_list = None\n self.update_values()", "def __init__(self, contents=()):\n self. data = [ self._Item(k,v) for k,v in contents ] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def __init__(self, entries=[]):\n\n for item in entries:\n self.append(_WebObject(item))", "def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):\n init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,\n dtype=dtypes.float32,\n shape=[num_signatures])\n self._temp_cache_var[graph] = [\n init_value for _ in range(num_traced_tensors)]", "def __init__(self, items=None):\n\n if items is None:\n items = []\n self.set = dict((item, []) for item in items)\n self.heap = list(self.set.keys())\n hpq.heapify(self.heap)\n self.counter = itertools.count()", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def __init__(self, init_size=8):\n # Create a new list (used as fixed-size array) of empty linked lists\n self.buckets = [LinkedList() for _ in range(init_size)]", "def __init__(self):\n self.hashmap = [[[],[]] for _ in range(self.N)]", "def init_items(self, lazy=False):\n return []", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable)) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def __init__(self, values=None):\n self.values = list()\n self.populate(values)", "def __init__(self, cache=None, num_args=None):\n self.cache = cache if cache is not None else {}\n self.num_args = num_args", "def init_cache(self):\n self.left_lane_cache = list()\n self.right_lane_cache = list()", "def __init__(self, items=None):\n # type: (Optional[List[object]]) -> _WeakList\n list.__init__(self, self._refs(items or []))", "def initialize(self):\n self.assmts = {}\n\n offset = 0\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.bit = 1 << offset\n assmts.mask = assmts.bit\n self.assmts[entry] = assmts\n offset += 1\n\n for block in self.blocks:\n block.stats = block.phis.values() + block.stats\n for stat in block.stats:\n if isinstance(stat, (PhiNode, NameAssignment)):\n stat.bit = 1 << offset\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= stat.bit\n offset += 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bound:\n block.i_kill |= 
self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def test_cache_init(case, method):\n if method == \"init\":\n cache = CacheDict(case.init, cache_len=case.cache_len)\n elif method == \"assign\":\n cache = CacheDict(cache_len=case.cache_len)\n for (key, val) in case.init:\n cache[key] = val\n else:\n assert False\n\n # length is max(#entries, cache_len)\n assert cache.__len__() == case.len\n\n # make sure the first entry is the one ejected\n if case.cache_len > 1 and case.init:\n assert \"one\" in cache.keys()\n else:\n assert \"one\" not in cache.keys()", "def __init__(self, initial_length=2, resizing_factor=2):\n\n # Initialise underlying list, with no elements\n self.main_list = [None] * initial_length\n\n # Initialise variable to store number of elements inserted into\n # main_list, which will always be less than or equal to list length\n self.num_elements = 0\n\n self.resizing_factor = resizing_factor", "def initialize(self):\n self.assmts = {}\n\n bit = 1\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.mask = assmts.bit = bit\n self.assmts[entry] = assmts\n bit <<= 1\n\n for block in self.blocks:\n for stat in block.stats:\n if isinstance(stat, NameAssignment):\n stat.bit = bit\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= bit\n bit <<= 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bounded:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.values():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def __init__(self, items=[]):\n self.set = dict((item, True) for item in items)\n self.heap = self.set.keys()\n heapq.heapify(self.heap)", "def __init__(self, iterable=None):\n self._seen = set()\n self._list = []\n if iterable is not None:\n self.extend(iterable)", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable))) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def __init__(self, iterable=None):\n self.list = LinkedList()\n\n if iterable:\n for item in iterable:\n self.push(item)", "def __init__(self, inference_state, lazy_value_list):\n super(_FakeSequence, self).__init__(inference_state)\n self._lazy_value_list = lazy_value_list", "def __init__(self):\n        self.list=[]\n        self.hashmap={}\n        ", "def _populate(self):\n if not self._populated:\n logging.debug(\"Populating lazy list %d (%s)\" % (id(self), self.__class__.__name__))\n self.populate()\n self._populated = True", "def initialize(self):\r\n for cell in self.free_cell_list:\r\n cell.unlock()\r\n self.add_cell(cell)\r\n self.free_cell_list.clear()", "def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}", "def __init__(self, networks=None):\n self.entries = []\n self._lock = threading.RLock()\n\n # if not explicitly specified determine network from interfaces\n if networks:\n self.networks = set(networks)\n else:\n self.networks = set(interface_networks())", "def __init__(self, term_list, links=[]):\n\t\t# do type check\n\t\tif not isinstance(term_list, 
list):\n\t\t\traise TypeError('term_list must be of type list')\n\t\tif not isinstance(links, list):\n\t\t\traise TypeError('links must be of type list')\n\t\tself.term_dict = {x: term_list.count(x) for x in term_list}\n\t\tself.links = copy.deepcopy(links)", "def _make_versions_(self):\n\n try:\n self._versions_ = self._get_versions_()\n except AttributeError:\n self._versions_ = [self._entries_]\n\n if self._hash_versions_ is None:\n\n try:\n self._hash_versions_ = self._get_hash_versions_()\n except AttributeError:\n self._hash_versions_ = self._versions_", "def __init__(self):\n self.size = 10000\n self.hashmap = [None] * self.size", "def __init__(self, values: list[int] = []):\n self._len = 0\n self._head = None\n\n for value in values:\n self.push(value)", "def __init__(self, iterable=None):\n # Initialize a new list (dynamic array) to store the items\n self.list = list()\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self):\n self.li=[[False *100000] for i in range(100000)]", "def add_iterator(self,\r\n entrylist,\r\n keyset=None):\r\n\r\n if keyset is None:\r\n keyset = set()\r\n\r\n self.default_dict['iterators'].append(entrylist)\r\n self.iter_list_iterator = cycle(list\r\n (range\r\n (len\r\n (self.default_dict['iterators']))))\r\n## display.noteprint((alerts.ADDED,\r\n## formkeys(entrylist)))\r\n\r\n iname = EMPTYCHAR\r\n keylist = self.return_least_keys(keyset)\r\n if len(keylist) > 5:\r\n keylist = keylist[0:5]\r\n\r\n for t_temp in keylist:\r\n iname += t_temp+DASH\r\n iname = iname[:-1]\r\n\r\n self.add_iterator_name(len(self.default_dict['iterator_names']),\r\n iname)\r\n self.dd_changed = True", "def __init__(self, iterable=None):\n self.list = LinkedList()\n\n if iterable:\n for item in iterable:\n self.enqueue(item)", "def __init__(self):\n self.hashset = [[] for _ in range(self.N)]", "def mapper_get_items_init(self):\n if int(self.options.iteration) > 1:\n with open(self.options.f, 'r') as fh:\n self.frequent_items = set(fh.read().splitlines())\n else:\n self.frequent_items = {}", "def __init__(self, list = []):\n # initialize empty heap\n self.heap = []\n\n # initialize heap with provided list\n for element in list:\n self.add(element)", "def load_history_entries(self, *entries):\n # Simplified version:\n for entry in entries:\n try:\n self[entry.url.host] += [entry]\n except KeyError:\n self[entry.url.host] = [entry]\n \n \n temp_dict = {entry.url.host: [] for entry in entries} \n for entry in entries:\n temp_dict[entry.url.host] += [entry]\n\n # Update the dictionary\n # self.update(temp_dict) # Will override any lists with the same host name\n for host, entry in temp_dict.items():\n #try:\n self[host] += [entry]\n #except IndexError:\n #self[host] = [entry]", "def __init__(self, iterable=None):\n self.heap = []\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self, iterable=None):\n # Initialize a new linked list to store the items\n self.list = LinkedList()\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self):\n self.file_name = \"entries.csv\"\n self.csv_header = \"date,name,minutes,note\"\n try:\n with open(self.file_name) as file:\n reader = csv.DictReader(file)\n self.entries = list(reader)\n except FileNotFoundError:\n with open(self.file_name, \"a\") as file:\n file.write(f'{self.csv_header}\\n')\n\n try:\n for i in range(len(self.entries)):\n entry = self.entries[i]\n self.entries[i] = Entry(\n entry[\"date\"],\n entry[\"name\"],\n 
entry[\"minutes\"],\n entry[\"note\"]\n )\n print(f\"Worklog with {len(self.entries)} entries has been loaded.\\n\")\n print(\"Starting program...\")\n time.sleep(.75)\n except TypeError:\n raise TypeError(\"Could not read data file.\"\n + \" Ensure that CSV is properly formatted.\")\n except AttributeError:\n print(\"No existing worklog found.\\nNew worklog has been created.\\n\")\n self.entries = []\n print(\"Starting program...\")\n time.sleep(.75)", "def __init__(self, visible_caches: List[Cache], request_sequence: List[int]):\n self.visible_caches = sorted(visible_caches, key=lambda c: c.token)\n self.request_sequence = request_sequence\n self.complete_misses = 0\n pass", "def __init__(self, persistent=True):\n super().__init__()\n self.name_cache = {}\n self.run_cache = {}\n self.row_cache = {}\n self.persistent = persistent\n\n if self.persistent:\n self.load_cache()", "def init_cache(data_list: list, sr: int, cache_level: int, audio_settings: dict, n_cache_workers: int = 4) -> list:\n\n cache = []\n loader_fn = functools.partial(cache_item_loader, sr=sr, cache_level=cache_level, audio_settings=audio_settings)\n\n pool = mp.Pool(n_cache_workers)\n\n for audio in tqdm(pool.imap(func=loader_fn, iterable=data_list), total=len(data_list)):\n cache.append(audio)\n \n pool.close()\n pool.join()\n\n return cache", "def __init__(self, nodes, thunks, pre_call_clear):\r\n if len(nodes) != len(thunks):\r\n raise ValueError()\r\n self.nodes = nodes\r\n self.thunks = thunks\r\n self.pre_call_clear = pre_call_clear\r\n self.call_counts = [0] * len(nodes)\r\n self.call_times = [0] * len(nodes)\r\n self.time_thunks = False\r\n\r\n # This variable (self.need_update_inputs) is overshadowed by\r\n # CLazyLinker in CVM which has an attribute of the same name that\r\n # defaults to 0 (aka False).\r\n self.need_update_inputs = True", "def init_iterable(self, inp):\n inp = make_iter(inp)\n nsize = len(inp)\n self._npages = nsize // self.height + (0 if nsize % self.height == 0 else 1)\n self._data = inp", "def __init__(self, node=None, limit=10):\n self.head = node # Head of cache is most recent\n self.tail = node # Tail of cache is oldest\n self.limit = limit\n self.length = 1 if node is not None else 0\n # self.storage = DoublyLinkedList()", "def memoize(maxsize=None, *args, **kwargs):\n return _coconut.functools.lru_cache(maxsize, *args, **kwargs)", "def __init__(self):\n # Initializing an empty list.\n self.mylist = []", "def __init__(self):\n # HashMap内部元素个数\n self.elements_count: int = 0\n # HashMap内部bucket数组的长度\n self.capacity: int = 16384\n # HashMap内部的数组, 用dummyHead的好处是Python没有显示指出引用修改,还是固定bucket数组,只修改数组各元素的next指针更好,不会出现UB\n # 缺点是初始化好慢啊,容易超时\n self.bucket: List[ListNode] = [ListNode(key=-1, value=0)] * self.capacity", "def __init__(self, iterable=None):\n self.linked_list = LinkedList()\n if iterable:\n for item in iterable:\n self.push(item)", "def __init__(self):\n self._list = []\n self._dict = {}", "def add_entries(entries_list):\r\n #| - add_entries\r\n sum_tot = 0.\r\n for entry in entries_list:\r\n if entry is None:\r\n summand = 0.\r\n else:\r\n summand = entry\r\n sum_tot += summand\r\n\r\n return(sum_tot)\r\n #__|\r", "def initialize(self):\n cards = [Card(rank, suit) for suit in self._suits\n for rank in self._ranks]\n self.setAllCards(cards)", "def __reduce__(self):\n init_args = (\n self.__class__,\n self._pack_items(),\n )\n if self.cache_timeout:\n init_kwargs = {'cache_timeout': self.cache_timeout}\n else:\n init_kwargs = {}\n return (_unpickle_cached_list, init_args, 
init_kwargs)", "def __init__(self):\r\n self._items = [[] for _ in range(20)]", "def __init__(self, lazy = False, **args):\n self.initialized = False\n if not lazy:\n self.set_up(**args)", "def update_entries(entries: Entries, data: dict) -> None:\n # TODO: Is mutating the list okay, making copies is such a pain in the ass\n for entry in entries:\n entry.update(data)", "def populate_hash_list(self):\n checkout = 'tmp/repo/tmp/keylime-checkout'\n\n import_ostree_commit(\n os.getcwd(),\n self._metadata.build_dir,\n self._metadata)\n subprocess.check_call([\n 'ostree', 'checkout',\n '--repo=tmp/repo', '-U',\n self._metadata['ostree-commit'], checkout])\n self.hash_from_path(checkout)\n\n # Extract initramfs contents\n initramfs_path = ensure_glob(\n os.path.join(\n checkout, 'usr/lib/modules/*/initramfs.img'))[0]\n initramfs_path = os.path.realpath(initramfs_path)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n skipcpio = subprocess.Popen(\n ['/usr/lib/dracut/skipcpio', initramfs_path],\n stdout=subprocess.PIPE)\n gunzip = subprocess.Popen(\n ['gunzip', '-c'],\n stdin=skipcpio.stdout,\n stdout=subprocess.PIPE)\n cpio = subprocess.Popen(\n ['cpio', '-idmv'],\n stdin=gunzip.stdout,\n cwd=tmpdir)\n cpio.wait(timeout=300) # timeout of 5 minutes\n self.hash_from_path(tmpdir)", "def __init__(self, items: list) -> None:\n if items == []:\n self._first = None\n self._rest = None\n else:\n self._first = items[0]\n self._rest = RecursiveList(items[1:])", "def start_memo(self, attributes):\n self.memo = True", "def __init__(self):\n self.hashmap = [[] for _ in range(self._cap)]", "def __init__(self, list, store_none=True):\n super().__init__()\n self.list = list\n self.store_none = store_none", "def __init__(self, init_size=8):\n self.size = 0\n self.buckets = [LinkedList() for i in range(init_size)]", "def __init__(self):\n self.list = []\n self.dict = {}", "def __init__(self):\n self.list = []\n self.dict = {}", "def __init__(self, values=None):\n\n self.dict = {} # each instance of Set has its own dict property\n # which is what we'll use to track memnerships\n if values is not None:\n for value in values:\n self.add(value)", "def __init__(self, *values, cmp=DEFAULT_CMP):\n\n self.cmp = cmp\n\n if len(values) == 0:\n self.a = []\n return\n\n x = values[0]\n if not is_series(x):\n self.a = self.build_heap([x])\n return\n\n assert is_series(x)\n assert len(x) > 0\n\n self.a = self.build_heap(x)", "def __init__(self, lst=[]):\n self.__length = 0 # current length of the linked list\n self.__head = None # pointer to the first node in the list\n for e in lst: # initialize the list,\n self.add(e) # by adding elements one by one", "def __init__(self, f):\n self.f = f\n self.memo = {}\n print('Calling __init__()')", "def __init__(self,max_size=0):\n\n object.__init__(self)\n self._maxsize=max_size\n self._dict={}\n self._lock=Lock()\n\n # Header of the access list\n if self._maxsize:\n self._head=Entry(None)\n self._head._previous=self._head\n self._head._next=self._head", "def __init__(self, entries):\n # objects representing database records\n self.entries = entries", "def __init__(self):\n self.size = 1000\n self.hash_table = [None] * self.size", "def __init__(self, input_list=None, enabled=True, enabled_full=True):\n self.enabled = enabled\n self.enabled_full = enabled_full\n\n self._lock = Lock()\n self._entities = {} # marked_id: user|chat|channel\n\n if input_list:\n self._input_entities = {k: v for k, v in input_list}\n else:\n self._input_entities = {} # marked_id: hash\n\n # TODO Allow disabling 
some extra mappings\n self._username_id = {} # username: marked_id\n self._phone_id = {} # phone: marked_id", "def fillCache(self):\n items = self.source.getRecent()\n items.reverse() # make sure the most recent ones are added last to the cache\n for item in items:\n self.cache.append(item.title)", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def __init__(self, **entries):\n\n self.jsonOptions = {}\n for (key, value) in entries.iteritems():\n webObj = _WebObject(value)\n self.jsonOptions[key] = webObj\n self.__dict__[key] = webObj", "def __init__(self):\n self.ItemHashList = {}\n self.ItemList = []\n self.lock = threading.Lock()", "def fill_cache_table():\n products = []\n for query in ['bread', 'milk', 'rice']:\n grocery = grocery_search(query)\n products += get_all_products_from_grocery_search(grocery)\n\n orm = ORM()\n for product in products:\n orm.add_cache(**product)", "def add_entries(self, *entries: Entry):\n for entry in entries:\n self.add_entry(entry)", "def __init__(self, items=None):\n\n if items is None:\n self.items = []\n else:\n self.items = items", "def initialize(self,settings = None):\n self.evaluated = 0\n self.evals = 0\n self.initializer.evaluate(self)", "def __init__(self):\n self.dict = {}\n self.list = []", "def __init__(self):\n self.dict = {}\n self.list = []", "def __init__(self):\n self.dict = {}\n self.list = []", "def init(l):\n global lock\n lock = l", "def init(l):\n global lock\n lock = l", "def memoize(obj):\r\n cache = obj.cache = {}\r\n\r\n @functools.wraps(obj)\r\n def memoizer(*args, **kwargs):\r\n key = str(args) + str(kwargs)\r\n if key not in cache:\r\n cache[key] = obj(*args, **kwargs)\r\n # only keep the most recent 100 entries\r\n if len(cache) > 100:\r\n cache.popitem(last=False)\r\n return cache[key]\r\n return memoizer", "def __init__(self, cache_dir: str, cache_size: int):\n self.cache_dir = cache_dir\n self.cache_size = int(cache_size * 1e6)\n self.index = {}\n self.touch_list = []\n self._populate_index()", "def __init__(self, vallist=[]):\n self.data = vallist[:]\n self.size = len(self.data)", "def start_bulk_input(\n self,\n keys: Collection[KT],\n callback: Optional[Callable[[], None]] = None,\n ) -> \"CacheMultipleEntries[KT, VT]\":\n\n entry = CacheMultipleEntries[KT, VT]()\n entry.add_global_invalidation_callback(callback)\n\n for key in keys:\n self._pending_deferred_cache[key] = entry\n\n return entry", "def __init__(self, deferred):\n self._values = []\n self._deferred = deferred", "def __init__(self, iterable=None):\n # Initialize a new linked list to store the items\n # print(\"self __init__\", self)\n self.list = LinkedList()\n # self.top = self.list.head\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self, iterable=None):\n # Initialize a new list (dynamic array) to store the items\n self.list = list()\n print(\"iterable\", iterable)\n print(\"self.list\", self.list)\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self, entries):\r\n if isinstance(entries, str): # filename\r\n self.contigs = readstatsFile(entries)\r\n else:\r\n self.contigs = OrderedDict()\r\n for entry in entries:\r\n self.contigs[entry.contigName] = entry" ]
[ "0.5535386", "0.54829353", "0.5454826", "0.53897613", "0.5307603", "0.5187028", "0.5180785", "0.514409", "0.50772923", "0.5034808", "0.5021943", "0.5018495", "0.49956906", "0.49621394", "0.49150985", "0.49012715", "0.4892579", "0.48834223", "0.48831782", "0.48757192", "0.485907", "0.48379377", "0.48367697", "0.48344493", "0.4796964", "0.47895494", "0.47819415", "0.4774125", "0.47710106", "0.47707924", "0.47640717", "0.47574615", "0.4752443", "0.47512633", "0.4723961", "0.4722004", "0.47083986", "0.47046536", "0.46843958", "0.46689036", "0.4657498", "0.46501973", "0.46442956", "0.46394458", "0.46383286", "0.46335903", "0.46293393", "0.46267313", "0.46230933", "0.4621055", "0.46123552", "0.46035567", "0.45935792", "0.45927468", "0.4585088", "0.4582491", "0.45672265", "0.45593914", "0.45580253", "0.45577222", "0.45534125", "0.4545854", "0.45447668", "0.45434582", "0.45393625", "0.45392218", "0.45382726", "0.45351163", "0.4529838", "0.45216623", "0.45216623", "0.45205146", "0.45197675", "0.45172364", "0.45045334", "0.45034352", "0.45017418", "0.44993782", "0.44987914", "0.44839838", "0.448136", "0.44787318", "0.4464341", "0.44503954", "0.4438271", "0.44344252", "0.44340795", "0.4430872", "0.4430872", "0.4430872", "0.44297007", "0.44297007", "0.44216612", "0.44210005", "0.44185856", "0.44184786", "0.43981805", "0.43848377", "0.43822128", "0.43792662" ]
0.7950624
0
Given a training and testing dataset, builds a decision tree and tests it
def test_decision_tree(train,test,maxnodes=None): tree = DecisionTree() tree.maxnodes = maxnodes errors = tree.learn(train,'label') print "Decision tree makes",errors,"errors" print "Depth",tree.depth(),"nodes",tree.numNodes() if tree.numNodes() < 100: tree.pprint() if errors > 0: print "Training errors:" for id,e in enumerate(train.entries): res = tree.predict(e[:-1]) if res != e[-1]: if len(e[:-1]) > 10: print " Error on",id,"prediction",res else: print " Error on",e[:-1],"prediction",res print "Testing error:" tp,tn,fp,fn = 0,0,0,0 for e in test.entries: res = tree.predict(e[:-1]) if res and e[-1]: tp += 1 elif res and not e[-1]: fp += 1 elif not res and e[-1]: fn += 1 else: tn += 1 Ntest = len(test.entries) print "True +: %g, True -: %g"%(float(tp)/Ntest,float(tn)/Ntest) print "False -: %g, False +: %g"%(float(fn)/Ntest,float(fp)/Ntest) print "Overall error: %g"%(float(fn+fp)/Ntest,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_and_evaluate_decision_tree(X_train, y_train, X_test, y_test):\n model = DecisionTreeClassifier(criterion='entropy')\n model.fit(X_train, y_train)\n y_pred = model.predict(X_train)\n y_heldPred = model.predict(X_test)\n acc_train = accuracy_score(y_train, y_pred)\n acc_heldOut = accuracy_score(y_test, y_heldPred)\n return acc_train, acc_heldOut", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def run_train_test(training_file, testing_file):\n\n training = parse_file(training_file)\n training = np.array(training)\n\n X_train = training[:,:4]\n Y_train = training[:,4]\n\n testing = parse_file(testing_file)\n testing = np.array(testing)\n\n X_test = testing[:,:4]\n Y_test = testing[:,4]\n\n gini_clf = DecisionTreeClassifier(random_state=0)\n gini_clf.fit(X_train, Y_train)\n gini_Y_hat = gini_clf.predict(X_test)\n gini_tp, gini_tn, gini_fp, gini_fn, gini_err = eval_results(Y_test, gini_Y_hat)\n\n entropy_clf = DecisionTreeClassifier(criterion=\"entropy\", random_state=0)\n entropy_clf.fit(X_train, Y_train)\n entropy_Y_hat = entropy_clf.predict(X_test)\n entropy_tp, entropy_tn, entropy_fp, entropy_fn, entropy_err = eval_results(Y_test, entropy_Y_hat)\n\n return {\n \"gini\":{\n 'True positives': gini_tp,\n 'True negatives': gini_tn,\n 'False positives': gini_fp,\n 'False negatives': gini_fn,\n 'Error rate': gini_err\n },\n \"entropy\":{\n 'True positives': entropy_tp,\n 'True negatives': entropy_tn,\n 'False positives': entropy_fp,\n 'False negatives': entropy_fn,\n 'Error rate': entropy_err\n }\n }", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def create_tree(f_train, f_test, l_train, l_test):\n # initialize model\n model = DecisionTreeClassifier(max_depth=2)\n\n # train it on training data\n model.fit(f_train, l_train)\n\n # gather the model's predictions for train\n train_predictions = model.predict(f_train)\n\n # gather the model's predictions for test\n test_predictions = model.predict(f_test)\n\n # calculate accuaracy of train\n print('Tree Train Accuracy: ', accuracy_score(l_train, train_predictions))\n\n # calculate accuracy of test\n print('Tree Test Accuracy: ', accuracy_score(l_test, test_predictions))\n\n return model", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n 
file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def main():\n \n # 1. Learn a decision tree from the data in training.txt\n print \"--Building trees--\"\n train_examples = read_file('training.txt')\n print(train_examples)\n attrs = range(len(train_examples[0])-1)\n rand_tree = decision_tree_learning(train_examples, attrs, use_gain=False)\n gain_tree = decision_tree_learning(train_examples, attrs, use_gain=True)\n print \"--Done building--\"\n print\n\n # 2. Document the tree you got\n print \"--Random tree--\"\n print_tree(rand_tree)\n print\n print \"--Learn tree--\"\n print_tree(gain_tree)\n print\n\n # 3. Classify all examples in the test-set\n test_examples = read_file('test.txt')\n print \"--Testing random tree--\"\n test(rand_tree, test_examples, attrs)\n print\n print \"--Testing information gain tree--\"\n test(gain_tree, test_examples, attrs)\n print \"--Done testings--\"", "def train_and_evaluate_decision_stump(X_train, y_train, X_test, y_test):\n model = DecisionTreeClassifier(criterion='entropy', max_depth=4)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_train)\n y_heldPred = model.predict(X_test)\n acc_train = accuracy_score(y_train, y_pred)\n acc_heldOut = accuracy_score(y_test, y_heldPred)\n return acc_train, acc_heldOut", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def main(args):\n if args.train_test_split < 0.2 or args.train_test_split > 0.8:\n print(\"Bad value for train_test_split, range is 0.2 - 0.8\")\n sys.exit()\n\n dataset = pd.read_csv(args.train_file)\n\n x_data = dataset.loc[:, (dataset.columns != args.classification_column) \\\n & (dataset.columns != \"Survey_id\")]\n y_data = dataset[args.classification_column].to_numpy()\n dataset_headers = list(x_data.columns)\n x_data = x_data.fillna(0).to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, \\\n test_size=args.train_test_split)\n\n\n dtc = DecisionTreeClassifier(max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n dtc = dtc.fit(x_train, y_train)\n dtc_score = dtc.score(x_test, y_test)\n\n\n export_graphviz(dtc, out_file=\"decision_tree.dot\", feature_names=dataset_headers, \\\n rounded=True, precision=1, filled=True)\n os.system(\"dot -Tpng decision_tree.dot -o decision_tree.png\")\n\n\n rfc = RandomForestClassifier(n_estimators=args.estimators, max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n rfc.fit(x_train, y_train)\n rfc_score = rfc.score(x_test, y_test)\n\n 
file = open('result.txt', 'w')\n file.write(f'Decisions tree score = {dtc_score}\\n')\n file.write(f'Random forest score = {rfc_score}\\n')\n file.close()", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([2]), set([0, 1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:1, 1:1, 2:0})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.48)", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance", "def test_machine_learning():", "def __init__(self, data, features, resulting_feature, criterion='entropy'):\n self.__train_data = data\n self.__features = features\n self.__resulting_feature = resulting_feature\n self.__criterion = criterion\n self.__tree = self.__built_tree(self.__train_data, self.__features, self.__resulting_feature, None)\n self.accuracy_of_previous_test = 0", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test 
score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def decision_tree(self, min_impurity_splits = None, is_voice_data = True):\n title = \"Learning Curves (Decision Tree - voice dataset)\"\n if not is_voice_data:\n title = \"Learning Curves (Decision Tree - EEG dataset)\"\n estimators = []\n for min_impurity_split in min_impurity_splits:\n estimator = tree.DecisionTreeClassifier(criterion=\"entropy\", \\\n min_impurity_split = min_impurity_split)\n estimators.append(estimator)\n\n # set colors: r -red, g- green, b - blue, m - magenta\n colors = [(\"r\", \"g\"), (\"b\", \"m\")] \n labels = [(\"Training accuracy (unpruned tree)\", \n \"Cross-validation accuracy (unpruned tree)\"),\n (\"Training accuracy (pruned tree)\", \n \"Cross-validation accuracy (pruned tree)\")]\n \n # Cross validation with 100 iterations to get smoother mean test and train\n # score curves, each time with 30% data randomly selected as a validation set.\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n self.plot_learning_curve(estimators, title, labels, colors, self.X, self.y, \\\n cv=cv, n_jobs=4)\n \n # plot validation curve\n estimator_val = tree.DecisionTreeClassifier (criterion=\"entropy\") \n param_name = \"min_impurity_split\"\n x_label = \"Number of nodes in decision tree\"\n val_title = \"Validation Curve with Decision Tree (voice dataset)\"\n params =[i/100.0 for i in range(1,50)]\n if not is_voice_data:\n val_title = \"Validation Curve with Decision Tree 
(EEG dataset)\"\n params = np.logspace(-0.25, 0, 50)\n number_of_nodes = []\n for param in params:\n clf = tree.DecisionTreeClassifier(criterion=\"entropy\", min_impurity_split = param)\n clf.fit(self.X, self.y)\n number_of_nodes.append(clf.tree_.node_count)\n print number_of_nodes\n self.plot_validation_curve(estimator_val, params, param_name, self.X, \n self.y, val_title, xtricks = number_of_nodes, x_label = x_label,\n cv=cv, n_jobs = 4)\n plt.show()", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"md\",\n type=int,\n help=\"maximum depth\")\n parser.add_argument(\"mls\",\n type=int,\n help=\"minimum leaf samples\")\n parser.add_argument(\"--xTrain\",\n default=\"q4xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q4yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q4xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q4yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n # create an instance of the decision tree using gini\n start = time.time()\n dt1 = DecisionTree('gini', args.md, args.mls)\n trainAcc1, testAcc1 = dt_train_test(dt1, xTrain, yTrain, xTest, yTest)\n print(\"GINI Criterion ---------------\")\n print(\"Training Acc:\", trainAcc1)\n print(\"Test Acc:\", testAcc1)\n dt = DecisionTree('entropy', args.md, args.mls)\n trainAcc, testAcc = dt_train_test(dt, xTrain, yTrain, xTest, yTest)\n print(\"Entropy Criterion ---------------\")\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n end = time.time()\n print(\"Time taken: \", end-start)", "def test_train_dataset(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n expected = [\n {'alpha': 0.6931471805599453,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 1.3},\n {'alpha': 0.9729550745276565,\n 'dim': 1,\n 'inequal': 'lt',\n 'threshold': 1.0},\n {'alpha': 0.8958797346140273,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 0.90000000000000002}\n ]\n self.assertEqual(classifiers, expected)", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def test_decision_tree_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):", "def fit(self, dataSet, prune=False, validSet=None):\n\t\t\n\t\tmodel_args = self._model_complexity_args.copy()\n\t\tif prune:\n\t\t\tif type(validSet).__name__ != 'ndarray':\n\t\t\t\traise AttributeError(\"To make pruning, validation set accept 'ndarray'\\\n\t\t\t\t\t, cannot be {}!\".format(type(validSet).__name__))\n\t\t\t# get a fully-grown tree\n\t\t\tmodel_args['min_impurity_decrease'] = 0\n\t\t\tmodel_args['min_samples_split'] = 2\n\t\t\n\t\tif self._treeType == 
'reg':\n\t\t\timpurity_crit = DecisionTree._MSE\n\t\telif self._treeType == 'clf':\n\t\t\timpurity_crit = DecisionTree._Gini\n\n\n\t\telse:\n\t\t\traise ValueError(\"Argument 'treeType' accept 'clf' or 'reg' only\")\n\t\tself._root = DecisionTree._createTree(dataSet, impurity_crit=impurity_crit,\n\t\t\t\t\t\t\t\t\t\t\t**model_args)\n\n\t\tprint(\"Decision Tree Generated!\")\n\n\t\tif prune:\n\t\t\tprint(\"Pruning...\")\n\t\t\ttreeSeq = {'tree':[self._root], 'alpha':[0], 'num_leaves': [self._root.leaves()]} \n\t\t\tpruned_tree = DecisionTree._prune(deepcopy(self._root), impurity_crit, dataSet, treeSeq)\n\t\t\tprint('Pruning Done: %d pruned sub tree got' % len(treeSeq['tree']))\n\t\t\tprint('choosing best subtree through validation set...')\n\t\t\tbestSubtree, error_score = DecisionTree._bestSubtree(treeSeq, impurity_crit, validSet)\n\t\t\tprint('best subtree selected with error score: {}'.format(error_score))\n\n\t\t\tself._root = bestSubtree", "def test_training(self):\n\t\tpass", "def buildDecisionTree(self, data):\n self.data = data\n self.decisionTree = self.buildTree(self.data, self.listAttributes)\n with open(\"decision_tree_model\", \"wb\") as f:\n pickle.dump(self.decisionTree, f, pickle.HIGHEST_PROTOCOL)\n return self.decisionTree", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n 
example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def train_and_test(self, data):\n\n np.random.shuffle(data)\n datalist = self.unpack_data(data)\n\n logger.info('[*] 75-25 partition of datasets ...')\n\n markline1 = math.floor(0.75*(len(datalist['features'])))\n markline2 = math.floor(0.75*len(datalist['labels']))\n\n train_features = datalist['features'][:(markline1)]\n test_features = datalist['features'][(markline1):]\n \n train_labels = datalist['labels'][:(markline2)]\n test_labels = datalist['labels'][(markline2):]\n\n logger.info('[*] Training started with 75% Dataset ...')\n\n self.knn_model.fit(train_features, train_labels)\n\n logger.info('[*] Testing started with 25% Dataset ...')\n print('\\n/---------------Accuracy----------------/') \n \n accuracy = self.knn_model.score(train_features, train_labels)\n print('Test set accuracy {:.2f} %'.format(accuracy*100))\n\n if accuracy < 0.40:\n logger.warning('[-.-!] 
Thanks for tryin\\' but this machine ain\\'t learning.')\n\n return True", "def generate_train_test_data(data_dir = '../../att_faces'):\n\n train_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(1, 36)]\n test_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(36, 41)]\n \n true_combinations_train = generate_true_combinations(train_data)\n false_combinations_train = generate_false_combination(train_data, int(len(true_combinations_train) / len(train_data)), 10)\n \n true_combinations_test = generate_true_combinations(test_data)\n false_combinations_test = generate_false_combination(test_data, int(len(true_combinations_test) / len(test_data)), 10)\n \n return prepare_to_classifier(true_combinations_train, false_combinations_train, true_combinations_test, false_combinations_test)", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n 
filep.write(line + \",\\n\")\n return predictions", "def classify_data(X_train, Y_train, X_test):\r\n\r\n # Use this array to make a prediction for the labels of the data in X_test\r\n predictions = []\r\n # QHACK #\r\n np.random.seed(42)\r\n\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def layer(W):\r\n qml.Rot(W[0, 0], W[0, 1], W[0, 2], wires=0)\r\n qml.Rot(W[1, 0], W[1, 1], W[1, 2], wires=1)\r\n qml.Rot(W[2, 0], W[2, 1], W[2, 2], wires=2)\r\n\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n qml.CNOT(wires=[2, 0])\r\n \r\n def stateprep(x):\r\n qml.templates.embeddings.AngleEmbedding(x, wires=[0, 1, 2])\r\n \r\n @qml.qnode(dev)\r\n def circuit(weights, x):\r\n\r\n stateprep(x)\r\n\r\n for W in weights:\r\n layer(W)\r\n \r\n return qml.expval(qml.PauliZ(0))\r\n\r\n def variational_classifier(var, x):\r\n weights = var[0]\r\n bias = var[1]\r\n return circuit(weights, x) + bias\r\n\r\n def square_loss(labels, predictions):\r\n loss = 0\r\n for l, p in zip(labels, predictions):\r\n loss = loss + (l - p) ** 2\r\n\r\n loss = loss / len(labels)\r\n return loss\r\n\r\n def cost(var, X, Y):\r\n predictions = [variational_classifier(var, x) for x in X]\r\n return square_loss(Y, predictions)\r\n \r\n def accuracy(labels, predictions):\r\n loss = 0\r\n for l, p in zip(labels, predictions):\r\n if abs(l - p) < 1e-5:\r\n loss = loss + 1\r\n loss = loss / len(labels)\r\n\r\n return loss\r\n\r\n num_layers = 3\r\n num_qubits = 3\r\n var_init = (np.random.randn(num_layers, num_qubits, 3), 0.0)\r\n\r\n opt = qml.AdamOptimizer(0.12)\r\n batch_size = 10\r\n\r\n def pred(x):\r\n if x > 0.33:\r\n return 1\r\n if x > -0.33:\r\n return 0\r\n else:\r\n return -1\r\n\r\n var = var_init\r\n for it in range(25):\r\n\r\n # Update the weights by one optimizer step\r\n batch_index = np.random.randint(0, len(X_train), (batch_size,))\r\n X_batch = X_train[batch_index]\r\n Y_batch = Y_train[batch_index]\r\n var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)\r\n\r\n # Compute accuracy\r\n predictions = [pred(variational_classifier(var, x)) for x in X_train]\r\n acc = accuracy(Y_train, predictions)\r\n\r\n #print(\r\n # \"Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} \".format(\r\n # it + 1, cost(var, X_train, Y_train), acc\r\n # )\r\n #)\r\n if acc > 0.95:\r\n break\r\n predictions = [pred(variational_classifier(var, x)) for x in X_test]\r\n\r\n # QHACK #\r\n\r\n return array_to_concatenated_string(predictions)", "def _createTree(dataSet, impurity_crit, min_impurity_decrease, min_samples_split):\n\t\tif type(dataSet).__name__ != 'ndarray':\n\t\t\traise TypeError('input must be a numpy array.')\n\n\t\ttreenode = TreeNode()\n\t\tfeat_ind, val = DecisionTree._bestFeat2split(dataSet, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\tif feat_ind is None:\n\t\t\ttreenode.value = val\n\t\t\treturn treenode\n\t\ttreenode.cut_off = cut_off(feat_ind, val)\n\t\t\n\t\tD1, D2 = DecisionTree._binarySplit(dataSet, *treenode.cut_off)\n\n\t\ttreenode.left = DecisionTree._createTree(D1, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\ttreenode.right = DecisionTree._createTree(D2, impurity_crit, \n\t\t\t\t\t\tmin_impurity_decrease, min_samples_split)\n\t\treturn treenode", "def test(self, dataset): \n predictions = np.zeros(len(dataset), int)\n \n accuracy = self.random_forest.score(dataset[:,:-1], dataset[:,-1]) # Predict and compute accuracy.\n predictions = self.predict(dataset[:,:-1]) # Predict and return list of predictions.\n \n return predictions, 
accuracy", "def fit_and_test(X, y) -> None:\r\n models = {\r\n \"tree2\": RandomForestClassifier(n_estimators=1, n_jobs=-1, class_weight=\"balanced\", random_state=0),\r\n \"tree1\": RandomForestClassifier(n_estimators=1, n_jobs=-1, random_state=0, criterion=\"entropy\"),\r\n \"random_forest_10\": RandomForestClassifier(\r\n n_estimators=10, n_jobs=-1, class_weight=\"balanced\", criterion=\"gini\"\r\n ),\r\n \"random_forest_100\": RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion=\"entropy\"),\r\n \"knn_1\": KNeighborsClassifier(n_neighbors=1, n_jobs=-1, metric=\"hamming\"),\r\n \"knn_5\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1, metric=\"hamming\"),\r\n \"knn_15\": KNeighborsClassifier(n_neighbors=15, n_jobs=-1, metric=\"hamming\"),\r\n \"cnb\": ComplementNB(),\r\n }\r\n\r\n for model_name in models.keys():\r\n cross_validate(estimator=models[model_name], X=X, y=y, num_splits=5, save_name=model_name)", "def trainDecisionTree(inputDf, outputDf):\n clf = DecisionTreeRegressor(random_state=0)\n clf.fit(inputDf, outputDf)\n return clf", "def implement_tree(X, y, labels, k, alpha_range, criterion, d):\r\n\r\n kf = KFold(n_splits=k)\r\n min_avg_error = math.inf\r\n best_alpha = None\r\n avg_time_build_tree = 0\r\n for alpha in alpha_range:\r\n total_error = 0\r\n for train_idx, test_idx in kf.split(X):\r\n X_train, X_test = X[train_idx], X[test_idx]\r\n y_train, y_test = y[train_idx], y[test_idx]\r\n train = Tree(criterion, X_train, y_train, labels)\r\n test = Tree(criterion, X_test, y_test, labels)\r\n start = time.time()\r\n train_tree = train.build_tree(train.root, train.X, train.y, 0, d, train.labels, train.root.X_idx, None)\r\n test_all(train)\r\n end = time.time()\r\n train.is_valid(train_tree)\r\n period = end - start\r\n avg_time_build_tree += period\r\n pruned_tree, pruned_alpha, error = train.prune_tree(train_tree, alpha, test.X, test.y, test.labels)\r\n total_error += error\r\n avg_error = total_error / k\r\n if avg_error < min_avg_error:\r\n min_avg_error = avg_error\r\n best_alpha = alpha\r\n avg_time_build_tree = avg_time_build_tree / (k*len(alpha_range))\r\n\r\n return min_avg_error, best_alpha, avg_time_build_tree", "def decision_tree_classifier(features,target):\r\n clf = DecisionTreeClassifier()\r\n clf.fit(features, target)\r\n return clf", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = 
train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def build_decision_tree():\n\n decision_tree_root = None\n decision_tree_root = DecisionNode(None,None,lambda feature:feature[0]==1)\n decision_tree_root.left = DecisionNode(None,None,None,1)\n decision_tree_root.right = DecisionNode(None,None,lambda feature:feature[3]==1)\n decision_tree_root.right.left = DecisionNode(None,None,lambda feature:feature[1]==0)\n decision_tree_root.right.right = DecisionNode(None,None,lambda feature:feature[2]==1)\n decision_tree_root.right.left.left = DecisionNode(None,None,None,1)\n decision_tree_root.right.left.right = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.left = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.right = DecisionNode(None,None,None,1)\n return decision_tree_root", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def runner(self):\n\n print('[ INFO ]: Initializing the abalone program runner...')\n\n df, features, predictor, classes = self.preprocess()\n\n df = alg.random_feature_sample(self, df, 0.10)\n\n # Set up the training, testing and validation sets\n split = round(len(df) * 0.10)\n v_set = df[df.index < split]\n t_set = df[df.index >= split]\n\n tree = alg()\n folds_dict = tree.cross_validation(t_set, predictor, type='classification', folds=5)\n\n # Initialize comparion values\n best_fold_tree = None\n best_fold_score = 0\n best_fold_pred_labels = None\n best_fold_df = None\n\n # Loop through each fold in the folds dictionary\n for fold in folds_dict:\n\n test_set = folds_dict[fold]\n train_set = pd.DataFrame()\n for inner_fold in folds_dict:\n if inner_fold != fold:\n train_set = train_set.append(folds_dict[inner_fold], ignore_index=True)\n\n # Build an ID3 tree\n root = tree.build_tree(train_set, features, predictor)\n df, labels, pred_labels, score = tree.test(test_set, features, predictor, root)\n\n # Determine which tree is the best\n if score > best_fold_score:\n best_fold_tree = root\n best_fold_score = score\n best_fold_pred_labels = pred_labels\n best_fold_df = df\n\n # Validate results and prune the ID3 tree\n v_tree = alg()\n df, labels, pred_labels, score = v_tree.test(v_set, features, predictor, best_fold_tree)\n prune_root = v_tree.prune(df, predictor, best_fold_tree)\n prune_df, prune_labels, prune_pred_labels, prune_score = v_tree.test(v_set, features, predictor, prune_root)\n\n return best_fold_tree, score, labels, pred_labels, prune_root, prune_score, prune_labels, prune_pred_labels", "def build_random_forest(X_train, y_train):", "def build_tree(rows: list) -> DecisionNode or Leaf:\n info_gain, question = get_best_split(rows)\n\n # If no info is gained just return a leaf node with remaining rows\n if info_gain == 0:\n return Leaf(rows)\n\n true_rows, 
false_rows = partition(rows, question)\n false_branch = build_tree(false_rows)\n true_branch = build_tree(true_rows)\n return DecisionNode(question, rows, true_branch, false_branch)", "def _fit(self, data):\n\n\t\ttrain_in, train_labels = self._split_inputs_outputs(data)\n\t\tclf = DecisionTreeClassifier(min_samples_leaf=0.05)\n\t\tclf.fit(train_in, train_labels)\n\n\t\treturn clf", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def test(self, test_sample_indices):\n\n if self._root_node is None:\n print('Decision tree must be trained before testing.')\n sys.exit(1)\n return self._classify_samples(self._curr_dataset.samples,\n self._curr_dataset.sample_class,\n self._curr_dataset.sample_costs,\n test_sample_indices,\n self._curr_dataset.sample_index_to_key)", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random 
splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def main():\n # Read in trainingSet and testSet as a DataFrame\n trainingOriginal = pd.read_csv(\n filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.train.5fold.csv\")\n testOriginal = pd.read_csv(filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.test.csv\")\n\n # Select only the numeric data\n training = pd.DataFrame(trainingOriginal.select_dtypes(['number']))\n training = pd.concat([training.reset_index(drop=True),\n trainingOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Select only the numeric data\n test = pd.DataFrame(testOriginal.select_dtypes(['number']))\n test = pd.concat([test.reset_index(drop=True),\n testOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Normalize the columns for training and test\n # print training['age'].min()\n # print training['age'].max()\n # print training.head()\n\n # Run max-min normalization on numerical columns for testing 
and training data\n for i in range(6):\n training.iloc[:, i] = (training.iloc[:, i]- training.iloc[:, i].min())/(training.iloc[:, i].max() - training.iloc[:, i].min())\n test.iloc[:, i] = (test.iloc[:, i]- test.iloc[:, i].min())/(test.iloc[:, i].max() - test.iloc[:, i].min())\n\n # Convert the 'earns' column to boolean as follows\n training['earns'] = training['earns'] == '>50K'\n test['earns'] = test['earns'] == ' >50K'\n\n # Group the training set by the fold attribute as given by the dataset\n trainingForFinal = training\n training = training.groupby('fold')\n\n # Since we want to consider odd k-values from 1 to 39, construct a list with these values\n kList = []\n for i in range(40):\n if i % 2 == 1:\n kList.append(i)\n\n # Empty dictionary to hold performance of each k-values and its accuracy\n performance = {}\n\n # Compute the performance for each k-value\n for k in kList:\n performance = crossValidation(training, k, performance)\n\n # Sort the performance dictionary by its accuracy (value)\n performance = sorted(performance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Open file to write results\n file = open('grid.results.txt', 'w')\n # Write the results to file\n file.write(\"K | Accuracy\\n\")\n for item in performance:\n if item[0] < 10:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n else:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n # Close file\n file.close()\n\n # The best K is the one at the top of the list after the sorting\n bestK = performance[0][0]\n\n print 'Running Test Set with K = ' + str(bestK)\n\n applyModel(test,trainingForFinal,bestK)", "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def test_Tree():", "def make_training_validate_test(database, training_ratio, undersample=True):\n\tassert(training_ratio > 0 and training_ratio < 1)\n\tvalidate_ratio = test_ratio = (1-training_ratio) / 2\n\t(data, target) = load_data(database, undersample=undersample)\n\t\n\tnumber_of_samples = len(target)\n\tnumber_of_training_samples = math.floor(number_of_samples * training_ratio)\n\tnumber_of_validate_samples = math.floor(number_of_samples * validate_ratio)\n\tnumber_of_test_samples = math.floor(number_of_samples * test_ratio)\n\t\n\ttraining_data = data[0:number_of_training_samples]\n\ttraining_target = target[0:number_of_training_samples]\n\tvalidate_data = data[number_of_training_samples:number_of_training_samples+number_of_validate_samples]\n\tvalidate_target = 
target[number_of_training_samples:number_of_training_samples+number_of_validate_samples]\n\ttest_data = data[number_of_training_samples+number_of_validate_samples:number_of_training_samples+number_of_validate_samples+number_of_test_samples]\n\ttest_target = target[number_of_training_samples+number_of_validate_samples:number_of_training_samples+number_of_validate_samples+number_of_validate_samples]\n\t\n\tjoblib.dump(training_data, \"data/training_data\");\n\tjoblib.dump(training_target, \"data/training_target\");\n\tjoblib.dump(validate_data, \"data/validate_data\");\n\tjoblib.dump(validate_target, \"data/validate_target\");\n\tjoblib.dump(test_data, \"data/test_data\");\n\tjoblib.dump(test_target, \"data/test_target\");", "def build_decision_tree(baseline=False):\r\n if baseline:\r\n model = DecisionTreeClassifier()\r\n else:\r\n model = DecisionTreeClassifier(criterion='entropy',\r\n splitter='best',\r\n max_depth=25)\r\n\r\n return model", "def test_text_classifier_train(self):\n pass", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n 
self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def test_with_data(data):\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n unpruned = induce_node_tree(training_set, original_issues, \"D\", -1)\r\n pruned = prune_tree(unpruned, tuning_set)\r\n\r\n return pruned", "def build_tree(self, rows, attribute_list, depth=1, parent_rows=None):\n if len(rows) == 0:\n if parent_rows is not None:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n else:\n raise ValueError(\"Reached a decision node which had zero rows but was not\"\n \"provided with a parent node\")\n if self.max_depth is not None and depth == self.max_depth:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n try:\n splitting_func = {\"entropy\": self.get_entropy,\n \"gini\": self.get_gini}.get(self.splitting_criteria)\n except KeyError:\n print(\"Program only supports entropy and gini as splitting criteria. Provided criteria was \" +\n self.splitting_criteria)\n raise ValueError(\"Incorrect parameter value passed for splitting criteria\")\n\n value_before_split = splitting_func(rows)\n\n if len(attribute_list) == 0 or value_before_split == 0:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n if len(attribute_list) == 1 and attribute_list[0] == self.target_attribute:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n best_gain = -np.inf\n best_criteria = None\n best_attribute_partitions = None\n\n # Find the attribute having the best split \"\n\n best_attribute_partitions, best_criteria = self.get_best_attribute_for_split(attribute_list,\n best_attribute_partitions,\n best_criteria, best_gain,\n rows, splitting_func,\n value_before_split)\n branches = {}\n for domain_value in self.attribute_domains[best_criteria]:\n branch_attr_list = list(attribute_list)\n branch_attr_list.remove(best_criteria)\n if domain_value in best_attribute_partitions.keys():\n partition_dataset = best_attribute_partitions[domain_value]\n branches[domain_value] = self.build_tree(rows=partition_dataset,\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n else:\n branches[domain_value] = self.build_tree(rows=[],\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n return DecisionTree.DecisionNode(attribute_name=best_criteria, branches=branches)", "def train(self):\n # 1. 
Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)", "def make_testing_training(data, percent_training, random_split=False, seed=None):\n ## Making testing and training sets\n data['computed Case Date/Time Closed'] = pd.to_datetime(data['Case Date/Time Closed'])\n ordered_data = data.sort(columns=['computed Case Date/Time Closed'])\n np.random.seed(seed=seed) \n nrows, ncols = ordered_data.shape\n\n if random_split:\n training_indices = np.random.choice(ordered_data.index, size=int(nrows*percent_training), replace=False)\n training = ordered_data.ix[training_indices]\n testing = ordered_data[~data['case_id'].isin(training['case_id'])]\n else: # split by date\n training_stop_index = int(percent_training * nrows)\n training = ordered_data[:training_stop_index]\n testing = ordered_data[training_stop_index:]\n\n return training, testing", "def _buildTree(data, labels, features, tree=None) -> Tree:\n if tree is None:\n tree = Tree()\n\n if len(data) == 0:\n tree.addNode(Leaf(None, None), None)\n return tree\n labelsCount: dict = {lbl: len(labels[labels == lbl]) for lbl in set(labels)}\n if len(labelsCount) == 1:\n tree.addNode(Leaf(None, None, labels[0]), None)\n return tree\n if len(features) == 0:\n return tree\n\n best = Node(DecisionTree._chooseBestFeatures(data, labels, features))\n tree.addNode(best, None)\n\n data = np.delete(data, features.index(best.data), axis=1)\n features.remove(best.data)\n\n subFeatIdx = best.data.getRowIdxSubFeatures()\n for idx, subFeat in enumerate(best.data.subFeatures):\n if subFeat.getEntropy() > 0 and len(features) > 0:\n subNode = SubNode(subFeat, best)\n tree.addNode(subNode, subNode.parent)\n\n features_i = deepcopy(features)\n for jdx, feat in enumerate(features_i):\n feat.setData(np.column_stack((data[subFeatIdx[idx], jdx], labels[subFeatIdx[idx]])))\n\n tree.attachTree(DecisionTree._buildTree(data[subFeatIdx[idx], :], labels[subFeatIdx[idx]],\n features_i, None), subNode)\n else:\n leaf = Leaf(subFeat, best, subFeat(), subFeat.getOutLabel())\n tree.addLeaf(leaf, best)\n return tree", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= 
float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def train_and_test_model(In_train, Out_train, In_test, Out_test):\n\n # Naive Bayes Classifier\n print(\"Naive Bayes\")\n NB_classifier = MultinomialNB()\n NB_classifier.fit(In_train, Out_train)\n predictions = NB_classifier.predict(In_test)\n print(NB_classifier.score(In_test, Out_test))\n NB_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(NB_Confusion_Matrix)\n plot_confusion_matrix(NB_Confusion_Matrix)\n print()\n\n # Stochastic Gradient Descent Classifier\n print(\"Stochastic Gradient Descent\")\n SGD_classifier = SGDClassifier()\n SGD_classifier.fit(In_train, Out_train)\n predictions = SGD_classifier.predict(In_test)\n print(SGD_classifier.score(In_test, Out_test))\n SGD_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(SGD_Confusion_Matrix)\n plot_confusion_matrix(SGD_Confusion_Matrix)\n print()\n\n # MultiLayer Perceptron Classifier\n print(\"MultiLayer Perceptron\")\n MLP_classifier = MLPClassifier()\n MLP_classifier.fit(In_train, Out_train)\n predictions = MLP_classifier.predict(In_test)\n print(MLP_classifier.score(In_test, Out_test))\n MLP_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(MLP_Confusion_Matrix)\n 
plot_confusion_matrix(MLP_Confusion_Matrix)\n print()\n\n # Random Forest Classifier\n print(\"Random Forest Classifier\")\n RF_classifier = RandomForestClassifier()\n RF_classifier.fit(In_train, Out_train)\n predictions = RF_classifier.predict(In_test)\n scores = cross_val_score(RF_classifier, In_test, Out_test)\n print(scores.mean())\n RF_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(RF_Confusion_Matrix)\n plot_confusion_matrix(RF_Confusion_Matrix)\n print()\n\n # Decision Tree Classifier\n print(\"Decision Tree\")\n DT_classifier = tree.DecisionTreeClassifier()\n DT_classifier.fit(In_train, Out_train)\n predictions = RF_classifier.predict(In_test)\n print(DT_classifier.score(In_test, Out_test))\n DT_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(DT_Confusion_Matrix)\n plot_confusion_matrix(DT_Confusion_Matrix)\n print()\n\n # K-Nearest Neighbors Classifier\n print(\"K-NN\")\n KNN_Classifier = KNeighborsClassifier()\n KNN_Classifier.fit(In_train, Out_train)\n predictions = KNN_Classifier.predict(In_test)\n print(KNN_Classifier.score(In_test, Out_test))\n KNN_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(KNN_Confusion_Matrix)\n plot_confusion_matrix(KNN_Confusion_Matrix)\n print()\n\n # Support Vector Machines\n print(\"Support Vector Machines\")\n SVM_Classifier = svm.SVC()\n SVM_Classifier.fit(In_train, Out_train)\n predictions = KNN_Classifier.predict(In_test)\n print(SVM_Classifier.score(In_test, Out_test))\n SVM_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(SVM_Confusion_Matrix)\n plot_confusion_matrix(SVM_Confusion_Matrix)\n print()\n\n return NB_classifier", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def load_datasets(args, train_test_split=0):\n logger.info(\"Loading data...\")\n df_data_path = \"./data/df_data.pkl\"\n graph_path = \"./data/text_graph.pkl\"\n if not os.path.isfile(df_data_path) or not os.path.isfile(graph_path):\n logger.info(\"Building datasets and graph from raw data... 
Note this will take quite a while...\")\n generate_text_graph(args.train_data, args.infer_data, args.max_vocab_len)\n df_data = load_pickle(\"df_data.pkl\")\n G_dict = load_pickle(\"text_graph.pkl\")\n G = G_dict[\"graph\"]\n \n if train_test_split == 0:\n infer_idx_start = G_dict[\"infer_idx_start\"]\n del G_dict\n \n logger.info(\"Building adjacency and degree matrices...\")\n A = nx.to_numpy_matrix(G, weight=\"weight\"); A = A + np.eye(G.number_of_nodes())\n degrees = []\n for d in G.degree(weight=None):\n if d == 0:\n degrees.append(0)\n else:\n degrees.append(d[1]**(-0.5))\n degrees = np.diag(degrees)\n X = np.eye(G.number_of_nodes()) # Features are just identity matrix\n A_hat = degrees@A@degrees\n f = X # (n X n) X (n X n) x (n X n) X (n X n) input of net\n \n if train_test_split == 1:\n logger.info(\"Splitting labels for training and inferring...\")\n ### stratified test samples\n test_idxs = []\n for b_id in df_data[\"label\"].unique():\n dum = df_data[df_data[\"label\"] == b_id]\n if len(dum) >= 4:\n test_idxs.extend(list(np.random.choice(dum.index, size=round(args.test_ratio*len(dum)), replace=False)))\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n # select only certain labelled nodes for semi-supervised GCN\n selected = []\n for i in range(len(df_data)):\n if i not in test_idxs:\n selected.append(i)\n save_as_pickle(\"selected.pkl\", selected)\n else:\n logger.info(\"Preparing training labels...\")\n test_idxs = [i for i in range(infer_idx_start, len(df_data))]\n selected = [i for i in range(infer_idx_start)]\n save_as_pickle(\"selected.pkl\", selected)\n save_as_pickle(\"test_idxs.pkl\", test_idxs)\n \n f_selected = f[selected]; f_selected = torch.from_numpy(f_selected).float()\n f_not_selected = f[test_idxs]; f_not_selected = torch.from_numpy(f_not_selected).float()\n labels_selected = list(df_data.loc[selected]['label'])\n if train_test_split == 1: \n labels_not_selected = list(df_data.loc[test_idxs]['label'])\n else:\n labels_not_selected = []\n \n f = torch.from_numpy(f).float()\n save_as_pickle(\"labels_selected.pkl\", labels_selected)\n save_as_pickle(\"labels_not_selected.pkl\", labels_not_selected)\n logger.info(\"Split into %d train and %d test lebels.\" % (len(labels_selected), len(labels_not_selected)))\n return f, X, A_hat, selected, labels_selected, labels_not_selected, test_idxs", "def guessTreeOpt(train, test, valid):\n best = findApproxDepth(train, valid, 5, 5)\n tree = DecisionTree(train)\n print(\"building tree from full set\")\n tree.buildTree(best[0], best[1], True)\n print(\"tree built, testing tree\")\n acc = testTreeF(tree, test)\n print(\"accuracy of:\", \"%.2f\" % (acc * 100))\n return tree", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def train(self, curr_dataset, training_samples_indices, max_depth, min_samples_per_node,\n use_stop_conditions=False, max_p_value_chi_sq=0.1):\n self._curr_dataset = curr_dataset\n print('Starting tree training...')\n self._root_node = TreeNode(\n 
curr_dataset,\n training_samples_indices,\n curr_dataset.valid_nominal_attribute[:],\n curr_dataset.valid_numeric_attribute[:],\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n self._root_node.create_subtree(self._criterion)\n print('Starting pruning trivial subtrees...')\n start_time = timeit.default_timer()\n num_nodes_pruned = self._root_node.prune_trivial_subtrees()\n time_taken_pruning = timeit.default_timer() - start_time\n print('Done!')\n return time_taken_pruning, num_nodes_pruned", "def test(self, dataset = None, debug = True, labels = None):\n\n\t\tdataset = self.vectorize(dataset) if (dataset != None) else self.testing_set_vector;\n\t\tlabels = labels if (labels != None) else self.testing_labels;\n\n\t\tprediction = self.classifier.predict(dataset)\n\n\t\tif(debug):\n\t\t\tprint(classification_report(labels, prediction))\n\n\t\treturn prediction", "def fit_tree_stump(X_train: np.ndarray, y_train: np.ndarray) -> tree.DecisionTreeClassifier:\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(X_train, y_train)\n return clf", "def evaluate_random_forest(y_test, y_pred):", "def buildTree(self, data, attributes):\n\n totalClasses = []\n data = data[:]\n assignedClass = self.assignClass()\n\n for record in data:\n totalClasses.append(record[-1])\n\n # if all attributes have been traversed or the algo runs out of data/records\n if len(attributes) < 1 or not data:\n return assignedClass\n\n countClass = totalClasses.count(totalClasses[0])\n\n # if all classes are same\n if countClass == len(totalClasses):\n return totalClasses[0]\n\n splitAttrIndex = self.getSplitAttr(data, attributes)\n decisionTree = {splitAttrIndex: {}}\n\n # Splitting the data using the attribute with the hightest info gain\n for attrVal in ['True', 'False']:\n subtreeAttributes = attributes[:]\n subtreeAttributes.pop(splitAttrIndex)\n # Get new data for children node\n subtreeData = self.getNewData(data, splitAttrIndex, attrVal)\n # generate subtree\n subtree = self.buildTree(subtreeData, subtreeAttributes)\n decisionTree[splitAttrIndex][attrVal] = subtree\n\n return decisionTree", "def treebank_experiment():\n # Start by loading the dataset\n treebank_dataset = datasets.PennTreebankDataset(n=4, min_degree=10, max_degree=float('inf'),\n categories_to_use=[\"Verb\", \"Adverb\", \"Adjective\"],\n allow_proper_nouns=True)\n\n # Combine the non-verbs\n treebank_dataset.combine_clusters(1, 2, \"Non-Verbs\")\n\n # Run the approximate diffusion algorithm\n for left, right in hypalgorithms.find_max_cut(treebank_dataset.hypergraph,\n return_each_pair=False,\n algorithm='diffusion'):\n treebank_dataset.log_confusion_matrix([left, right])\n treebank_dataset.show_clustering_stats([left, right])\n hyplogging.logger.info(\n f\"Bipartiteness: {hypcheeg.hypergraph_bipartiteness(treebank_dataset.hypergraph, left, right)}\")\n\n hyplogging.logger.info(\"\")\n\n # Run the clique algorithm\n for left, right in hypalgorithms.find_max_cut(treebank_dataset.hypergraph,\n return_each_pair=False,\n algorithm='clique'):\n treebank_dataset.log_confusion_matrix([left, right])\n treebank_dataset.show_clustering_stats([left, right])\n hyplogging.logger.info(\n f\"Bipartiteness: {hypcheeg.hypergraph_bipartiteness(treebank_dataset.hypergraph, left, right)}\")", "def tree_gen(self, data, attri_set):\n # Create a new node.\n newNode = Node()\n\n # If data set is already classified, return a leaf node.\n if data.is_positive():\n newNode.set_leaf(True)\n return newNode\n elif data.is_negative():\n 
newNode.set_leaf(False)\n return newNode\n\n # If attribute set is empty, can't be classified.\n if not attri_set:\n type = data.mark_most()\n newNode.set_leaf(type)\n return newNode\n\n # Find a best decision attribute.\n # If it is a continuous attribute, it should have a best mid point.\n choice, midpoint = self.find_best(data, attri_set)\n if choice == -1:\n print \"error\"\n return None\n print \"best choice:\", Attribute(choice), midpoint\n newNode.attri = Attribute(choice)\n\n # Create a new attribute set,\n # which doesn't contain the best choice just find.\n new_attri_set = deepcopy(attri_set)\n new_attri_set.remove(choice)\n\n # Create branches.\n for val in self.attri_list[choice]:\n data_v = data.filter(choice, val, midpoint=midpoint)\n if data_v.empty():\n # If branch has empty data, create a leaf child.\n childNode = Node()\n childNode.set_leaf(data.mark_most()) # set parent's most\n newNode.children.append(childNode)\n else:\n # Recursively generate decision child tree.\n childNode = self.tree_gen(data_v, new_attri_set)\n newNode.children.append(childNode)\n\n return newNode", "def model(classifier, data):\n print(\"Beggining to test model\")\n train, test = cross_validation.train_test_split(data, test_size=.30)\n f,c = train[:,1:], train[:,0]\n classifier.fit(f,c,False)\n print(\"Score: \" + classifier.score(f,c))\n print(\"Finished testing model\")", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def test_predict_2():\n\n tpot_obj = TPOTClassifier()\n tpot_obj._optimized_pipeline = creator.Individual.\\\n from_string('DecisionTreeClassifier(input_matrix)', tpot_obj._pset)\n tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)\n tpot_obj._fitted_pipeline.fit(training_features, training_classes)\n\n result = tpot_obj.predict(testing_features)\n\n assert result.shape == (testing_features.shape[0],)", "def test_text_classifier_add_training_samples(self):\n pass", "def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, 
RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def wrapper_train(tree_depth, demos, validation_demos, pred_data=[None,None], verbose=True):\n return train(program_gen_step_size = 1000, \n num_programs = NUM_PROGRAMS, \n num_dts = 5, \n max_num_particles = 25, \n input_demos = demos, \n further_demos = validation_demos, \n tree_depth = tree_depth, \n return_prior=True,\n pred_data=pred_data,\n verbose=verbose)", "def train_and_eval():\n # train_file_name = 'adult.data'\n # test_file_name = 'adult.test'\n train_file_name = 'poker-hand-testing.data'\n test_file_name = 'poker-hand-training-true.data'\n #test_file_name = maybe_download()\n df_train = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n df_test = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n\n #df_train[LABEL_COLUMN] = (df_train[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n #df_test[LABEL_COLUMN] = (df_test[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n\n model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir\n print(\"model directory = %s\" % model_dir)\n m = build_estimator(model_dir)\n print(m)\n m.fit(input_fn=lambda: input_fn(df_train), steps=FLAGS.train_steps)\n results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)\n for key in sorted(results):\n print(\"%s: %s\" % (key, results[key]))", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n 
self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def train_model(regressor=DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DATASET_FILE_NAME,\n model_file_name=MODEL_FILE_NAME):\n df = pd.read_csv(dataset_file_name)\n\n # A minumum amount of feature engineering: The player's and opponent's\n # exact score may not be that important for our decisions. The difference,\n # however, certainly is. Moreover, the card value itself is not that\n # important. Here, the sum is.\n df['score_difference'] = df.self_score - df.opp_score\n df.drop(columns=['opp_score'], inplace=True)\n df['score_if_card_played'] = df.self_score + df.result_card_val\n df.drop(columns=['result_card_val'], inplace=True)\n\n # Strategy will be to let our model predict the score for different actions\n # Hence, we're going to train the model on that now\n X, y = df.drop(columns='score'), df.score\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n regressor.fit(X_train, y_train)\n\n feature_names = ['self_score', 'opp_stands', 'result_stand',\n 'score_difference', 'score_if_card_played']\n\n score = regressor.score(X_test, y_test)\n print(f\"Score on the test set: {score}.\")\n if isinstance(regressor, DecisionTreeRegressor):\n export_graphviz(regressor, feature_names=feature_names,\n out_file=GRAPHVIZ_FILE_NAME, filled=True)\n\n # For persistence, we export the generated model\n dump(regressor, model_file_name)\n return score", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def test_random_forest_n_estimators_parameter(params, X_train, X_test, y_train, y_test):", "def tree_model(feature_train, help_rank_train, model_name):\n decision_tree = DecisionTreeClassifier()\n decision_tree = decision_tree.fit(feature_train, help_rank_train)\n tree_model = open(model_name,'wb')\n dump(decision_tree, tree_model, -1)\n return", "def DT_train(X, y, cv):\r\n start_time = time.time()\r\n print('\\n'+ '# '*10+'[Training] Decision Tree Model (DT):'+ ' #'*10)\r\n parameters = {\r\n 'max_depth':(1, 5, 10),\r\n 'max_features':('auto', 'sqrt', 'log2'),\r\n 'min_samples_leaf':(1, 5, 10)\r\n }\r\n print('-'*2+'Grid Search Parameters:')\r\n print(parameters)\r\n clf = tree.DecisionTreeClassifier(random_state=rstate)\r\n clf = GridSearchCV(clf, parameters, cv=cv, scoring='f1')\r\n clf.fit(X, y)\r\n print('-'*2+'GridSearch Results:')\r\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\r\n print(pd.DataFrame(clf.cv_results_))\r\n print('> '*2+'Training time: %.4f seconds.'%(time.time()-start_time))\r\n return clf", "def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n 
use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def test_text_classifier_add_testing_samples(self):\n pass", "def run_train_test(training_input, testing_input):\n #grab the size of the training data input for each of the classes\n num_A_train = training_input[0][1]\n num_B_train = training_input[0][2]\n num_C_train = training_input[0][3]\n #remove the information that we used to find the size of the classes and segregate each of the\n #classes into their own numpy array\n training_input.remove(training_input[0])\n training = np.array(training_input)\n A_array = training[:num_A_train]\n B_array = training[1+num_A_train:num_A_train+num_B_train]\n C_array = training[1+num_A_train+num_B_train:]\n #Find the centroid by summing the columns and dividing by the total number of training data points in the given class\n A_centroid = A_array.mean(axis=0)\n B_centroid = B_array.mean(axis=0)\n C_centroid = C_array.mean(axis=0)\n #Calculate the weight\n AB_w = A_centroid - B_centroid\n BC_w = B_centroid - C_centroid\n AC_w = A_centroid - C_centroid\n #Calculate t\n AB_t = np.dot(AB_w, (A_centroid + B_centroid) / 2)\n BC_t = np.dot(BC_w, (B_centroid + C_centroid) / 2)\n AC_t = np.dot(AC_w, (A_centroid + C_centroid) / 2)\n #find the size of the testing data for each class\n num_A_test = testing_input[0][1]\n num_B_test = testing_input[0][2]\n num_C_test = testing_input[0][3]\n #remove the information and separate into three numpy arrays for each class\n testing_input.remove(testing_input[0])\n testing = np.array(testing_input)\n A_test_array = testing[:num_A_test]\n B_test_array = testing[num_A_test:num_A_test+num_B_test]\n C_test_array = testing[num_A_test+num_B_test:]\n\n truePositiveA = 0;\n truePositiveB = 0;\n truePositiveC = 0;\n trueNegativeA = 0;\n trueNegativeB = 0;\n trueNegativeC = 0;\n AinB = 0;\n AinC = 0;\n BinA = 0;\n BinC = 0;\n CinA = 0;\n CinB = 0;\n #loop through the testing data and store the true positive and true negative results. 
Additionally store\n #the number of A points classified as B, A points classified in C and etc.\n for i in range(num_A_test):\n if((np.dot(A_test_array[i], AB_w) >= AB_t) & (np.dot(A_test_array[i], AC_w) >= AC_t)):\n truePositiveA += 1\n elif((np.dot(A_test_array[i], AB_w) < AB_t)):\n AinB += 1\n else:\n AinC += 1\n for i in range(num_B_test):\n if((np.dot(B_test_array[i], AB_w) < AB_t) & (np.dot(B_test_array[i], BC_w) >= BC_t)):\n truePositiveB += 1\n elif((np.dot(B_test_array[i], AB_w) < AB_t)):\n BinA += 1\n else:\n BinC += 1\n for i in range(num_C_test):\n if((np.dot(C_test_array[i], AC_w) < AC_t) & (np.dot(C_test_array[i], BC_w) < BC_t)):\n truePositiveC += 1\n elif((np.dot(C_test_array[i], AC_w) < AC_t)):\n CinA += 1\n else:\n CinB += 1\n #Calculate the true positive, true negative, false positive, false negative, total positive, total negative\n #and estimated positive to calculate the tpr, fpr, error rate, accuracy and precision\n truePositive = truePositiveA + truePositiveB + truePositiveC\n trueNegative = truePositiveB + truePositiveC + BinC + CinB + truePositiveA + truePositiveB + AinB + BinA +truePositiveA + truePositiveC + AinC + CinA\n falsePositive = BinA + CinA + AinB + CinB + AinC + BinC\n falseNegative = AinC + AinB + BinA + BinC + CinA + CinB\n totalPositive = truePositive + falseNegative\n totalNegative = falsePositive + trueNegative\n estimatedPositive = truePositive + falsePositive\n #Calculate these measures and return the result values\n return {\n \"tpr\": float(truePositive)/totalPositive,\n \"fpr\": float(falsePositive)/totalNegative,\n \"error_rate\": float(falsePositive+falseNegative)/(totalPositive+totalNegative),\n \"accuracy\": float(truePositive+trueNegative)/(totalPositive+totalNegative),\n \"precision\": float(truePositive)/estimatedPositive\n }", "def main():\n data = pd.read_csv('./house-votes-84.data', header = None)\n\n class_names = [\"republican\", \"democrat\"]\n\n print(\"\\n-- Train and Test with Winnow --\\n\")\n train_and_test_with_winnow(data, class_names)\n\n print(\"\\n-- Train and Test with Naive Bayes --\\n\")\n train_and_test_with_naive_bayes(data, class_names)", "def train_and_test(self, curr_dataset, training_samples_indices, validation_sample_indices,\n max_depth, min_samples_per_node, use_stop_conditions=False,\n max_p_value_chi_sq=0.1):\n time_taken_pruning, num_nodes_pruned = self.train(curr_dataset,\n training_samples_indices,\n max_depth,\n min_samples_per_node,\n use_stop_conditions,\n max_p_value_chi_sq)\n max_depth = self.get_root_node().get_max_depth()\n return (self.test(validation_sample_indices),\n max_depth,\n time_taken_pruning,\n num_nodes_pruned)", "def test_test_model(self):\n\n dataset = ClassificationTestDataset()\n model = ClassificationTestModel(dataset)\n preds = list(model.predict(dataset.examples))\n self.assertEqual(np.argmax(preds[0]['preds']), 2)\n self.assertEqual(np.argmax(preds[1]['preds']), 1)\n self.assertEqual(np.argmax(preds[2]['preds']), 4)\n self.assertEqual(np.argmax(preds[3]['preds']), 3)", "def make_train_validation_test_sets(path_to_json, out_dir, path_to_images,\n train_fraction=0.6,\n validation_fraction=0.2,\n test_fraction=0.2,\n do_print=False):\n assert train_fraction + validation_fraction + test_fraction == 1, 'Sum of subsets fractions must be 1'\n df = pd.read_json(path_to_json)\n # one-hot encode labels\n df['Class'] = df['Class'].replace(to_replace=[3, 4, 5, 7, 8, 10],\n value=['Unilamellar', 'Multilamellar', 'Uncertain', 'Empty', 'Full', 'Uncertain'])\n\n\n # present class captions as 
one hot encoding\n df = pd.concat([df, pd.get_dummies(df['Class'], prefix='Label')], axis=1)\n\n # Check that all images in dataframe have corresponding file on the disk\n for index, row in df.iterrows():\n if not os.path.isfile(path_to_images + row['Image']):\n print '{} image was not found. This example will be deleted'.format(row['Image'])\n df.drop(index, inplace=True)\n\n # prepare new dataframes\n df_train = pd.DataFrame()\n df_validation = pd.DataFrame()\n df_test = pd.DataFrame()\n\n if do_print:\n print '----------\\nEntire set:\\n', df['Class'].value_counts()\n\n class_counts = df['Class'].value_counts().to_dict()\n for label, count in class_counts.iteritems():\n df_test = pd.concat([df_test, df[df['Class'] == label].sample(frac=test_fraction)])\n df = df[~df.index.isin(df_test.index)]\n\n validation_fraction_adjusted = validation_fraction / (1 - test_fraction)\n df_validation = pd.concat([df_validation, df[df['Class'] == label].sample(frac=validation_fraction_adjusted)])\n df = df[~df.index.isin(df_validation.index)]\n\n df_train = pd.concat([df_train, df[df['Class'] == label]])\n df = df[~df.index.isin(df_train.index)]\n\n if do_print:\n print '----------\\nTrain set:\\n', df_train['Class'].value_counts()\n print '----------\\nValidation set:\\n', df_validation['Class'].value_counts()\n print '----------\\nTest set:\\n', df_test['Class'].value_counts()\n\n # remove out_file if it exists\n filenames = ['train_set.json', 'test_set.json', 'validation_set']\n for f in filenames:\n try:\n os.remove(out_dir + f)\n except OSError:\n pass\n except IOError:\n pass\n\n df_train.to_json(out_dir + 'train_set.json')\n df_validation.to_json(out_dir + 'validation_set.json')\n df_test.to_json(out_dir + 'test_set.json')", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n 
random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def supervisedMLClassify(sim_vec_dict, true_match_set):\n\n num_folds = 3 # Number of classifiers to create\n\n class_match_set = set()\n class_nonmatch_set = set()\n\n try:\n import numpy\n import sklearn.tree\n except:\n print('Either the \"numpy\" or \"sklearn\" modules is not installed! Aborting.')\n print('')\n\n return set(), set() # Return two empty sets so program continues\n\n import random\n\n print('Supervised decision tree classification of %d record pairs' % \\\n (len(sim_vec_dict)))\n\n # Generate the training data sets (similarity vectors plus class labels\n # (match or non-match)\n #\n num_train_rec = len(sim_vec_dict)\n num_features = len(list(sim_vec_dict.values())[0])\n\n print(' Number of training records and features: %d / %d' % \\\n (num_train_rec, num_features))\n\n all_train_data = numpy.zeros([num_train_rec, num_features])\n all_train_class = numpy.zeros(num_train_rec)\n\n rec_pair_id_list = []\n\n num_pos = 0\n num_neg = 0\n\n i = 0\n for (rec_id1, rec_id2) in sim_vec_dict:\n rec_pair_id_list.append((rec_id1, rec_id2))\n sim_vec = sim_vec_dict[(rec_id1, rec_id2)]\n\n all_train_data[:][i] = sim_vec\n\n if (rec_id1, rec_id2) in true_match_set:\n all_train_class[i] = 1.0\n num_pos += 1\n else:\n all_train_class[i] = 0.0\n num_neg += 1\n i += 1\n\n num_all = num_pos + num_neg # All training examples\n\n num_train_select = int(2. 
/ 3 * num_all) # Select 2/3 for training\n num_test_select = num_all - num_train_select\n\n print(' Number of positive and negative training records: %d / %d' % \\\n (num_pos, num_neg))\n print('')\n\n class_list = [] # List of the generated classifiers\n\n for c in range(num_folds):\n\n train_index_list = random.sample(xrange(num_all), num_train_select)\n\n train_data = numpy.zeros([num_train_select, num_features])\n train_class = numpy.zeros(num_train_select)\n test_data = numpy.zeros([num_test_select, num_features])\n test_class = numpy.zeros(num_test_select)\n\n # Copy similarities and class labels\n #\n train_ind = 0\n test_ind = 0\n\n for i in range(num_all):\n\n if (i in train_index_list):\n train_data[:][train_ind] = all_train_data[:][i]\n train_class[train_ind] = all_train_class[i]\n train_ind += 1\n else:\n test_data[:][test_ind] = all_train_data[:][i]\n test_class[test_ind] = all_train_class[i]\n test_ind += 1\n\n # Now build and train the classifier\n #\n decision_tree = sklearn.tree.DecisionTreeClassifier()\n decision_tree.fit(train_data, train_class)\n\n # Now use the trained classifier on the testing data to see how accurate\n # it is\n #\n class_predict = decision_tree.predict(test_data)\n\n num_corr = 0\n num_wrong = 0\n\n for i in range(len(class_predict)):\n if (class_predict[i] == test_class[i]):\n num_corr += 1\n else:\n num_wrong += 1\n\n print(' Classifier %d gets %d correct and %d wrong' % \\\n (c, num_corr, num_wrong))\n\n class_list.append(decision_tree)\n\n # Now use the trained classifiers to classify all record pairs\n #\n num_match_class_list = [0] * num_all # Count how often a record pair is\n # classified as a match\n\n for decision_tree in class_list:\n\n class_predict = decision_tree.predict(all_train_data) # Classify all pairs\n\n for i in range(num_all):\n num_match_class_list[i] += class_predict[i]\n\n assert num_match_class_list[i] <= num_folds, num_match_class_list[i]\n\n for i in range(num_all):\n rec_id_pair = rec_pair_id_list[i]\n\n # More '1' (match) classifications than '0' (non-match ones)\n #\n if (float(num_match_class_list[i]) / num_folds > 0.5):\n class_match_set.add(rec_id_pair)\n else:\n class_nonmatch_set.add(rec_id_pair)\n\n print('')\n\n print(' Classified %d record pairs as matches and %d as non-matches' % \\\n (len(class_match_set), len(class_nonmatch_set)))\n print('')\n\n return class_match_set, class_nonmatch_set", "def _build_graph(self, train_data, test_data):\n\n # Network for testing / evaluation\n # As before, we define placeholders for the input. These here now can be fed\n # directly, e.g. with a feed_dict created by _evaluation_food\n self.expert_outputs = {m: test_pipeline(test_data[m], self.config['prefixes'][m],\n **self.config)\n for m in self.modalities}\n self.prediction = self._fusion(self.expert_outputs)" ]
[ "0.71724963", "0.6940502", "0.6876765", "0.6875179", "0.68419737", "0.68007624", "0.671924", "0.6693489", "0.6642988", "0.6619835", "0.657808", "0.6437057", "0.6418784", "0.63988245", "0.6392599", "0.63923985", "0.6373904", "0.63466936", "0.6346446", "0.6345242", "0.63415146", "0.6334272", "0.63155454", "0.63086283", "0.62700933", "0.6267807", "0.6244437", "0.6231396", "0.6211399", "0.6206716", "0.6190481", "0.61882216", "0.6187929", "0.61789995", "0.61697507", "0.61639404", "0.6162279", "0.6160571", "0.61540717", "0.61458683", "0.6112733", "0.6111541", "0.6103911", "0.61038935", "0.6081229", "0.6064108", "0.605971", "0.60555995", "0.605329", "0.60522777", "0.6032022", "0.6017304", "0.6014983", "0.60134614", "0.60016", "0.59917694", "0.59716547", "0.5967314", "0.59642386", "0.59636533", "0.5962772", "0.5960442", "0.5955937", "0.5940416", "0.59301186", "0.59258074", "0.59251183", "0.5919034", "0.59096277", "0.5906129", "0.5903182", "0.59004074", "0.5898827", "0.5896873", "0.589274", "0.58844626", "0.58745915", "0.58742046", "0.5865426", "0.5856569", "0.58563924", "0.5855427", "0.5854785", "0.5852011", "0.58374405", "0.5831244", "0.58307517", "0.5826804", "0.5815472", "0.5804687", "0.5798956", "0.579828", "0.5797635", "0.5785744", "0.577769", "0.57737654", "0.57727784", "0.57625526", "0.5761403", "0.5755592" ]
0.6959578
1
Test the template tag js_settings
def test_js_settings(mocker, rf):
    mocker.patch(
        "mitxpro.templatetags.js_interop.get_js_settings",
        return_value={"data": "value"},
    )
    request = rf.get("/")
    context = Context({"request": request})
    template = Template(("{% load js_interop %}" "{% js_settings %}"))
    rendered_template = template.render(context)
    assert (
        rendered_template
        == """<script type="text/javascript">
var SETTINGS = {"data": "value"};
</script>"""
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jssettings(self):\n self.update()\n return \"var %s = %s\" % (self.js_var_settings_name,\n json.dumps(self.settings))", "def test_jssettings(self):\n settings_fullpath = os.path.join(dirs.get_main_js_dir(), \"mediabrute-settings.js\")\n \n if os.path.isfile(settings_fullpath):\n os.unlink(settings_fullpath) \n self.assertFalse(os.path.isfile(settings_fullpath))\n \n call_command(\"mediabrute_jssettings\")\n self.assertTrue(os.path.isfile(settings_fullpath))\n \n os.unlink(settings_fullpath) \n self.assertFalse(os.path.isfile(settings_fullpath))\n \n custom_filename = \"heyo.js\"\n custom_fullpath = os.path.join(dirs.get_main_js_dir(), \"heyo.js\")\n \n if os.path.isfile(custom_fullpath):\n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n call_command(\"mediabrute_jssettings\", custom_filename)\n self.assertTrue(os.path.isfile(custom_fullpath))\n \n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n custom_filename = \"heyo\"\n custom_fullpath = os.path.join(dirs.get_main_js_dir(), \"heyo.js\")\n \n if os.path.isfile(custom_fullpath):\n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n call_command(\"mediabrute_jssettings\", custom_filename)\n self.assertTrue(os.path.isfile(custom_fullpath))\n \n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))", "def test_js_url(self):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.STATIC_URL, \"js\"))\n \n with self.settings(MEDIABRUTE_USE_STATIC=False):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.MEDIA_URL, \"js\"))\n \n with self.settings(MEDIABRUTE_JS_URL_PATH=\"heyo/yoyo\"):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.STATIC_URL, \"heyo/yoyo\"))\n \n with self.settings(MEDIABRUTE_USE_STATIC=False, MEDIABRUTE_JS_URL_PATH=\"heyo/yoyo\"):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.MEDIA_URL, \"heyo/yoyo\"))", "def module_use_template_javascript(self):\n return False", "def module_use_template_javascript(self):\n return False", "def angular_js_tests(request):\n return locals()", "def get_js_file(self):\n return 'placeholder'", "def check_jsable_context(self, context):\n pass", "def test_js_source(self):\n actual = is_js_source(self.view)\n\n self.assertTrue(actual)", "def test_never_load_jquery_setting(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n result = find_jquery_link(NO_JQUERY)\n self.assertEqual(result, True)", "def test_default_url(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n pattern, url = process_jquery_setting()\n self.assertEqual(\n url,\n 'http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js')", "def test_media_includes_jsi18n(self):\n form = self.form_class(choices={'replacements': self.model.objects.all()})\n self.assertIn(reverse('admin:jsi18n'), form.media._js)", "def test_none_pattern(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n pattern, url = process_jquery_setting()\n self.assertEqual(pattern, None)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]", "def test_js(self, 
model_value):\n return render_to_string(\"autodot/tester.js\", dict(\n test_data = json.dumps(model_value),\n model_name=self.model_name, \n hash=self.hash))", "def test_string_pattern(self):\n with patch_settings(LIVETRANSLATION_JQUERY=u'/jquery.js'):\n pattern, url = process_jquery_setting()\n self.assertEqual(pattern, ur'<script\\s[^>]*src=\"\\/jquery\\.js\"')", "def settings(request):\n gauges = Gauge.objects.all()\n return render_to_response('dashboard/settings.js',{'gauges': gauges} )", "def test_get_mt_settings(self):\n pass", "def enable_javascript(self):\n return self._enable_javascript", "def setJavaScriptMode(self,javaScriptMode):\n self.PDFreactorConfiguration.in1[\"javaScriptMode\"] = javaScriptMode", "def test_get_cached_js(self):\n self.assertEquals(len(api.get_cached_js()), 1)", "def test_searchjs_is_available(self):\n portal = self.layer['portal']\n resreg = getattr(portal, 'portal_registry')\n from Products.CMFPlone.interfaces import IResourceRegistry\n resources_ids = resreg.collectionOfInterface(\n IResourceRegistry, prefix=\"plone.resources\").keys()\n self.assertTrue(\n 'resource-search-js' in resources_ids)", "def test_get_value_json(self):\n val = self.setting_json.get_value()\n self.assertEqual(val, {'Testing': 'good'})", "def test_get_value_json(self):\n val = self.setting_json.get_value()\n self.assertEqual(val, {'Testing': 'good'})", "def test_config(self):\n self.assertEqual(self.view.template_name, \"resources/templanguage_admin.html\")", "def test_string_url(self):\n with patch_settings(LIVETRANSLATION_JQUERY=u'/jquery.js'):\n pattern, url = process_jquery_setting()\n self.assertEqual(url, '/jquery.js')", "def get_filter():\n return render_template(\"filter_js.html\")", "def get_default_javascript():\n return [\"_static/require.js\"]", "def test_settings(self):\n \n self.assertTrue(settings.USE_I18N, msg=\"setting USE_I18N must be True to have languages working\")", "def test_jsi18n(self):\n \n jspath = reverse(\"admin:jsi18n\")\n self._test_url_can_be_viewed(self.projectadmin,jspath)\n \n ain = self.testproject.get_project_admin_instance_name() \n jspathpa = reverse(\"admin:jsi18n\",current_app=self.testproject.get_project_admin_instance_name())\n self._test_url_can_be_viewed(self.projectadmin,jspath)\n \n self.assertTrue(jspath!=jspathpa,\"Path to root admin should differ from \"\n \"path to project admin, but both resolve to '{}'\".format(jspath))", "def test_is_scripting_mode():\n\n assert application_services.is_scripting_mode() is False", "def test_initialization_json(self):\n expected = {\n 'id': self.setting_json.pk,\n 'app_plugin': get_app_plugin(EXAMPLE_APP_NAME).get_model().pk,\n 'project': self.project.pk,\n 'name': 'json_setting',\n 'type': 'JSON',\n 'user': None,\n 'value': None,\n 'value_json': {'Testing': 'good'},\n 'user_modifiable': True,\n 'sodar_uuid': self.setting_json.sodar_uuid,\n }\n self.assertEqual(model_to_dict(self.setting_json), expected)", "def settings():\n raise NotImplementedError # pragma: nocoverage", "def loadjs(*args):\n return render(settings, 'JS_FILES', 'staticloader/load_js.html', *args)", "def test_initialization_json(self):\n expected = {\n 'id': self.setting_json.pk,\n 'app_plugin': get_app_plugin(EXAMPLE_APP_NAME).get_model().pk,\n 'project': None,\n 'name': 'json_setting',\n 'type': 'JSON',\n 'user': self.user.pk,\n 'value': None,\n 'value_json': {'Testing': 'good'},\n 'user_modifiable': True,\n 'sodar_uuid': self.setting_json.sodar_uuid,\n }\n self.assertEqual(model_to_dict(self.setting_json), expected)", "def 
generateJavascriptContent(notification):", "def project_settings(request):\n webnode_settings = kakocase_settings(request)\n webnode_settings['settings']['IS_WEBNODE'] = True\n return webnode_settings", "def check_settings(self):\r\n pass", "def setup_js(self):\n script = \"\"\"\n Salamat.contextData.redactorOptions = {imageGetJson: '%s'};\n \"\"\"\n script %= self.reverse('redactor_files', args=(self.namespace,\n self.prefix))\n return HttpResponse(script, content_type='text/javascript')", "def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)", "def get_gizmo_js():\n return (\n \"tethys_gizmos/js/gizmo_utilities.js\",\n \"tethys_gizmos/js/cesium_map_view.js\",\n \"tethys_gizmos/js/DrawHelper.min.js\",\n )", "def test_non_js_source(self):\n self.view.set_syntax_file(\"Packages/Python/Python.tmLanguage\")\n\n actual = is_js_source(self.view)\n\n self.assertFalse(actual)", "def settings_view():\n return template('settings.html')", "def __init__( settings={} ):", "def test_constructor(self):\n # Build the Settings objects\n self.assertEqual(self.extension, self.settings.extension)\n\n # Ensure that the registration settings dict gets\n # added to this Settings\n self.assertEqual(self.test_dict['test_key1'],\n self.settings['test_key1'])", "def test_js_basic():\n\n content = \"\"\"\n /* This is a single line comment */\n /* This is a multi line\n comment */\n var data = \"Hi this is a string.\";\n \"\"\"\n parser = JSParser(\"path\", content)\n assert parser.sections.has_section(\"This is a single line comment\")\n assert parser.sections.has_section(\"This is a multi line\")\n assert parser.sections.has_section(\"comment\")\n assert parser.sections.has_section(\"Hi this is a string.\")", "def check_settings(self):\n pass", "def test_js_app_dirs(self):\n try:\n ext = settings.APP_JS\n except AttributeError:\n ext = defaults.APP_JS\n \n for app, directory in dirs.APP_JS_DIRS:\n self.assertIn(\"/%s\" % ext, directory)", "def setting():\n return render_template('setting.html', year=datetime.now().year)", "def bootstrap_javascript_url():\n return javascript_url()", "def get_js_extensions(self):\n return JS_EXTENSIONS", "def js():\n with lcd(BASEDIR):\n js_ext = (\n 'submodules/jquery-cookie/src/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'submodules/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-inlineform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp submodules/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js submodules/jquery-cookie/src/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n 
local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))", "def static(request):\n return {\n 'JSERRORLOGGING_STATIC_URL': STATIC_URL\n }", "def test_momentjs_locale(self):\n with translation.override('no-no'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': None,\n }\n )\n\n with translation.override('en-us'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': None,\n }\n )\n\n with translation.override('de'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': 'misago/momentjs/de.js',\n }\n )\n\n with translation.override('pl-de'):\n self.assertEqual(\n context_processors.momentjs_locale(True), {\n 'MOMENTJS_LOCALE_URL': 'misago/momentjs/pl.js',\n }\n )", "def settings(request):\n from django.conf import settings\n return {\n 'exhibitBaseTemplate': settings.EXHIBIT_TEMPLATE,\n 'thumbnailUrl': settings.THUMBNAIL_URL,\n 'calisphere': settings.CALISPHERE\n }", "def inject_js(widget_id, options):\n\n s3 = current.response.s3\n appname = current.request.application\n\n # Static JS\n scripts = s3.scripts\n if s3.debug:\n script = \"/%s/static/scripts/S3/s3.shelter_inspection.js\" % appname\n else:\n script = \"/%s/static/scripts/S3/s3.shelter_inspection.min.js\" % appname\n scripts.append(script)\n\n # Instantiate widget\n scripts = s3.jquery_ready\n script = '''$('#%(id)s').shelterInspection(%(options)s)''' % \\\n {\"id\": widget_id, \"options\": json.dumps(options)}\n if script not in scripts:\n scripts.append(script)", "def test_js_dir(self):\n fullpath = dirs.get_main_js_dir()\n ext_only = dirs.get_main_js_dir(full_path=False)\n \n try:\n ext_compare = settings.JS_DIR\n except AttributeError:\n ext_compare = defaults.JS_DIR\n \n fullpath_compare = os.path.join(dirs.get_root(), ext_compare)\n \n self.assertEquals(fullpath_compare, fullpath)\n self.assertEquals(ext_compare, ext_only)", "def globalsettings(golbalsettingbutton):\n try:\n atomacclick(golbalsettingbutton)\n global_settings_content = getApplicatontitle(golbalsettingbutton)\n except Exception as er:\n print \"Not able to get globalsettings_content\"\n return False\n return global_settings_content", "def test_set_json(self):\n setting_name = 'project_json_setting'\n value = {'key': 'value'}\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': value,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n obj = AppSetting.objects.get(name=setting_name, project=self.project)\n self.assertEqual(obj.get_value(), value)", "def prepare_config_request(self, req):\n\t\tself.content_type = 'text/javascript'\n\t\tself.template = 'fckconfig-custom.js.tmpl'", "def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertEqual(self.view.template_name, \"resources/templanguage_list.html\")", "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", 
{ \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def load_settings(self):\n\n self.std = settings.settings", "def plugin_settings(settings): # pylint: disable=unused-argument\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_settings(mock_os_environ, update_on_init):\n kwargs = {'prefix': 'TEST_STUFF'}\n if update_on_init is None:\n pass\n else:\n kwargs['update_on_init'] = update_on_init\n settings_map = settings_parser.Settings(**kwargs)\n assert isinstance(settings_map, Mapping)\n if update_on_init is False:\n expected = {}\n else:\n expected = {'testgroup': {'testvar': 7, 'test_var': 6}, 'testgroup_test_var': 9}\n assert dict(settings_map) == expected", "def test_settings(settings):\n\n # Prevents CSRF attacks\n assert settings.SESSION_COOKIE_SAMESITE == \"Strict\"\n\n # Allows credentials to be sent on other subdomains\n assert settings.CORS_ALLOW_CREDENTIALS\n\n # SessionAuthentication must not require CSRF checking\n assert (\n \"rest_framework.authentication.SessionAuthentication\"\n not in settings.REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"]\n )\n assert (\n \"chemreg.auth.authentication.CsrfExemptSessionAuthentication\"\n in settings.REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"]\n )\n\n # BasicAuthentication is needed to login the first time\n assert (\n \"rest_framework.authentication.BasicAuthentication\"\n in settings.REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"]\n )\n\n # Authenticated users can perform any request.\n # Unauthorised users will only be permitted if the request\n # method is one of the \"safe\" methods; GET, HEAD or OPTIONS.\n assert (\n \"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly\"\n in settings.REST_FRAMEWORK[\"DEFAULT_PERMISSION_CLASSES\"]\n )", "def update(self):\n registry = getUtility(IRegistry)\n site_settings = registry.forInterface(\n ISiteSchema, prefix=\"plone\", check=False)\n try:\n if site_settings.webstats_js:\n self.webstats_js = site_settings.webstats_js\n except AttributeError:\n pass", "def test_themes_cant_access_configpy_attributes():\n app = create_ctfd()\n with app.app_context():\n assert app.config[\"SECRET_KEY\"] == \"AAAAAAAAAAAAAAAAAAAA\"\n assert (\n app.jinja_env.from_string(\"{{ get_config('SECRET_KEY') }}\").render()\n != app.config[\"SECRET_KEY\"]\n )\n destroy_ctfd(app)", "def locking_admin_form_js(self, request, object_id):\n return render(request,\n 'locking/admin_form.js',\n {'options': self.get_json_options(request, object_id)},\n content_type=\"application/javascript\")", "def testJava(self):\n self.assertEqual(\n self.java,\n self.config.java\n )", "def is_js_file(fname):\r\n return REJS.search(fname) and \\\r\n TEST_INDICATOR not in fname", "def test_exclude_javascript(parsed_html):\n assert len(parsed_html.find_all(\"script\")) == 2\n parsed_html = utils.exclude_javascript(parsed_html)\n assert len(parsed_html.find_all(\"script\")) == 1", "def settings():\n return _get_settings()[1]", "def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }", "def test_template_feedback(self):\r\n pass", "def test_settings_files():\n\n def _callback(action: kuber.CommandAction):\n s = action.bundle.settings\n assert s.foo and s.foo == s.spam\n assert s.bar and s.bar == s.ham\n assert s.baz and s.baz == s.eggs\n\n cb = MagicMock()\n cb.side_effect = _callback\n\n kuber.cli(\n cb,\n arguments=[\n \"render\",\n f'--settings={os.path.join(MY_DIRECTORY, 
\"settings.yaml\")}',\n f'--settings={os.path.join(MY_DIRECTORY, \"settings.json\")}',\n ],\n )\n cb.assert_called_once()", "def settings(request):\n return {\"SETTINGS\": django_settings, \"GLOBAL_DEFINITIONS\": global_definitions}", "def test_settings_doesnt_break(self):\r\n self.settingsDeploy()", "def test_read_my_settings(monkeypatch, tmp_path, my_settings):\n monkeypatch.setattr(my_code, \"MY_SETTINGS_PATH\", tmp_path / \".my_fake_settings\")\n my_code.MY_SETTINGS_PATH.write_text(json.dumps(my_settings))\n assert my_code.read_my_settings() == my_settings", "def render_js_module(\n integration_docs: Dict[str, Any],\n metric_docs: Dict[str, Any],\n metrics_by_integration: Dict[str, List[str]],\n meta: Dict[str, Any],\n) -> str:\n return dedent(\n f\"\"\"\n window.integrationsDocumentation={json_compact(integration_docs)};\n window.integrationsMeta={json_compact(meta)};\n window.metricDocumentation={json_compact(metric_docs)};\n // Plugin docs are omitted since they are rarely used.\n window.pluginDocumentation={{}};\n // This is the map of integration name to a list of metrics that that integration supports.\n window.pluginMetrics={metrics_by_integration};\n \"\"\"\n )", "def build_settings(self, settings):\n settings.add_json_panel('Makesmith Settings', self.config, data=self.json)", "def showSettings():\n cq = dz()\n cq.abag()", "def settings():\n return SettingsMock.instance()", "def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertEqual(self.view.template_name, \"resources/templanguage_detail.html\")", "def propeller_javascript_url():\n return javascript_url()", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def test_isJS8Sandbox_property(self, cache_set_mock, cache_get_mock):\n cache_get_mock.return_value = None\n self.assertIsNotNone(self.core.isJS8Sandbox)", "def test_context(self):\n\n response = self.client.get('requests_view_10')\n self.assertTrue('settings' in response.context)\n self.assertEqual(response.context['settings'], settings)", "def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)", "def validate_settings(self, settings):\n pass", "def settings():\n\n\n def dec(func):\n \"\"\" Annotation wrapping web public function\"\"\"\n\n\n def deco(*a, **b):\n \"\"\" Decorator for public method\"\"\"\n if not SettingsService.is_first_run():\n return func(*a, **b)\n raise cherrypy.HTTPRedirect('/settings/settings')\n\n\n return deco\n\n\n return dec", "def js_embed(self):\n if self.force_js_embed:\n return True\n else:\n return self._jshost in (\n SCRIPT_FILE_PATH, constants.DEFAULT_JUPYTER_GITHUB_URL)", "def read_javascript_includes():\n\tif \"CFG_PREFIX\" in globals(): \n\t\tjs_filepath = os.path.join(CFG_PREFIX,\"var/www/js/jquery/jquery-lib.html\")\n\t\tif os.path.exists(js_filepath):\n\t\t\tf = open(js_filepath,\"r\")\n\t\t\tjs_text = f.read()\n\t\t\tf.close()\n\t\t\treturn js_text\n\t\telse: \t\n\t\t\twarning(\"no javascipt file included %s\" %js_filepath)\n\t\t\treturn None\n\telse: \t\n\t\twarning(\"CFG_PREFIX not set. 
no javascript includes\")\n\t\treturn None", "def test_default_no_match(self):\n config = (DEFAULT_JQUERY_PATTERN, 'DUMMY')\n with patch_settings(LIVETRANSLATION_JQUERY=config):\n result = find_jquery_link(NO_JQUERY)\n self.assertEqual(result, None)", "def _load_settings_to_jinja_env(self) :\n\t\t# Load filters if exists\n\t\tif hasattr(self.settings, 'FILTERS') :\n\t\t\tfor name, cls in utils.load_module(self.settings.FILTERS).__dict__.items() :\n\t\t\t\tself.jinja_env.filters[name] = cls\n\n\n\t\t# Load globals if exists\n\t\tif hasattr(self.settings, 'GLOBALS') :\n\t\t\tfor name, cls in utils.load_module(self.settings.GLOBALS).__dict__.items() :\n\t\t\t\tself.jinja_env.globals[name] = cls", "def toastr_messages_js(subdomain: t.Optional[str] = None) -> Response:\n return current_app.response_class(\n render_template('toastr_messages.js.jinja2'), mimetype='application/javascript'\n )", "def initialize(self):\n my_setting = self.settings.get('my_setting')", "def get_vendor_js():\n return (\"://plotly-load_from_python.js\",)" ]
[ "0.6923433", "0.6532566", "0.6531576", "0.6457588", "0.6457588", "0.6105654", "0.6031209", "0.60102826", "0.5945879", "0.5761625", "0.57324207", "0.5658938", "0.5633526", "0.5626261", "0.5626261", "0.55889726", "0.5578439", "0.5576125", "0.5567976", "0.5524449", "0.54723006", "0.5431144", "0.542574", "0.5400255", "0.5359174", "0.5359174", "0.5351283", "0.5345427", "0.53056467", "0.5301526", "0.52327675", "0.5225093", "0.5223648", "0.5216525", "0.52109516", "0.52056354", "0.5198514", "0.5133804", "0.5130607", "0.5102345", "0.50636995", "0.5053668", "0.5039032", "0.50158805", "0.5013313", "0.5002573", "0.49915057", "0.49813366", "0.4965121", "0.49556458", "0.49545977", "0.49492523", "0.4933518", "0.49326313", "0.4927234", "0.49239564", "0.4918186", "0.4914749", "0.4911413", "0.490083", "0.4896939", "0.48881817", "0.483989", "0.48340243", "0.48333287", "0.48326212", "0.48257577", "0.48115128", "0.48092675", "0.48043463", "0.48035008", "0.48023486", "0.4801442", "0.47927672", "0.47868812", "0.47851864", "0.47723284", "0.4771954", "0.47464967", "0.47448877", "0.4740783", "0.47343695", "0.47329825", "0.47279388", "0.47269484", "0.47266653", "0.47193068", "0.4718929", "0.47157094", "0.47143722", "0.4710652", "0.47057194", "0.47034305", "0.46971914", "0.4696641", "0.46932393", "0.467789", "0.46683258", "0.46680325", "0.46669525" ]
0.8117115
0
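The test above patches get_js_settings and only pins down the tag's rendered output. A minimal sketch of a js_settings template tag that would satisfy that assertion (a hypothetical implementation, not the actual mitxpro js_interop module) is:

import json

from django.template import Library
from django.utils.safestring import mark_safe

register = Library()


def get_js_settings(request):
    # Placeholder helper: the real project assembles this dict elsewhere, and
    # the test above patches this name, so its body is irrelevant there.
    return {}


@register.simple_tag(takes_context=True)
def js_settings(context):
    # Serialize the per-request settings into a global SETTINGS object, using
    # the exact markup the test's assertion expects.
    rendered = json.dumps(get_js_settings(context["request"]))
    return mark_safe(
        '<script type="text/javascript">\n'
        f"var SETTINGS = {rendered};\n"
        "</script>"
    )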
Uses keypoint algorithm SIFT to extract feature points from the image and get point correspondences
def getFeatureMatches(img1, img2):
    sift = xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    Left_Pts = list()
    Right_Pts = list()

    # Ratio criteria according to Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.5 * n.distance:
            Left_Pts.append(kp1[m.queryIdx].pt)
            Right_Pts.append(kp2[m.trainIdx].pt)

    left = np.array(Left_Pts)
    right = np.array(Right_Pts)
    features = (left, right)

    return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SIFT_features(self):\n start_time = datetime.datetime.now() \n self.func_log(\"\\n\\tIn SIFT_features()\")\n \n key_points = {}\n descriptor_list = []\n SIFT = cv2.xfeatures2d.SIFT_create()\n \n self.func_log(\"\\t\\tSIFT feature extraction start\")\n \n for key, value in self.images.items():\n features = [] \n for img in value:\n kp, des = SIFT.detectAndCompute(img,None) \n descriptor_list.extend(des)\n features.append(des)\n \n key_points[key] = features \n \n self.func_log(\"\\t\\t\\tKEY: {} finished\".format(key))\n \n self.descriptor_list = descriptor_list\n self.key_points = key_points \n \n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def surf_keypoint_detection(img):\n surf = cv2.xfeatures2d.SURF_create(510)\n kp, des = surf.detectAndCompute(img, None)\n return des", "def sift_keypt_extractor(img1, img2, ratio=0.7, max_matches=-1, visualize=False, max_features=-1):\n sift = cv2.xfeatures2d.SIFT_create(max_features) if max_features > 0 else cv2.xfeatures2d.SIFT_create()\n\n img1_g = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img2_g = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n try:\n kp1, des1 = sift.detectAndCompute(img1_g, None)\n kp2, des2 = sift.detectAndCompute(img2_g, None)\n\n # FLANN parameters\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n\n pts1 = []\n pts2 = []\n filtered_kp1 = []\n filtered_kp2 = []\n\n # ratio test as per Lowe's paper\n for i, (m, n) in enumerate(matches):\n if m.distance < ratio * n.distance:\n pts1.append(kp1[m.queryIdx].pt)\n pts2.append(kp2[m.trainIdx].pt)\n filtered_kp1.append(kp1[m.queryIdx])\n filtered_kp2.append(kp2[m.trainIdx])\n\n if max_matches > 0 and len(pts1) > max_matches - 1:\n break\n\n if visualize:\n draw_matches(img1, filtered_kp1, img2, filtered_kp2, plot_title=\"\")\n\n return kp1, kp2, pts1, pts2\n except:\n return None, None, None, None", "def correspondence_points(img1, img2, tag='c'):\n if len(img1.shape) == 3:\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n if len(img2.shape) == 3:\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.SURF(800)\n norm = cv2.NORM_L2\n flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)\n kp1, desc1 = detector.detectAndCompute(img1, None)\n kp2, desc2 = detector.detectAndCompute(img2, None)\n raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\n if len(p1) >= 4:\n H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n print '%d / %d inliers/matched' % (np.sum(status), len(status))\n status = status.reshape(-1) # flatten\n p1 = p1[status == 1]\n p2 = p2[status == 1]\n kp_pairs = [kp_pairs[i] for i in range(len(kp_pairs)) if status[i] == 1]\n else:\n # Just depend on the thresholding for filtering matches\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches, ratio=0.3)\n\n draw_correspondence_points(img1, img2, kp_pairs, tag=tag)\n return p1, p2, kp_pairs", "def features_keypoints(image, keypoints, window_size):\n kps = [cv2.KeyPoint(x, y, window_size) for x, y in keypoints]\n img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n descriptor = cv2.xfeatures2d.SIFT_create()\n _, features = descriptor.compute(img, kps)\n return features", "def 
image_detect_and_compute(detector, img_name):\n img_building = cv2.imread(img_name)\n img_building = cv2.cvtColor(img_building, cv2.COLOR_BGR2RGB)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(img_building, None)\n img_kp = cv2.drawKeypoints(img_building, kp, img_building)\n return img_building, kp, des", "def SURF_features(self):\n start_time = datetime.datetime.now()\n self.func_log(\"\\n\\tIn SURF_features()\")\n \n key_points = {}\n descriptor_list = []\n surf = cv2.xfeatures2d.SURF_create()\n \n self.func_log(\"\\t\\tSURF feature extraction start\")\n \n for key, value in self.images.items():\n features = [] \n for img in value:\n kp, des = surf.detectAndCompute(img,None) \n descriptor_list.extend(des)\n features.append(des)\n \n key_points[key] = features\n \n self.func_log(\"\\t\\t\\tKEY: {} finished\".format(key))\n \n self.descriptor_list = descriptor_list\n self.key_points = key_points\n \n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def __sift_dect_and_compute(image):\n img = cv2.imread(image, cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n kp, des = cv2.SIFT().detectAndCompute(gray, None)\n return kp, des", "def _match_keypoints(self, query_image: Image) -> typing.Tuple[typing.List[int], typing.List[typing.Any]]:\n query_image.get_keypoints_and_descriptors()\n\n # matches = self.matcher.match(query_image.descriptor)\n matches = self.matcher.knnMatch(query_image.descriptor, k=2)\n good = []\n for m_n in matches:\n if len(m_n) == 1:\n good.append(m_n[0])\n continue\n elif len(m_n) != 2:\n continue\n (m, n) = m_n\n if m.distance < 0.7 * n.distance:\n good.append(m)\n\n images_scores = [0] * len(self._candidate_images)\n images_matches = [None] * len(self._candidate_images)\n for image_index, image in enumerate(self._candidate_images):\n matches_scores = []\n matches = []\n for i, match in enumerate(good):\n if match.imgIdx != image_index:\n continue\n matches.append(match)\n matches_scores.append((256 - match.distance) / 256)\n\n match_cnt = len(matches_scores)\n if match_cnt <= 0:\n continue\n\n images_scores[image_index] = (\n 0.5 + ((math.tanh(match_cnt / 3 - 1)) / 2)) * (sum(matches_scores) / match_cnt)\n\n images_matches[image_index] = matches\n\n return images_scores, images_matches", "def extract_features(img, thr=0.005):\n if img.ndims == 3:\n img = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.AKAZE_create()\n (kpts, descs) = detector.detectAndCompute(img, None)\n return kpts, descs", "def get_local_features(self, img):\n kp, des = self.fe.detectAndCompute(img, None)\n return kp, des", "def __key_points(image_shape, point_list):\n keypoint_list = []\n for i in range(point_list.shape[0]):\n keypoint_list.append(ia.Keypoint(x=point_list[i, 0, 0], y=point_list[i, 0, 1]))\n return ia.KeypointsOnImage(keypoint_list,\n shape=ia.quokka(size=image_shape[:2]))", "def find_matching_points(img1, img2, max_pix_movement=50, normalize=True, show=False):\n\n # Initiate ORB detector\n orb = cv2.ORB_create()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1, None)\n kp2, des2 = orb.detectAndCompute(img2, None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1,des2)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n # Draw first 10 matches.\n if show:\n img3 = 
cv2.drawMatches(img1,kp1,img2,kp2,matches[:500], None,flags=2)\n plt.imshow(img3),plt.show()\n # Get the matching keypoints for each of the images\n\n list_kp1 = []\n list_kp2 = []\n for mat in matches:\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n list_kp1.append(kp1[img1_idx].pt)\n list_kp2.append(kp2[img2_idx].pt)\n\n n_kp1, n_kp2 = np.float32(list_kp1), np.float32(list_kp2)\n n_kp1 /= np.asarray([img1.shape[1], img1.shape[0]], np.float32)\n n_kp2 /= np.asarray([img2.shape[1], img2.shape[0]], np.float32)\n n_kp1 = n_kp1 * 2. - 1.\n n_kp2 = n_kp2 * 2. - 1.\n\n return np.int32(list_kp1), np.int32(list_kp2), n_kp1, n_kp2", "def up_to_step_4(imgs):\n # ... your code here ...\n for i in range(len(imgs)-1):\n \n detector = cv2.xfeatures2d.SURF_create(hessianThreshold = 3000,\n nOctaves = 4,\n nOctaveLayers = 3,\n upright = False,\n extended = False)\n gray1= cv2.cvtColor(imgs[i],cv2.COLOR_BGR2GRAY)\n kp1,des1 = detector.detectAndCompute(gray1,None)\n gray2= cv2.cvtColor(imgs[i+1],cv2.COLOR_BGR2GRAY)\n kp2,des2 = detector.detectAndCompute(gray2,None)\n# bf = cv2.BFMatcher()\n matches = knnmatch(des2,des1)\n# good = []\n# for m,n in matches:\n# if m.distance < 0.75*n.distance:\n# good.append(m)\n# \n src_pts = np.float32([ kp2[m.queryIdx].pt for m in matches ])\n dst_pts = np.float32([ kp1[m.trainIdx].pt for m in matches ])\n H = findhomography(src_pts, dst_pts, 3000)\n# H,mask = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC)\n # warp = warpperspective(imgs[0],H)\n warp = cv2.warpPerspective(imgs[i+1], H, (imgs[i+1].shape[1]*2 , imgs[i+1].shape[0]*2))\n rows, cols = np.where(warp[:,:,0] !=0)\n min_row, max_row = min(rows), max(rows) +1\n min_col, max_col = min(cols), max(cols) +1\n result = warp[min_row:max_row,min_col:max_col,:]\n # imgs = warp\n # warp[0:imgs[0].shape[0], 0:imgs[0].shape[1]] = imgs[2]\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch((imgs[i],result))\n imgs[i+1] = result[1]\n imgs[0] = imgs[-2]\n return imgs[0]", "def extractFeatures(image, feature_list):\n # for multiple features or color features\n #feat_vec = np.array([])\n \n # sift has 128D\n feat_vec = np.empty((0,128))\n n_channels = (image.shape[2] if len(image.shape)==3 else 1)\n \n #img_f32 = image.astype(np.float32)\n\n for feature in feature_list:\n if (feature.strip().lower() == 'dsift'):\n print \"computing dsift (dense rootSift) features\"\n dense = cv2.FeatureDetector_create(\"Dense\")\n sift = cv2.SIFT()\n if n_channels == 1:\n kp = dense.detect(image[:,:])\n # compute kp descriptors\n _,des = sift.compute(image[:,:],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n \n feat_vec = np.vstack((feat_vec, des))\n else:\n for channel in xrange(n_channels):\n kp = dense.detect(image[:,:,channel])\n _,des = sift.compute(image[:,:,channel],kp)\n \n # normalize the descriptors (L1)\n des /= (des.sum(axis=1, keepdims=True) + 1e-7)\n des = np.sqrt(des)\n\n feat_vec = np.vstack((feat_vec, des))\n \n# if (feature.strip().lower() == 'color'):\n# print \"computing color features\"\n# # scale from 0-255 between 0 and 1\n# if args.scale == 1:\n# img_f32 /= 255.\n# \n# f_tmp = img_f32.flatten()\n# feat_vec = np.append(feat_vec, f_tmp)\n else:\n raise Exception(\"Method '%s' is not implemented!\"%(feature)) \n \n return feat_vec", "def im_detect_keypoints_aug(model, im, boxes):\n\n # Collect heatmaps predicted under different transformations\n heatmaps_ts = []\n # Tag predictions computed under downscaling and 
upscaling transformations\n ds_ts = []\n us_ts = []\n\n def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):\n heatmaps_ts.append(heatmaps_t)\n ds_ts.append(ds_t)\n us_ts.append(us_t)\n\n # Compute the heatmaps for the original image (identity transform)\n im_scales = im_conv_body_only(model, im)\n heatmaps_i = im_detect_keypoints(model, im_scales, boxes)\n add_heatmaps_t(heatmaps_i)\n\n # Perform keypoints detection on the horizontally flipped image\n if cfg.TEST.KPS_AUG.H_FLIP:\n heatmaps_hf = im_detect_keypoints_hflip(model, im, boxes)\n add_heatmaps_t(heatmaps_hf)\n\n # Compute detections at different scales\n for scale in cfg.TEST.KPS_AUG.SCALES:\n ds_scl = scale < cfg.TEST.SCALES[0]\n us_scl = scale > cfg.TEST.SCALES[0]\n heatmaps_scl = im_detect_keypoints_scale(\n model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes\n )\n add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)\n\n if cfg.TEST.KPS_AUG.SCALE_H_FLIP:\n heatmaps_scl_hf = im_detect_keypoints_scale(\n model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True\n )\n add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)\n\n # Compute keypoints at different aspect ratios\n for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:\n heatmaps_ar = im_detect_keypoints_aspect_ratio(\n model, im, aspect_ratio, boxes\n )\n add_heatmaps_t(heatmaps_ar)\n\n if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:\n heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(\n model, im, aspect_ratio, boxes, hflip=True\n )\n add_heatmaps_t(heatmaps_ar_hf)\n\n # Select the heuristic function for combining the heatmaps\n if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':\n np_f = np.mean\n elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':\n np_f = np.amax\n else:\n raise NotImplementedError(\n 'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)\n )\n\n def heur_f(hms_ts):\n return np_f(hms_ts, axis=0)\n\n # Combine the heatmaps\n if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:\n heatmaps_c = combine_heatmaps_size_dep(\n heatmaps_ts, ds_ts, us_ts, boxes, heur_f\n )\n else:\n heatmaps_c = heur_f(heatmaps_ts)\n\n return heatmaps_c", "def get_keypoints(self, image_path, image_data):\r\n print 'Using serializer', Pyro4.config.SERIALIZER\r\n\r\n base_path, ext = os.path.splitext(image_path)\r\n image_name = 'image' + ext\r\n img_local_path = os.path.join(self.local_path, image_name)\r\n\r\n with open(img_local_path, 'wb') as f:\r\n f.write(image_data)\r\n\r\n self.update_imagelist(img_local_path)\r\n self.detect_face()\r\n bbox_data = self.get_bbox_data()\r\n\r\n if self.is_face_detected(bbox_data):\r\n self.format_bbox_file(image_name, bbox_data)\r\n self.detect_keypoints()\r\n keypoints = self.kpts_from_binary()\r\n else:\r\n keypoints = []\r\n\r\n return keypoints", "def Feature(imgs, name = 'SIFT', scale = 0.6):\n bf = cv2.BFMatcher()\n if name == 'SIFT':\n process = cv2.xfeatures2d.SIFT_create()\n elif name == 'SURF':\n process = cv2.xfeatures2d.SURF_create()\n else :\n process = cv2.ORB_create(200)\n bf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n kps = []\n dess = []\n for img in imgs:\n kp,des= process.detectAndCompute(img, None)\n kps.append(kp)\n dess.append(des)\n\n results = []\n des = dess[0]\n for e_kp, e_des in zip(kps[0], dess[0]):\n results.append([(e_kp, e_des)])\n\n for i in range(1,len(imgs)):\n matches = bf.knnMatch(des, dess[i], k=2)\n goods = [m for m, n in matches if m.distance < scale * n.distance]\n new_results = []\n new_des = []\n for good in goods: \n results[good.queryIdx].append((kps[i][good.trainIdx],dess[i][good.trainIdx]))\n new_results.append(results[good.queryIdx])\n 
new_des.append(des[good.queryIdx])\n results = new_results\n des = np.array(new_des)\n\n return results", "def get_descriptors(extractor, img):\n # kps: key points\n # descriptors: descriptor is feature of each keypoint\n kps, descriptors = extractor.detectAndCompute(img, None)\n return kps, descriptors", "def extract_feat(self, points, img, img_metas):\n img_feats = self.extract_img_feat(img, img_metas)\n pts_feats = self.extract_pts_feat(points, img_feats, img_metas)\n return (img_feats, pts_feats)", "def extract_feat(self, points, img_metas):\n voxel_dict = self.voxelize(points)\n voxel_features = self.voxel_encoder(voxel_dict['voxels'],\n voxel_dict['num_points'],\n voxel_dict['coors'])\n batch_size = voxel_dict['coors'][-1, 0].item() + 1\n feats_dict = self.middle_encoder(voxel_features, voxel_dict['coors'],\n batch_size)\n x = self.backbone(feats_dict['spatial_features'])\n if self.with_neck:\n neck_feats = self.neck(x)\n feats_dict.update({'neck_feats': neck_feats})\n return feats_dict, voxel_dict", "def get_keypoints_and_descriptors(imgL, imgR):\n orb = cv2.ORB_create()\n kp1, des1 = orb.detectAndCompute(imgL, None)\n kp2, des2 = orb.detectAndCompute(imgR, None)\n\n ############## Using FLANN matcher ##############\n # Each keypoint of the first image is matched with a number of\n # keypoints from the second image. k=2 means keep the 2 best matches\n # for each keypoint (best matches = the ones with the smallest\n # distance measurement).\n FLANN_INDEX_LSH = 6\n index_params = dict(\n algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1,\n ) # 2\n search_params = dict(checks=50) # or pass empty dictionary\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n flann_match_pairs = flann.knnMatch(des1, des2, k=2)\n return kp1, des1, kp2, des2, flann_match_pairs", "def detectAndDescribe(image, method=None):\n assert method is not None, \"You need to define a feature detection method. 
Values are: 'sift', 'surf'\"\n # detect and extract features from the image\n if method == 'sift':\n descriptor = cv2.xfeatures2d.SIFT_create()\n elif method == 'surf':\n descriptor = cv2.xfeatures2d.SURF_create()\n elif method == 'brisk':\n descriptor = cv2.BRISK_create()\n elif method == 'orb':\n descriptor = cv2.ORB_create()\n \n # get keypoints and descriptors\n (kps, features) = descriptor.detectAndCompute(image, None)\n \n return (kps, features)", "def image_detect_and_compute_video(detector, img_name):\n img_building = cv2.cvtColor(img_name, cv2.COLOR_BGR2RGB)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(img_building, None)\n img_kp = cv2.drawKeypoints(img_building, kp, img_building)\n return img_building, kp, des", "def compute_match_sift(img_A, img_B, window_size=16, stride=8, method=\"greedy\"):\n m1, n1 = img_A.shape[0:2]\n m2, n2 = img_B.shape[0:2]\n\n print(\"Computing points\")\n points_A = []\n for y in range(window_size, m1 - window_size, stride):\n for x in range(window_size, n1 - window_size, stride):\n points_A.append((x, y))\n\n points_B = []\n for y in range(window_size, m2 - window_size, stride):\n for x in range(window_size, n2 - window_size, stride):\n points_B.append((x, y))\n\n print(\"Generating features\")\n features_A = features_keypoints(img_A, points_A, window_size)\n features_B = features_keypoints(img_B, points_B, window_size)\n\n print(\"Matching features\")\n if method == \"greedy\":\n match_A, match_B = match_greedy_sift(points_A, points_B, features_A, features_B)\n elif method == \"dtw\":\n match_A, match_B = match_dtw_sift(points_A, points_B, features_A, features_B)\n\n return match_A, match_B", "def find_keypoints ( gray , quality , ksize , blocksize , max_area = None ) :\n gray32 = np.float32(gray)\n points = cv2.goodFeaturesToTrack(gray32,maxCorners = 100, qualityLevel = quality ,minDistance = ksize , blockSize = blocksize )\n\n if points is None :\n return None , None , None\n\n if len(points) < 4 :\n return None , None , None\n\n oob = cv2.minAreaRect(points) \n\n if oob is None :\n return None, None , None\n\n oob_corners = get_oob_corners ( oob = oob )\n\n if oob_corners is None :\n return None, None , None\n\n if max_area is None :\n return points , oob , oob_corners\n\n area = get_polygon_area ( corners = oob_corners )\n\n if area > max_area :\n return None, None , None\n\n return points , oob , oob_corners", "def im_detect_keypoints(model, im_scales, boxes):\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n\n M = cfg.KRCNN.HEATMAP_SIZE\n if boxes.shape[0] == 0:\n pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)\n return pred_heatmaps\n\n inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scales)}\n\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'keypoint_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.keypoint_net.Proto().name)\n\n pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()\n\n # In case of 1\n if pred_heatmaps.ndim == 3:\n pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)\n\n return pred_heatmaps", "def get_sift_features(image):\n frames, descriptors = sift(image, compute_descriptor=True, float_descriptors=True, verbose=False)\n return frames, descriptors", "def get_sift_features(image):\n frames, descriptors = sift(image, compute_descriptor=True, float_descriptors=True, verbose=False)\n return 
frames, descriptors", "def detectAndDescribe(image, method=None):\n \n assert method is not None, \"You need to define a feature detection method. Values are: 'sift', 'surf'\"\n \n # detect and extract features from the image\n if method == 'sift':\n descriptor = cv2.xfeatures2d.SIFT_create()\n elif method == 'surf':\n descriptor = cv2.xfeatures2d.SURF_create()\n elif method == 'brisk':\n descriptor = cv2.BRISK_create()\n elif method == 'orb':\n descriptor = cv2.ORB_create()\n \n # get keypoints and descriptors\n (kps, features) = descriptor.detectAndCompute(image, None)\n \n return (kps, features)", "def charuco_img_points(images, objpoint, board, a_dict):\r\n #Criteria for subpixel refinement\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\r\n\r\n objpoints = [] # 3d point in world space\r\n imgpoints = [] # 2d point in image plane\r\n\r\n for img in images:\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n corners, ids, rejpoints = cv2.aruco.detectMarkers(gray, a_dict)\r\n if len(corners)>0:\r\n res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,board)\r\n if res2[1] is not None:\r\n cv2.cornerSubPix(gray,res2[1],(3,3),(-1,1),criteria)\r\n imgpoints.append(res2[1].T[:,0,:])\r\n objpoints.append(objpoint[:,res2[2].flatten()])\r\n cv2.aruco.drawDetectedCornersCharuco(img,res2[1],res2[2])\r\n cv2.imshow(\"frame\",img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n return objpoints,imgpoints", "def get_features(img1,mask1, depth1):\n colors = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)\n img3 = img1.copy()\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img1 = clahe.apply(img1) # Applying Clahe\n kp, des = orb.detectAndCompute(img1, mask=mask1) # Computing ORB features\n kp_pts = np.float32([ kp[m].pt for m in range(len(kp))]).reshape(-1,2)\n # Getting Colors\n col = []\n for i in range(len(kp)):\n col.append(colors[kp_pts[i,1].astype(int), kp_pts[i,0].astype(int)])\n col = np.array(col)\n # Getting 2D points\n kp_2d = []\n for m in range(len(kp)):\n kp_2d.append([int(kp[m].pt[0]), int(kp[m].pt[1])])\n kp_2d = np.array(kp_2d).reshape(-1,2)\n \n # Getting the 3D points\n kp_3d, _, _ = convert_3d(kp_2d, depth1, img3)\n \n # Removing points with Zero depth\n my_ind = np.where(kp_3d[:,2]!=0)[0]\n new_kp_3d = kp_3d[my_ind,:]\n new_kp_2d = kp_2d[my_ind,:]\n new_des = des[my_ind,:]\n new_col = col[my_ind,:]\n \n # Removing the duplicates\n uni_3d = np.unique(new_kp_3d, return_index= True, axis=0)[1]\n new_kp_3d1 = new_kp_3d[uni_3d,:]\n new_kp_2d1 = new_kp_2d[uni_3d,:]\n new_des1 = new_des[uni_3d,:]\n new_col1 = new_col[uni_3d,:]\n return kp_3d, kp_2d, des, col", "def _compute_prediction(self, k, img, kp, des):\n\n # TODO: find corresponding points in the input image and the template image\n # put keypoints from template image in template_pts\n # put corresponding keypoints from input image in img_pts\n self.template_pts = []\n self.img_pts = []\n\n distances_matrix = cdist(self.descs[k],des)\n # closest vector in des2 to the 0th descriptor vector in des1\n closest = distances_matrix.argsort()[:,:2]\n # print closest.shape\n for i in range(len(self.descs[k])):\n # print distances_matrix[i, closest[0], self.good_thresh*distances_matrix[i,closest[1]], closest[0]]\n if distances_matrix[i, closest[i,0]] < self.good_thresh*distances_matrix[i,closest[i,1]]:\n self.template_pts.append(self.kps[k][i].pt)\n self.img_pts.append(kp[closest[i,0]].pt)\n self.img_pts = np.array(self.img_pts)\n self.template_pts = np.array(self.template_pts)\n # print 
len(self.template_pts), type(self.img_pts), type(self.template_pts)\n\n #TODO: change img to img_T once you do the homography transform\n # Transform input image so that it matches the template image as well as possible\n M, mask = cv2.findHomography(self.img_pts, self.template_pts, cv2.RANSAC, self.ransac_thresh)\n img_T = cv2.warpPerspective(img, M, self.im_bw[k].shape[::-1])\n # cv2.imshow('image_window',img_T)\n # cv2.waitKey(0)\n visual_diff = compare_images(img_T, self.im_bw[k])\n return visual_diff", "def detect_points(self):\r\n\r\n\t\r\n\r\n\t\tfeature_mask = np.zeros_like(self.gray) ## Create a mask so we only look for template features in the ROI\r\n\t\t\r\n\t\tfeature_mask[max(0,self.bb[1]):min(360,self.bb[1] + self.bb[3]),max(0,self.bb[0]):min(640,self.bb[0] + self.bb[2])] = 255\r\n\r\n\t\t# search for good points\r\n\t\tfeatures = cv2.goodFeaturesToTrack(self.gray, mask = feature_mask, **feature_params)\r\n\t\t# refine the corner locations\r\n\t\tcv2.cornerSubPix(self.gray,features, **subpix_params)\r\n\r\n\t\tself.features = features\r\n\r\n\t\tself.tracks = [[p] for p in features.reshape((-1,2))]\r\n\r\n\t\tself.prev_gray = self.gray", "def extract_pts_feat(self, pts, img_feats, img_metas):\n if not self.with_pts_bbox:\n return None\n voxels, num_points, coors = self.voxelize(pts)\n voxel_features = self.pts_voxel_encoder(voxels, num_points, coors,\n img_feats, img_metas)\n batch_size = coors[-1, 0] + 1\n x = self.pts_middle_encoder(voxel_features, coors, batch_size)\n x = self.pts_backbone(x)\n if self.with_pts_neck:\n x = self.pts_neck(x)\n return x", "def detect_feats(img_fpath, use_adaptive_scale=False, nogravity_hack=False, **kwargs):\n if __DEBUG__:\n print('[hes] Detecting Keypoints')\n print('[hes] use_adaptive_scale=%r' % (use_adaptive_scale,))\n print('[hes] nogravity_hack=%r' % (nogravity_hack,))\n print('[hes] kwargs=%s' % (ut.dict_str(kwargs),))\n # Load image\n hesaff_ptr = _new_fpath_hesaff(img_fpath, **kwargs)\n if __DEBUG__:\n print('[hes] detect')\n # Get num detected\n nKpts = HESAFF_CLIB.detect(hesaff_ptr)\n if __DEBUG__:\n print('[hes] allocate')\n # Allocate arrays\n kpts = alloc_kpts(nKpts)\n vecs = alloc_vecs(nKpts)\n if __DEBUG__:\n print('[hes] export')\n # Populate arrays\n HESAFF_CLIB.exportArrays(hesaff_ptr, nKpts, kpts, vecs)\n HESAFF_CLIB.free_hesaff(hesaff_ptr)\n if __DEBUG__:\n import vtool as vt\n assert vt.check_sift_validity(vecs)\n if use_adaptive_scale: # Adapt scale if requested\n if __DEBUG__:\n print('[hes] adapt_scale')\n kpts, vecs = adapt_scale(img_fpath, kpts)\n if nogravity_hack:\n if __DEBUG__:\n print('[hes] adapt_rotation')\n kpts, vecs = vtool_adapt_rotation(img_fpath, kpts)\n return kpts, vecs", "def get_features_fast(self, frame_gray):\n keypoints = self.fast.detect(frame_gray, None)\n\n return np.float32(\n [kp.pt for kp in keypoints]\n ).reshape(-1, 1, 2)", "def checker_img_points(images, objpoint, checkerboard):\r\n #Criteria for subpixel refinement\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\r\n\r\n objpoints = [] # 3d point in world space\r\n imgpoints = [] # 2d point in image plane\r\n\r\n dellist = []\r\n\r\n for i,img in enumerate(images):\r\n print(\"image number:\", i)\r\n\r\n #when running raw_plane_chess, don't run below code\r\n #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n gray = img\r\n\r\n # Find the chess board corners\r\n ret, corners = cv2.findChessboardCorners(gray, checkerboard, None)\r\n # If found, add object points, image points (after refining them)\r\n if ret 
== True:\r\n objpoints.append(objpoint.T)\r\n cv2.cornerSubPix(gray,corners,(3,3),(-1,-1),criteria)\r\n cv2.drawChessboardCorners(img, checkerboard, corners, ret)\r\n imgpoints.append(corners.T[:,0,:])\r\n else:\r\n print(\"Found no checkerboard in this image {}\".format(i))\r\n cv2.imshow(\"image\",img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n dellist.append(i)\r\n\r\n for index in sorted(dellist, reverse=True):\r\n del images[index]\r\n\r\n print(\"Found {} checkerboards of size {}\".format(len(objpoints),checkerboard))\r\n return objpoints,imgpoints", "def draw_key_pts(image, keypoints):\n \n # Draw blobs on our image as green circles \n blank = np.zeros((1, 1)) \n image = cv2.drawKeypoints(image, keypoints, blank, (0, 255, 0), \n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n for curKey in keypoints:\n x=np.int(curKey.pt[0])\n y=np.int(curKey.pt[1])\n #size = np.int(curKey.size)\n image_final = cv2.circle(image,(x,y),2,(255, 0, 0), 3)\n \n return image_final", "def findMatchesBetweenImages(image_1, image_2):\n # matches - type: list of cv2.DMath\n matches = None\n # image_1_kp - type: list of cv2.KeyPoint items.\n image_1_kp = None\n # image_1_desc - type: numpy.ndarray of numpy.uint8 values.\n image_1_desc = None\n # image_2_kp - type: list of cv2.KeyPoint items.\n image_2_kp = None\n # image_2_desc - type: numpy.ndarray of numpy.uint8 values.\n image_2_desc = None\n # WRITE YOUR CODE HERE.\n\n sift = cv2.ORB_create()\n image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)\n image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)\n\n # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matches = bf.match(image_1_desc, image_2_desc)\n # matches = sorted(matches, key = lambda x:x.distance)\n # matches = matches[:10]\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(image_1_desc, image_2_desc, k=2)\n\n # Apply ratio test\n good = []\n for m, n in matches:\n print(m.distance, n.distance, m.distance < .75*n.distance)\n if m.distance < (0.75 * n.distance):\n good.append([m])\n\n # We coded the return statement for you. 
You are free to modify it -- just\n # make sure the tests pass.\n print(len(good), good)\n return image_1_kp, image_2_kp, matches\n # END OF FUNCTION.", "def _compute_prediction(self, k, img, kp, des):\n\n # find corresponding points in the input image and the template image\n #put keypoints from template image in template_pts\n #put corresponding keypoints from input image in img_pts\n good = []\n self.matcher = cv2.BFMatcher() #cv2's \"brute force\" matcher\n matches = self.matcher.knnMatch(self.descs[k],des,k=2)\n for m,n in matches:\n if m.distance < self.good_thresh*n.distance: #if first best keypoint is closer to the template than .7 * second best, it's good\n good.append(m)\n if len(good) > self.min_match_count:\n img_pts = np.float32([ kp[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n template_pts = np.float32([ self.kps[k][m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n else:\n return None\n\n # Transform input image so that it matches the template image as well as possible\n M, mask = cv2.findHomography(img_pts, template_pts, cv2.RANSAC, self.ransac_thresh)\n img_T = cv2.warpPerspective(img, M, self.signs[k].shape[::-1])\n visual_diff = compare_images(img_T, self.signs[k])\n return visual_diff", "def img_and_key_point_augmentation(augmentation, img, bbox, key_points):\n\n # img_copy = img.copy()\n image_shape = img.shape\n h, w = image_shape[0:2]\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n\n ia_bbox = list()\n for bounding_box in bbox:\n x1, y1, x2, y2 = bounding_box\n ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))\n\n bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape)\n bbs_aug = det.augment_bounding_boxes([bbs])[0]\n # img = bbs_aug.draw_on_image(img)\n\n after_bbox = list()\n for bounding_box in bbs_aug.bounding_boxes:\n bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int]\n\n if bbox_list[0] >= w: bbox_list[0] = w - 1\n if bbox_list[1] >= h: bbox_list[1] = h - 1\n if bbox_list[2] >= w: bbox_list[2] = w - 1\n if bbox_list[3] >= h: bbox_list[3] = h - 1\n\n if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:\n return img_and_key_point_augmentation(augmentation, img, bbox, key_points)\n\n bbox_list = list(map(lambda x: max(x, 0), bbox_list))\n after_bbox.append(bbox_list)\n\n after_key_points = list()\n for key_point_list in key_points:\n after_key_point_list = list()\n for key_point in key_point_list:\n xy_points = list()\n for i, x in enumerate(key_point[::2]):\n y = key_point[(i * 2) + 1]\n xy_points.append(ia.Keypoint(x=x, y=y))\n\n keypoints_on_image = det.augment_keypoints([ia.KeypointsOnImage(xy_points, shape=image_shape)])\n # img = keypoints_on_image[0].draw_on_image(img)\n\n xy_points = list()\n for key_point in keypoints_on_image[0].keypoints:\n kp = [key_point.x_int, key_point.y_int]\n if 0 > min(kp) or w <= max(kp[::2]) or h <= max(kp[1::2]):\n # print(kp)\n return img_and_key_point_augmentation(augmentation, img, bbox, key_points)\n xy_points.extend(kp)\n\n after_key_point_list.append(xy_points)\n\n after_key_points.append(after_key_point_list)\n\n img_aug = det.augment_image(img)\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, after_bbox, after_key_points", "def _collect_points(self, image, point_value=0):\n return zip(*np.where(image == point_value))", "def 
get_surf_kps(img_fn, img=None, center_out=0,\n cness_thresh=1000, min_pts=10, max_pts=300):\n assert center_out < 1, \"Too high center part to remove\"\n # initialize the SURF keypoint detector and descriptor\n surf = cv2.SURF(cness_thresh)\n # load the gray-scale image\n if img is None:\n img = cv2.imread(img_fn, 0)\n # detect and describe SURF keypoints\n cvkp, ds = surf.detectAndCompute(img, None, None)\n # re-arrange the data properly\n ds.shape = (-1, surf.descriptorSize()) # reshape to (n_pts, desc_size)\n kp = np.array([p.pt for p in cvkp])\n cness = np.array([p.response for p in cvkp])\n # filter out points in the middle (likely to be on the moving actor)\n if center_out > 0:\n rx = img.shape[1]\n lb = center_out * 0.5 * rx\n ub = (1 - center_out * 0.5) * rx\n mask = (kp[:, 0] < lb) + (kp[:, 0] > ub)\n kp = kp[mask, :]\n ds = ds[mask, :]\n cness = cness[mask]\n # check we're within the limits\n if kp.shape[0] < min_pts:\n if cness_thresh > 100:\n # redo the whole thing with a lower threshold\n _, kp, ds = get_surf_kps(img_fn, img=img, center_out=center_out,\n min_pts=min_pts, max_pts=max_pts,\n cness_thresh=0.5 * cness_thresh)\n else:\n # we lowered the threshold too much and didn't find enough points\n raise ValueError('Degenerate image (e.g. black) or too high center_out')\n if kp.shape[0] > max_pts:\n # too many points, take those with max cornerness only\n cness_order = np.argsort(cness)[::-1]\n kp = kp[cness_order[:max_pts], :]\n ds = ds[cness_order[:max_pts], :]\n return img, kp, ds", "def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n\n # In the case of the Hog Feature, we already given the base parameters for using hog feature function.\n # TA - You can just use that parameter with each subdivide image (which has image grid size * image grid size)\n # Thank you for the reply. Does it mean to divide the image into 20x20 size sub-images and perform the feature extraction on each image??\n # TA - Yes. In the SIFT, image grid size is different.\n\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n # Your code here. You should also change the return value.\n\n # sample visualizing\n # cv2.imshow('img', img)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n hog = cv2.HOGDescriptor(win_size,\n block_size,\n block_stride,\n cell_size,\n nbins,\n deriv_aperture,\n win_sigma,\n histogram_norm_type,\n l2_hys_threshold,\n gamma_correction,\n nlevels)\n\n # additional parameters\n\n #hist = hog.compute(gray,winStride,padding,locations)\n\n #TODO: Check if this is valid???\n\n hist = hog.compute(gray)\n hist_resized = np.resize(hist, (int(len(hist)/36), 36))\n hist_resized\n return hist_resized\n\n elif feature == 'SIFT':\n\n # Your code here. 
You should also change the return value.\n\n #input image size 240 * 200 ==> divide H, W by 20 ==> 12 * 10 = 120\n #in case of this input image, the number of feature is 120.\n #So the number of feature is changed according to input image size.\n\n #IF PROBLEMS WITH DEPENDENCIES: pip3 install opencv-contrib-python==3.4.2.16\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n\n return des", "def describe(image):\n needle = cv2.imread(image, 0)\n orb = cv2.ORB()\n keypoints, description = orb.detectAndCompute(needle, None)\n print(keypoints)\n print(description)\n return keypoints, description", "def get_spoof_features(img):\n # Converting default BGR of OpenCV to RGB\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # Changing dimension from 3*m*n to m*n*3\n h_im = img[:, :, 0]\n s_im = img[:, :, 1]\n v_im = img[:, :, 2]\n\n img = np.array([h_im, s_im, v_im])\n\n return compute_msu_iqa_features(img)", "def M12Nut(image):\n kernel = np.ones((5, 5), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=4)\n\n parameters = cv2.SimpleBlobDetector_Params()\n detector = cv2.SimpleBlobDetector_create(parameters=parameters)\n keypoints = detector.detect(image)\n new_image = cv2.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n for i in range(len(keypoints)):\n print(\"Keypoint: \", keypoints[i].pt)\n cv2.imshow(\"Keypoints\", new_image)\n cv2.waitKey(1000)\n cv2.destroyAllWindows()\n x, y = keypoints[0].pt\n\n return x, y", "def im_detect_keypoints_hflip(model, im, boxes):\n # Compute keypoints for the flipped image\n im_hf = im[:, ::-1, :]\n boxes_hf = box_utils.flip_boxes(boxes, im.shape[1])\n\n im_scales = im_conv_body_only(model, im_hf)\n heatmaps_hf = im_detect_keypoints(model, im_scales, boxes_hf)\n\n # Invert the predicted keypoints\n heatmaps_inv = keypoint_utils.flip_heatmaps(heatmaps_hf)\n\n return heatmaps_inv", "def find_initial_position(img1, img2):\n # find points of interest in points\n img1_kp, img1_des = compute_orb(img1)\n img2_kp, img2_des = compute_orb(img2)\n\n # get closest 2 matches per point\n bf = cv2.BFMatcher(normType=cv2.NORM_HAMMING)\n matches = bf.knnMatch(img1_des, img2_des, k=2)\n\n good_matches = []\n pts1 = []\n pts2 = []\n # Lowe's ratio test\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good_matches.append(m)\n pts1.append(img1_kp[m.queryIdx].pt)\n pts2.append(img2_kp[m.trainIdx].pt)\n\n pts1 = np.float32(pts1)\n pts2 = np.float32(pts2)\n\n # essential matrix gives the motion of the points\n # to get motion of the camera, flip the inputs between pts1 and pts2\n essential_matrix, e_mask = cv2.findEssentialMat(pts2, pts1, intrinsic_camera_matrix)\n\n # select only inlier points as per the RANSAC method\n pts1 = pts1[e_mask.ravel() == 1]\n pts2 = pts2[e_mask.ravel() == 1]\n\n _, rotation, translation, mask, triangulated_points = cv2.recoverPose(essential_matrix, pts2, pts1, intrinsic_camera_matrix, distanceThresh=50)\n triangulated_points = np.asarray([np.divide(triangulated_points[0], triangulated_points[3]),\n np.divide(triangulated_points[1], triangulated_points[3]),\n np.divide(triangulated_points[2], triangulated_points[3])]).transpose()\n\n CAMERA_POSES.clear()\n CAMERA_POSES.append(np.hstack((np.identity(3), np.array([[0], [0], [0]]))))\n CAMERA_POSES.append(np.hstack((rotation, translation)))\n return rotation, translation, triangulated_points", "def sift_alignment(image_1: str, 
image_2: str):\n\tim1 = cv2.imread(image_1, cv2.IMREAD_COLOR)\n\tim2 = cv2.imread(image_2, cv2.IMREAD_COLOR)\n\t\n\tsift = cv2.xfeatures2d.SIFT_create()\n\tkey_points_1, descriptors_1 = sift.detectAndCompute(im1, None)\n\tkey_points_2, descriptors_2 = sift.detectAndCompute(im2, None)\n\t\n\tbf_matcher = cv2.BFMatcher() # brute force matcher\n\t# matches = bf_matcher.match(descriptors_1, descriptors_2) # result is not good\n\tmatches = bf_matcher.knnMatch(descriptors_1, descriptors_2, k=2)\n\t\n\t# Apply ratio test\n\tgood_matches = []\n\tfor m, n in matches:\n\t\tif m.distance < 0.6 * n.distance: # this parameter affects the result filtering\n\t\t\tgood_matches.append([m])\n\t\n\tmatch_img = cv2.drawMatchesKnn(im1, key_points_1, im2, key_points_2,\n\t good_matches, None, flags=2)\n\treturn len(matches), len(good_matches), match_img", "def show_keypoints(image, key_pts):\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')", "def extract_feat(self, img):\n x = self.backbone(img)\n y = self.backbone_gan(img)\n if self.with_feature_selection:\n x, y = self.feature_selection(x, y)\n if self.with_neck:\n x = self.neck(x)\n return x, y", "def predict(X_img, knn_clf=None, model_path=None, distance_threshold=0.6):\n # if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:\n # raise Exception(\"Invalid image path: {}\".format(X_img_path))\n #\n # if knn_clf is None and model_path is None:\n # raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\n #\n # # Load a trained KNN model (if one was passed in)\n print(model_path)\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n print(f)\n print(\"before open\")\n knn_clf = pickle.load(f)\n print(\"is_open?\")\n\n # Grab a single frame of video\n # ret, frame = X_img_path.read()\n\n # Load image file and find face locations\n # X_img = frame[:, :, ::-1] #np.array(frame)\n print(\"X_img why not working\")\n # print(X_img)\n startTime = time.time()\n\n X_img = face_recognition.load_image_file('find.jpg')\n print(\"face_recognition : load img\")\n print(time.time() - startTime)\n\n startTime = time.time()\n\n X_face_locations = face_recognition.face_locations(X_img)\n print(X_face_locations)\n print(time.time() - startTime)\n startTime = time.time()\n #print(type((X_face_locations[0])[2]))\n #X_face_locations = fd.get_face()\n #X_face_locations = [(int(X_face_locations[0]), int(X_face_locations[3]), int(X_face_locations[2]), int(X_face_locations[1]))]\n print(X_face_locations)\n # face_bounding_boxes1.append(X_face_locations[0])\n # face_bounding_boxes1.append(X_face_locations[1])\n # face_bounding_boxes1.append(X_face_locations[2])\n # face_bounding_boxes1.append(X_face_locations[3])\n print(\"face location\")\n print(X_face_locations)\n print(time.time() - startTime)\n print(len(X_face_locations))\n\n # cv2.imshow(\"asdf\", X_face_locations)\n # If no faces are found in the image, return an empty result.\n if len(X_face_locations) == 0:\n return []\n\n # Find encodings for faces in the test iamge\n # print(rgb_small_frame)\n print(\"X_face_locations\")\n print(X_face_locations)\n\n # cap = cv2.VideoCapture(0)\n # ret1, frame1 = cap.read()\n\n # while True:\n #\n # if ret:\n # cv2.imshow(\"video\", X_img)\n #\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break;\n # else:\n # break;\n #print(X_face_locations)\n startTime = time.time()\n faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)\n 
print(\"encoding\")\n print(time.time() - startTime)\n #print(faces_encodings)\n startTime = time.time()\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n print(\"kneighbors\")\n print(time.time() - startTime)\n # closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]\n\n # Predict classes and remove classifications that aren't within the threshold\n return [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in\n zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]", "def match_greedy_sift(keypoints_A, keypoints_B, features_A, features_B, test_ratio=0.75):\n matcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n matches = matcher.knnMatch(features_A, features_B, 2)\n\n refined_matches = []\n for m in matches:\n if len(m) == 2 and m[0].distance < m[1].distance * test_ratio:\n refined_matches.append((m[0].queryIdx, m[0].trainIdx))\n\n match_A, match_B = [], []\n for idA, idB in refined_matches:\n match_A.append(keypoints_A[idA])\n match_B.append(keypoints_B[idB])\n\n return np.int32(match_A), np.int32(match_B)", "def find(image):\n keypoint, description = describe(image)\n # load keypoints, descriptions from mongodb\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n best_match_size = float(\"inf\")\n best_match_index = -1\n match_index = 0\n best_matches = 0\n\n for desc in descriptions:\n matches = bf.match(desc,description)\n matches = sorted(matches, key = lambda x:x.distance)\n if len(matches) > 0:\n match_size = sum(x.distance for x in matches[:10])\n\n print \"match size is \", match_size\n if match_size < best_match_size:\n best_match_size = match_size\n best_match_index = match_index\n best_matches = matches\n\n match_index += 1\n\n needle_color = cv2.imread('needle-stripped.png')[:,:,::-1] # needle\n best_match_image = cv2.imread(\"haystack/\"+files[best_match_index])\n print \"best match is \", files[best_match_index]\n\n # Draw first 10 matches.\n outImg = cv2.imread(\"output/outImg.png\")\n match = cv2.drawMatches(needle_color,keypoint,best_match_image[:,:,::-1],keypoints[best_match_index],best_matches[-20:],outImg, flags=6)\n\n plt.imshow(match),plt.show()\n return", "def extract_feat(self, points, img_metas=None):\n voxels, num_points, coors = self.voxelize(points)\n voxel_features = self.voxel_encoder(voxels, num_points, coors)\n batch_size = coors[-1, 0].item() + 1\n x = self.middle_encoder(voxel_features, coors, batch_size)\n x = self.backbone(x)\n if self.with_neck:\n x = self.neck(x)\n return x", "def x_means(image_path):\n\n # List where all the descriptors are stored\n des_list = []\n\n im = cv2.imread(image_path)\n kpts = fea_det.detect(im)\n kpts, des = des_ext.compute(im, kpts)\n\n # rootsift - not boosting performance\n # rs = RootSIFT()\n # des = rs.compute(kpts, des)\n\n des_list.append((image_path, des))\n\n # Stack all the descriptors vertically in a numpy array\n descriptors = des_list[0][1]\n\n # gather features\n test_features = np.zeros((1, numWords), \"float32\")\n words, _ = vq(descriptors, voc)\n for w in words:\n test_features[0][w] += 1\n\n # Perform Tf-Idf vectorization and L2 normalization\n test_features = test_features * idf\n test_features = preprocessing.normalize(test_features, norm='l2')\n\n score = np.dot(test_features, im_features.T)\n # print \"score: \", score\n rank_ID = np.argsort(-score)\n # 
print \"rank matrix: \", rank_ID[0]\n\n for ID in rank_ID[0]:\n if score[0][ID] <= float(lowcut):\n if ID not in low_union:\n low_union.append(ID)\n\n x_list = []\n for x in score[0]:\n x_list.append([x])\n X = np.array(x_list)\n for n in X:\n n[0] *= 1000000 # increase difference\n # print \"Scores for X-Means: \", X\n\n # compute k range\n # ks = range(1, 21)\n bot_k = 30 * len(image_paths) / (200 + len(image_paths)) - 5\n ks = range(bot_k, bot_k + 10)\n\n # run ks times kmeans and save each result in the KMeans object\n KMeans = [cluster.KMeans(n_clusters=i, init=\"k-means++\").fit(X) for i in ks]\n\n # now run for each cluster the BIC computation\n bic_max = -sys.maxint - 1\n max_idx = 0\n # BIC = [compute_bic(kmeansi,X) for kmeansi in KMeans]\n for i in range(len(KMeans)):\n curr = compute_bic(KMeans[i], X)\n # print \"BIC = \", curr, \" when using %d clusters\\r\" % (i + bot_k)\n if curr > bic_max:\n bic_max = curr\n max_idx = i\n\n best_k = max_idx + bot_k\n # print \"Best K = \", best_k, \"with BIC = %d\\r\" % bic_max\n best_k_labels = KMeans[max_idx].labels_\n # print \"Best K labels\", KMeans[max_idx].labels_\n\n freq = {}\n for cluster_no in best_k_labels:\n if cluster_no in freq:\n freq[cluster_no] = freq[cluster_no] + 1\n else:\n freq[cluster_no] = 1\n max_cluster = 0\n max_size = -1\n for k, v in freq.items():\n if v > max_size:\n max_cluster = k\n max_size = v\n res = []\n for idx in range(len(best_k_labels)):\n if best_k_labels[idx] == max_cluster:\n res.append(idx)\n assert max_size == len(res)\n print \"\\nSize of the largest cluster = \", max_size\n\n # # Archive big clusters for inspection\n # path = os.path.split(pool_path)[0] + \"\\\\\" + \"cluster_\" + os.path.split(image_path)[1]\n # if os.path.exists(path):\n # shutil.rmtree(path)\n # os.mkdir(path)\n # print \"Adding cluster images for pool image \", image_path, \" to path \", path\n # for ID in res:\n # # print \"Adding \", image_paths[ID]\n # shutil.copy(image_paths[ID], path)\n\n return res # biggest_cluster list", "def extract_features(self, write_to_file=False):\r\n logging.info(f\"Extracting features from {self.name}...\")\r\n sift = cv.SIFT_create()\r\n self.keypoints, self.descriptors = sift.detectAndCompute(self.image, None)\r\n logging.info(f\"Feature extraction complete.\")\r\n if write_to_file:\r\n self.write_features()\r\n return None", "def _compute_prediction(self, k, img, kp, des):\n\n # http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(self.descs[k], des, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n\n # http://stackoverflow.com/questions/35884409/how-to-extract-x-y-coordinates-from-opencv-cv2-keypoint-object\n img_pts = np.float32([kp[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n curr_kp = self.kps[k]\n template_pts = np.float32([curr_kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n\n # Transform input image so that it matches the template image as well as possible\n M, mask = cv2.findHomography(img_pts, template_pts, cv2.RANSAC, self.ransac_thresh)\n img_T = cv2.warpPerspective(img, M, self.signs[k].shape[::-1])\n\n visual_diff = compare_images(img_T, self.signs[k])\n return visual_diff", "def _series_merging_map(self, map_list, feature_option=\"sift\"):\n print(\" --- Start ---\")\n # Transform state into 3 specified values\n for i in range(len(map_list)):\n map_list[i] = cv2.cvtColor(map_list[i], cv2.COLOR_RGB2GRAY)\n map_list[i] = 
MF._transform_state(map_list[i])\n \n\n map_ref = map_list[0]\n for i in range(len(map_list)-1):\n map_align = map_list[i+1]\n\n \n if feature_option == \"orb\":\n orb = cv2.ORB_create()\n key_points_1, descriptor_1 = orb.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = orb.detectAndCompute(map_align, None)\n \n elif feature_option == \"surf\":\n surf = cv2.xfeatures2d.SURF_create(400)\n key_points_1, descriptor_1 = surf.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = surf.detectAndCompute(map_align, None)\n else:\n siftDetector = cv2.xfeatures2d.SIFT_create()\n key_points_1, descriptor_1 = siftDetector.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = siftDetector.detectAndCompute(map_align, None)\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descriptor_1, descriptor_2, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n \n pts_1, pts_2 = [], []\n for i in good:\n query_idx = i.queryIdx\n train_idx = i.trainIdx\n\n pts_1.append([\n key_points_1[query_idx].pt[0],\n key_points_1[query_idx].pt[1],\n ])\n pts_2.append([\n key_points_2[train_idx].pt[0],\n key_points_2[train_idx].pt[1],\n ])\n \n pts1 = np.array(pts_1)\n pts2 = np.array(pts_2)\n\n # relation, value, _ = RMM._ransac_find_rotation_translation(pts_set_1=pts2, pts_set_2=pts1, sigma=0.5, max_iter=5000)\n # print(\"- Inlier Percent: %f\"%value)\n # # Because the coordinates between the maps and the SIFT features are different:\n # # SIFT Features: Right: +x, Down: +y\n # # Maps: Down: +x, Right: +y\n # # Hence the dx and dy should be changed.\n # dx = relation[1]\n # dy = relation[0]\n # dyaw = relation[2]\n # print(\"- (x, y, t): (%f, %f, %f)\"%(dx,dy,dyaw))\n\n # # index, agr, dis = RMM._similarity_index(x=[dy, dx, dyaw], map1=map_ref, map2=map_align)\n # # print(\"Similarity Index: %f\\nAgree Number: %f\\nDisargee Number: %f\"%(index, agr, dis))\n # index, agr, dis, _ = RMM._similarity_index_2(x=[dx, dy, dyaw], map1=map_ref, map2=map_align)\n # print(\"- Similarity Index: %f\\n- Agree Number: %f\\n- Disargee Number: %f\"%(index, agr, dis))\n \n # map_merged = MF._merging_map(dx=dx, dy=dy, dtheta=dyaw, map1=map_ref, map2=map_align)\n # map_ref = map_merged.astype(np.uint8)\n # map_ref = MF._modify_map_size(merged_map=map_ref)\n\n relation, value, _ = RANSAC_Map_Merging()._ransac_find_all(pts_set_1=pts2, pts_set_2=pts1, sigma=5, max_iter=2000)\n dx = relation[1]\n dy = relation[0]\n dyaw = relation[2]\n dr = relation[3]\n print(\"- Inlier Percent: %f\"%value)\n print(\"- (dx, dy, dyaw, dr) = %f, %f, %f, %f\"%(dx,dy,dyaw, dr))\n map_merged = MAP_Function()._merging_map_ratio(dx=dx, dy=dy, dtheta=dyaw, dr=dr, map1=map_ref, map2=map_align)\n map_ref = map_merged.astype(np.uint8)\n map_ref = MF._modify_map_size(merged_map=map_ref)\n\n # return map_ref, (dx, dy, dyaw)\n return map_ref, (dx, dy, dyaw, dr)", "def find_features(pyr):\n feature_pnts = spread_out_corners(pyr[0], SPREAD_N, SPREAD_M ,SPREAD_CORNERS_RADIUS)\n descriptors = sample_descriptor(pyr[2], feature_pnts, SAMPLE_RAD)\n return feature_pnts, descriptors", "def _forward_keypoint(\n\t\tself, features: Dict[str, torch.Tensor], instances: List[Instances]\n\t) -> Union[Dict[str, torch.Tensor], List[Instances]]:\n\t\tif not self.keypoint_on:\n\t\t\treturn {} if self.training else instances\n\n\t\tfeatures = [features[f] for f in self.in_features]\n\n\t\tif self.training:\n\t\t\t# The loss is defined on positive proposals with at >=1 visible keypoints.\n\t\t\tproposals, _ = 
select_foreground_proposals(instances, self.num_classes)\n\t\t\tproposals = select_proposals_with_visible_keypoints(proposals)\n\t\t\tprint(\"proposals going to keypoint\")\n\t\t\tprint(proposals)\n\t\t\tproposal_boxes = [x.proposal_boxes for x in proposals]\n\n\t\t\tkeypoint_features = self.keypoint_pooler(features, proposal_boxes)\n\t\t\treturn self.keypoint_head(keypoint_features, proposals)\n\t\telse:\n\t\t\tpred_boxes = [x.pred_boxes for x in instances]\n\t\t\tkeypoint_features = self.keypoint_pooler(features, pred_boxes)\n\t\t\treturn self.keypoint_head(keypoint_features, instances)", "def find(self, image, k=None, ratio=None):\n if not self._targets:\n return []\n k = 2 if k is None else k\n ratio = 0.75 if ratio is None else ratio\n keypoints, descriptors = self._detector.detectAndCompute(image, None)\n if len(keypoints) < self.min_match_count:\n return []\n matches = self._matcher.knnMatch(descriptors, k=int(k))\n matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]\n if len(matches) < self.min_match_count:\n return []\n matches_by_id = [[] for _ in xrange(len(self._targets))]\n for m in matches:\n matches_by_id[m.imgIdx].append(m)\n tracked = []\n for imgIdx, matches in enumerate(matches_by_id):\n if len(matches) < self.min_match_count:\n continue\n target = self._targets[imgIdx]\n p0 = [target.keypoints[m.trainIdx].pt for m in matches]\n p1 = [keypoints[m.queryIdx].pt for m in matches]\n p0, p1 = np.float32((p0, p1))\n H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)\n status = status.ravel() != 0\n if status.sum() < self.min_match_count:\n continue\n p0, p1 = np.int32((p0, p1))\n inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]\n outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]\n quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)\n track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)\n tracked.append(track)\n tracked.sort(key = lambda t: len(t.inliers), reverse=True)\n return tracked", "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = des_dict[mat.trainIdx][1]\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour green\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (0, 255, 0)) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (0, 255, 0))\n\n # Draw a line in between the two points\n # thickness = 1\n # colour green\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (0, 255, 0), 1)\n\n\n # Show the image\n #cv2.imshow('Matched Features', out)\n #cv2.waitKey(0)\n #cv2.destroyWindow('Matched Features')\n\n # Also return the image if you'd like a copy\n return out", "def 
_forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]):\n if not self.keypoint_on:\n return {} if self.training else instances\n\n if self.training:\n # head is only trained on positive proposals with >=1 visible keypoints.\n instances, _ = select_foreground_proposals(instances, self.num_classes)\n instances = select_proposals_with_visible_keypoints(instances)\n\n if self.keypoint_pooler is not None:\n features = [features[f] for f in self.keypoint_in_features]\n boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]\n features = self.keypoint_pooler(features, boxes)\n else:\n features = {f: features[f] for f in self.keypoint_in_features}\n return self.keypoint_head(features, instances)", "def compute(self, image, keypoints, imgDescriptor=None): # real signature unknown; restored from __doc__\n pass", "def create_masks(img_path, frame_num):\n #import the images\n key_frame = cv2.imread(img_path + \"_\" + str(frame_num) + \".png\")\n beam_mask = filter_beam(key_frame)\n key_frame = cv2.cvtColor(cv2.bitwise_and(beam_mask,key_frame), cv2.COLOR_BGR2GRAY)\n cv2.imwrite(img_path + \"_\" + str(frame_num) + \"_beamed.png\",key_frame)\n key_frame = change_contrast(key_frame, 4.0)\n\n #key_mask = cv2.imread(img_path + \"_mask_\" + str(frame_num) + \".png\",0)\n #masked_key = cv2.bitwise_and(key_frame,key_mask)\n new_frame = cv2.imread(img_path + \"_\" + str(frame_num + 1) + \".png\")\n new_frame = cv2.cvtColor(cv2.bitwise_and(beam_mask,new_frame), cv2.COLOR_BGR2GRAY)\n new_frame = change_contrast(new_frame, 4.0)\n\n #trying with a couple methods here:\n #SIFT method\n sift = cv2.SIFT_create()\n keypoints_1, descriptors_1 = sift.detectAndCompute(key_frame,None)\n keypoints_2, descriptors_2 = sift.detectAndCompute(new_frame,None)\n bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)\n\n matches = bf.match(descriptors_1,descriptors_2)\n matches = sorted(matches, key = lambda x:x.distance)\n for x in keypoints_1:\n print(x.pt)\n\n img3 = cv2.drawMatches(key_frame, keypoints_1, new_frame, keypoints_2, matches, new_frame, flags=2)\n cv2.imshow(\"matched\",img3)\n cv2.waitKey(0)\n\n #use the SIFT paradigm but do it semi-manually\n\n #active contouring method", "def pose_2d_pts(self,image):\n '''\n image- rgb image \n return:-\n pts - list of 2d pose landmarks as img coords\n image- rgb image on which the 2d pose landmarks are drawn\n ''' \n pts=[]\n imgRGB=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n results=pose.process(imgRGB)\n if results.pose_landmarks:\n mpDraw.draw_landmarks(image,results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c=image.shape\n imgx,imgy=int(lm.x*w),int(lm.y*h)\n \n pts.append((imgx,imgy)) \n return pts,image", "def keypoint_decode(output, img_metas):\r\n batch_size = len(output)\r\n\r\n c = np.zeros((batch_size, 2), dtype=np.float32)\r\n s = np.zeros((batch_size, 2), dtype=np.float32)\r\n for i in range(batch_size):\r\n c[i, :] = img_metas[i]['center']\r\n s[i, :] = img_metas[i]['scale']\r\n\r\n preds, maxvals = keypoints_from_heatmaps(\r\n output, c, s,\r\n kernel=11)\r\n\r\n all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)\r\n all_preds[:, :, 0:2] = preds[:, :, 0:2]\r\n all_preds[:, :, 2:3] = maxvals\r\n\r\n return all_preds", "def get_aligned_kpts(i, j, keypoints, matches, mask=None):\n if mask is None:\n mask = np.ones(len(matches[i][j])) #if no mask is given, all matches used. 
This is helpful if we only want to triangulate certain matches.\n\n kpts_i, kpts_i_idxs, kpts_j, kpts_j_idxs = [], [], [], []\n for k in range(len(matches[i][j])):\n if mask[k] == 0: continue\n kpts_i.append(keypoints[i][matches[i][j][k].queryIdx].pt)\n kpts_i_idxs.append(matches[i][j][k].queryIdx)\n kpts_j.append(keypoints[j][matches[i][j][k].trainIdx].pt)\n kpts_j_idxs.append(matches[i][j][k].trainIdx)\n kpts_i = np.array(kpts_i)\n kpts_j = np.array(kpts_j)\n kpts_i = np.expand_dims(kpts_i, axis=1) #this seems to be required for cv2.undistortPoints and cv2.trangulatePoints to work\n kpts_j = np.expand_dims(kpts_j, axis=1)\n\n return kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs", "def extract_features(img_path):\n X_img = face_recognition.load_image_file(img_path)\n locs = face_locations(X_img, number_of_times_to_upsample = N_UPSCLAE)\n if len(locs) == 0:\n return None, None\n face_encodings = face_recognition.face_encodings(X_img, known_face_locations=locs)\n return face_encodings, locs", "def pickle_keypoints(point):\n return cv2.KeyPoint, (*point.pt, point.size, point.angle, point.response,\n point.octave, point.class_id)", "def show_all_keypoints(image, predicted_key_pts, gt_pts=None):\n # image is grayscale\n plt.imshow(image, cmap='gray')\n plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m')\n # plot ground truth points as green pts\n if gt_pts is not None:\n plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g')\n plt.show()", "def find_homography(kp1, kp2, goodmatches):\n src_pts = np.float32([kp1[m.trainIdx].pt for m in goodmatches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in goodmatches]).reshape(-1, 1, 2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist()\n return M, matchesMask", "def ORB_feature_extractor(img, show = False):\r\n try:\r\n img = pp.grayscale(img)\r\n except:\r\n pass\r\n ORB = cv2.ORB_create(nfeatures = 60, scaleFactor = 1.2, nlevels = 8, patchSize = 15, \r\n edgeThreshold = 7, scoreType=cv2.ORB_FAST_SCORE)\r\n\r\n keypoints, description = ORB.detectAndCompute(img, None)\r\n\r\n if show:\r\n for point in keypoints:\r\n x,y = point.pt\r\n cv2.circle(img, (int(x), int(y)), 2,(0, 255, 255))\r\n pp.show_image(img)\r\n \r\n if description is None:\r\n description = np.zeros((1,ORB.descriptorSize()))\r\n \r\n return description", "def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n 
feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict", "def predict(image_path, wrapper):\n \"\"\"\n #Don't forget to store your prediction into ImgPred\n img_prediction = ImgPred(...)\n \"\"\"\n\n #This is where all of our code will probably go. Here are the steps to success\n\n \n #Step One: Make a list which will contain the locations of every character in our source Image.\n SymPredList = []\n\n #Step Two: Go down that list we just made and use the code from PA4 in conjunction with our new Model to analyze each character. George made this part.\n #This is the find a character part of the code. Max and George worked it out.\n im = cv2.imread(image_path,0)\n (thresh, imbw) = cv2.threshold(im,20,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #cv2.imwrite('clapfuck.jpg', imbw)\n im3,contours,hierarchy = cv2.findContours(imbw,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n idx = 0\n for cnt in contours:\n idx += 1\n x1,y1,w,h = cv2.boundingRect(cnt)\n roi=imbw[y1:y1+h,x1:x1+w]\n\n #Step Two.1: Make a Numpy Array of all the pixels starting from the top left corner of an identified character to the bottom right corner of the identified character.\n height, width = roi.shape\n if height >= width:\n padded = cv2.copyMakeBorder(roi,0,0,(height-width)//2,(height-width)//2,cv2.BORDER_CONSTANT,value=[0,0,0])\n else:\n padded = cv2.copyMakeBorder(roi,(width-height)//2,(width-height)//2,0,0,cv2.BORDER_CONSTANT,value=[0,0,0])\n Smol = cv2.resize(padded, (28, 28))\n (thresh, evaluateMe) = cv2.threshold(Smol, 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #scipy.misc.imsave(os.path.basename(file), ree)\n #Step Two.2: Feed that numpy into our PA4 image analyzer converter thing but using our new trained model\n evaluateMeMe = numpy.reshape(evaluateMe, (1, 28, 28, 1))\n prediction = tf.argmax(y_conv,1)\n final_number = prediction.eval(feed_dict={x:evaluateMeMe,y_:numpy.zeros((1,40)), keep_prob:1.0})\n #Step Two.3: Record what we think it is as the prediction field of the SymPred we are currently on\n final_guess = wrapper.label_types[int(final_number)]\n DisSymPred = SymPred(final_guess,x1,y1,x1+w,y1-h)\n SymPredList.append(DisSymPred)\n\n #Step Three: Wrap that now complete SymPred list, in an ImgPred, fill out all the fields of that ImgPred, and then return that shit.\n img_prediction = ImgPred(os.path.basename(image_path), SymPredList)\n\n #Step Four: Were Donezo\n return img_prediction", "def keypoints(self, otsu=False):\n if not otsu:\n self.kp = self.fast.detect(self.th, None)\n else:\n self.kp = self.fast.detect(self.th, None)\n self.kpimg = cv2.drawKeypoints(self.th, self.kp, None, color=(0, 255, 0))", "def get_table_corner_points(img):\n\n img_height, img_width, img_channels = img.shape\n lower_bounds,upper_bounds = get_table_color_bounds() # Get the bounds for the table felt color\n\n \"\"\"\n Table Image Masking\n \"\"\"\n res = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) # Convert image to HSV color space\n mask = cv2.inRange(res,lower_bounds,upper_bounds) # Get image mask from bounds\n res = cv2.bitwise_and(img,img,mask = mask) # Mask the image\n\n \"\"\"\n Contour Detection\n \"\"\"\n imgray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY) # Get res in gray color space\n tmp,thresh = cv2.threshold(imgray,255,255,255) # Threshold on the gray image\n retr = cv2.RETR_EXTERNAL # Contour parameters\n mode = cv2.CHAIN_APPROX_SIMPLE\n tmp, contours, hierarchy = cv2.findContours(thresh,retr,mode) # Get contours\n res = np.zeros((img_height,img_width,3),np.uint8) # Blank image for contours\n color = 
(255,255,255) # Contour drawing parameters\n thickness = 3\n cv2.drawContours(res,contours,-1,color,thickness) # Draw contours on blank image\n\n \"\"\"\n Canny Detection\n \"\"\"\n blur = 9 # Blur parameter \n res = cv2.medianBlur(res,blur) # Blur\n res = cv2.Canny(res,100,150,apertureSize = 3) # Canny edge detection\n\n \"\"\"\n Hough Line Detection\n \"\"\"\n rho = 0.75 # Hough parameters\n theta = np.pi/180\n threshold = 150\n lines = cv2.HoughLines(res,rho,theta,threshold) # Get lines\n\n \"\"\"\n K-Mean 4 Line Clustering\n \"\"\"\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,10,1.0) # K-Means parameters\n flags = cv2.KMEANS_PP_CENTERS\n n = 4\n max_iterations = 10 \n compactness,labels,centers = cv2.kmeans(np.float32(lines),n,None,criteria,max_iterations,flags) # K-Means\n \n \"\"\"\n Get And Return Points\n \"\"\"\n growth = 100 # Offset to expand corners\n pts = _get_table_corners_from_lines_hough_space(centers,growth,img_width,img_height) # Gets the corner points\n ''' \n mask = np.zeros((img_height,img_width,3),np.uint8)\n cv2.fillPoly(mask,np.array([pts],dtype = np.int32),(255,255,255))\n show_img(mask)\n cv2.resize(mask,(img.shape[0],img.shape[1]),fx = 2, fy = 2, interpolation = cv2.INTER_CUBIC)\n show_img(mask*2)\n res = cv2.bitwise_and(img,mask)\n '''\n return pts", "def get_face_features(frame, face):\r\n\r\n import math\r\n\r\n def distance(p1, p2):\r\n \"\"\"\r\n Calculate euclidean distance between two points\r\n \"\"\"\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])\r\n\r\n if f_type == \"LD\":\r\n distances = []\r\n for couple in [(50, 58), (61, 67), (51, 57), (62, 66), (52, 56), (63, 65), (48, 54),\r\n (60, 64), (49, 59), (53, 55)]:\r\n a_indexes = (couple[0], couple[0] + 68)\r\n b_indexes = (couple[1], couple[1] + 68)\r\n\r\n a = (video_features[frame][face][a_indexes[0]], video_features[frame][face][a_indexes[1]])\r\n\r\n b = (video_features[frame][face][b_indexes[0]], video_features[frame][face][b_indexes[1]])\r\n\r\n distances.append(distance(a, b))\r\n return distances\r\n else:\r\n return video_features[frame][face][136:]", "def read_features(self):\r\n def unpack_keypoint(data):\r\n try:\r\n kpts = data['keypoints']\r\n desc = data['descriptors']\r\n keypoints = [cv.KeyPoint(x, y, _size, _angle, _response, int(_octave), int(_class_id))\r\n for x, y, _size, _angle, _response, _octave, _class_id in list(kpts)]\r\n return keypoints, np.array(desc)\r\n except(IndexError):\r\n return np.array([]), np.array([])\r\n try:\r\n data = np.load(self.features_path + self.id + \".npz\")\r\n self.keypoints, self.descriptors = unpack_keypoint(data)\r\n logging.info(f\"Existing features for {self.name} found in features directory.\")\r\n except FileNotFoundError:\r\n logging.info(f\"Features for {self.name} not found in {self.features_path}.\")", "def draw_points(self, pic_path, points_data):\n # Pupil Finding here\n pupils = get_eye_locations_in_image(pic_path)\n img = cv2.imread(pic_path)\n frame_number = int(re.findall(r'\\d+', pic_path.split('/')[-1])[0])\n dets = detector(img)\n shape = None\n height, width, channels = img.shape\n\n for k, d in enumerate(dets):\n shape = predictor(img, d)\n\n if(not shape):\n return\n\n pointList = []\n c = 0\n for b in range(68):\n # sanitizing input points\n point = Point(shape.part(b).x, shape.part(b).y)\n points_data[c] = [point.x, point.y]\n c = c + 1\n # some points might be out of bound\n # so, move them to the closest boundary\n if(point.x < 0):\n point.x = 0\n elif(point.x >= width):\n point.x = width - 1\n 
if(point.y < 0):\n point.y = 0\n elif(point.y >= height):\n point.y = height - 1\n\n pointList.append(point)\n\n roll = findRoll(pointList)\n #print(\"roll is \" + str(roll) + ' angles')\n yaw = findYaw(pointList)\n #print(\"yaw is \" + str(yaw) + ' angles')\n pitch = findPitch(pointList)\n #print(\"pitch is \" + str(pitch) + ' angles')\n self.data[frame_number] = [roll, yaw, pitch]\n counter = 0\n for point in pointList:\n cv2.circle(img, (point.x, point.y), ImageProcessor.POINT_SIZE, ImageProcessor.POINT_COLOR, -1)\n counter = counter + 1\n\n self.draw_triangles(img, pointList)\n \n for pupil in pupils:\n cv2.circle(img, (pupil.left.x, pupil.left.y), 5, (0,0,255), -1)\n cv2.circle(img, (pupil.right.x, pupil.right.y), 5, (0,0,255), -1)\n points_data[-1] = [pupil.left.x, pupil.left.y]\n points_data[-2] = [pupil.right.x, pupil.right.y]\n #print(pupil.left.x, \", \", pupil.left.y)\n #print(pupil.right.x, \", \", pupil.right.y)\n\n cv2.imwrite(pic_path, img)", "def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n 
work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1", "def extract_features(img, clf, windows, y_start_stop, xy_window, stride):\n\n transformers = {k: v for k, v in clf.named_steps['features'].transformer_list}\n\n chist_transformer = transformers['chist']\n # remove the first two steps since they are not needed\n chist_transformer = Pipeline(chist_transformer.steps[2:])\n\n sb_transformer = transformers['sb']\n # remove the first two steps since they are not needed\n sb_transformer = Pipeline(sb_transformer.steps[2:])\n\n img_scaled_sb = convert_cspace(img, transformers['sb'].named_steps['sb_csc'].cspace)\n samples_sb = cut_out_windows(img_scaled_sb, windows)\n\n img_scaled_chist = convert_cspace(img, transformers['chist'].named_steps['chist_csc'].cspace)\n samples_chist = cut_out_windows(img_scaled_chist, windows)\n\n img_scaled_hog = convert_cspace(img, transformers['hog'].named_steps['hog_csc'].cspace)\n search_area_hog = img_scaled_hog[y_start_stop[0]:y_start_stop[1], :, :]\n\n hog_vectors = get_hog_vector(search_area_hog, transformers['hog'], xy_window[0], stride)\n sb_vectors = sb_transformer.transform(samples_sb)\n chist_vectors = chist_transformer.transform(samples_chist)\n\n return np.concatenate((hog_vectors, chist_vectors, sb_vectors), axis=1)", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def draw_keypoints(img_to_plot, keypoints):\n cv2.drawKeypoints(img_to_plot, keypoints, img_to_plot, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)", "def match_keypoints(desc1, desc2, k=2, thresh=.9, matchertype=None):\n if not matchertype:\n # default is brute forcce\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = matcher.knnMatch(desc1, desc2, k=3)\n print(matches)\n\n elif matchertype == \"FlannORB\":\n #\n FLANN_INDEX_LSH = 6\n search_params = dict(checks=50)\n index_params = dict(algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1) # 2\n matcher = cv2.FlannBasedMatcher(index_params, search_params)\n matches = matcher.knnMatch(desc1, desc2, k=2)\n\n elif matchertype == \"FlannSURF\":\n FLANN_INDEX_KDTREE = 2\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=500)\n search_params = dict(checks=50)\n matcher = cv2.FlannBasedMatcher(index_params, search_params)\n matches = matcher.knnMatch(desc1, desc2, k=2)\n\n goodmatches = []\n for m, n in matches:\n if m.distance < thresh * n.distance:\n goodmatches.append(m)\n return goodmatches", "def best_img_pair(img_adjacency, matches, keypoints, K, top_x_perc=0.2):\n num_matches = []\n\n for i in range(img_adjacency.shape[0]):\n for j in range(img_adjacency.shape[1]):\n if img_adjacency[i][j] == 1:\n num_matches.append(len(matches[i][j]))\n\n num_matches = sorted(num_matches, reverse=True)\n min_match_idx = int(len(num_matches)*top_x_perc)\n min_matches = 
num_matches[min_match_idx]\n best_R = 0\n best_pair = None\n\n #For all img pairs in top xth %ile of matches, find pair with greatest rotation between images.\n for i in range(img_adjacency.shape[0]):\n for j in range(img_adjacency.shape[1]):\n if img_adjacency[i][j] == 1:\n if len(matches[i][j]) > min_matches:\n kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs = get_aligned_kpts(i, j, keypoints, matches)\n E, _ = cv2.findEssentialMat(kpts_i, kpts_j, K, cv2.FM_RANSAC, 0.999, 1.0)\n points, R1, t1, mask = cv2.recoverPose(E, kpts_i, kpts_j, K)\n rvec, _ = cv2.Rodrigues(R1)\n rot_angle = abs(rvec[0]) +abs(rvec[1]) + abs(rvec[2])# sum rotation angles for each dimension\n if (rot_angle > best_R or best_pair == None) and points == len(kpts_i): #Ensure recoverPose worked.\n best_R = rot_angle\n best_pair = (i,j)\n\n return best_pair", "def recognize_faces(x_img,\n knn_clf=None,\n model_path=None,\n distance_threshold=0.3):\n print(\"Start recognize\")\n # Making a check\n if knn_clf is None and model_path is None:\n raise Exception(\"Must supply knn classifier either thought knn_clf or model_path\")\n\n # Load a trained KNN model (if one was passed in)\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n knn_clf = pickle.load(f)\n\n # Load image file and find face locations\n x_face_locations = face_recognition.face_locations(x_img)\n # Set variable for changes on camera (if connected) check\n # x_face_locations_len = 0\n\n # If no faces are found in the image, return an empty result\n if len(x_face_locations) == 0:\n return []\n \n\n # Checking for changes on camera (if connected)\n # if len(x_face_locations) != x_face_locations_len:\n # Find encodings for faces in the test iamge\n faces_encodings = face_recognition.face_encodings(x_img, known_face_locations=x_face_locations)\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(x_face_locations))]\n accur_list = [1-closest_distances[0][i][0] for i in range(len(x_face_locations))]\n x_face_locations_len = len(x_face_locations)\n # Predict classes and remove classifications that aren't within the threshold\n return [(pred, loc, accur, rec) if rec else (\"unknown\", loc, 0,0) for pred, loc, accur, rec in\n zip(knn_clf.predict(faces_encodings),\n x_face_locations,\n accur_list,\n are_matches)]", "def get_interest_points(image: torch.Tensor, num_points: int = 1000) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\r\n\r\n # We initialize the Harris detector here, you'll need to implement the\r\n # HarrisNet() class\r\n harris_detector = HarrisNet()\r\n\r\n # The output of the detector is an R matrix of the same size as image,\r\n # indicating the corner score of each pixel. 
After non-maximum suppression,\r\n # most of R will be 0.\r\n R = harris_detector(image)\r\n \r\n nonzero_idx = torch.nonzero(R)\r\n nonzero_R = R[R.nonzero(as_tuple=True)]\r\n sort = torch.argsort(nonzero_R, dim=0, descending=True)\r\n \r\n if num_points > sort.shape[0]:\r\n num_points = sort.shape[0]\r\n \r\n x = torch.Tensor(num_points)\r\n y = torch.Tensor(num_points)\r\n c = torch.Tensor(num_points)\r\n\r\n for i in range(0,num_points):\r\n ind = sort[i]\r\n c[i] = nonzero_R[ind]\r\n x[i] = nonzero_idx[ind,:][3] # width\r\n y[i] = nonzero_idx[ind,:][2] # height\r\n \r\n return remove_border_vals(image, x, y, c)", "def find_dots(img):\n # will hold all points\n coordinates = []\n # will hold only relevant points\n points = []\n # losing the side\n img[:, 475:] = 0\n # using for finding the best corners in edged image 65\n corners = cv2.goodFeaturesToTrack(img, 75, 0.085, 61)\n corners = np.int0(corners)\n for corner in corners:\n x, y = corner.ravel()\n if y > 350 or y < 10: # avoid from top and bottom\n continue\n coordinates.append((x, y))\n # sort in order to start from right to left\n sort_coordinates = sorted(coordinates)\n num_of_dot = 1\n for i in reversed(sort_coordinates):\n # when its 9, break\n if num_of_dot > 9:\n break\n points.append((i[0], i[1]))\n num_of_dot += 1\n return points", "def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):\r\n if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:\r\n raise Exception(\"Invalid image path: {}\".format(X_img_path))\r\n\r\n if knn_clf is None and model_path is None:\r\n raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\r\n\r\n if knn_clf is None:\r\n with open(model_path, 'rb') as f:\r\n knn_clf = pickle.load(f)\r\n\r\n X_img = face_recognition.load_image_file(X_img_path)\r\n X_face_locations = face_recognition.face_locations(X_img)\r\n\r\n if len(X_face_locations) == 0:\r\n return []\r\n\r\n faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)\r\n\r\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\r\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]\r\n return [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]", "def visulize_matches(matches, k2, k1, img2, img1):\n\n import scipy as sp\n img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)\n view[:h1, :w1, :] = img1 \n view[:h2, w1:, :] = img2\n view[:, :, 1] = view[:, :, 0] \n view[:, :, 2] = view[:, :, 0]\n\n for m in matches:\n m = m[0]\n # draw the keypoints\n # print m.queryIdx, m.trainIdx, m.distance\n color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])\n pt1 = (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1]))\n pt2 = (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1]))\n\n cv.line(view, pt1, pt2, color)\n return view", "def draw_matches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1, rows2]), cols1 + cols2, 3), dtype='uint8')\n out = img2.copy()\n # Place the first image to the left\n # out[:rows1,:cols1,:] = np.dstack([img1, img1, img1])\n\n 
# Place the next image to the right of it\n # out[:rows2,cols1:cols1+cols2,:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1, y1) = kp1[img1_idx].pt\n (x2, y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n # cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1)\n cv2.circle(out, (int(x2) + cols1, int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n # cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)\n return out", "def draw_features(self, image):\n \n for x,y in self.new_points.reshape(-1,2):\n cv2.circle(image, (x,y), 2, (255,0,255), 2)\n return image", "def pairing(PAN, th = None): #---- pairing feature points\r\n if th is None: th= PAN.knn_th; # setup KNN2 threshold\r\n img_no = PAN.count;\r\n N = int(img_no*(img_no-1)/2); # Total combination\r\n PAN.pair_no = np.zeros((img_no, img_no)); # matching point number \r\n PAN.matchinfo = []; # Matching infomation\r\n PAN.match_seq = np.zeros((N, 2));\r\n index = 0;\r\n for ky in range(0, img_no-1):\r\n #for kx in range(ky+1, img_no): # match to all image\r\n kx = ky+1; # only match to next image\r\n print('pairing between image no: ',ky,' and no: ',kx);\r\n match = FD_tools.matching(PAN.DESCs[ky],PAN.DESCs[kx],th=th);\r\n PAN.pair_no[ky,kx] = match.shape[0];\r\n PAN.pair_no[kx,ky] = match.shape[0];\r\n PAN.matchinfo.append(match);\r\n PAN.match_seq[index,0] = ky; \r\n PAN.match_seq[index,1] = kx;\r\n index += 1;\r\n print('Matching process complete!');", "def findfeatures(self):\n self.set_wdiff()\n\n #xp, wp=st.findfeatures(self.xarr, self.farr, self.slines, self.sfluxes,\n # self.ws, mdiff=self.mdiff, wdiff=self.wdiff, sigma=self.sigma, niter=self.niter, sections=3)\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()", "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)\n\n\n # 
Show the image\n\n # Also return the image if you'd like a copy\n return out", "def drawMatches(image_1, image_1_keypoints, image_2, image_2_keypoints, matches):\n # Compute number of channels.\n num_channels = 1\n if len(image_1.shape) == 3:\n num_channels = image_1.shape[2]\n # Separation between images.\n margin = 10\n # Create an array that will fit both images (with a margin of 10 to separate\n # the two images)\n joined_image = np.zeros((max(image_1.shape[0], image_2.shape[0]),\n image_1.shape[1] + image_2.shape[1] + margin,\n 3))\n if num_channels == 1:\n for channel_idx in range(3):\n joined_image[:image_1.shape[0],\n :image_1.shape[1],\n channel_idx] = image_1\n joined_image[:image_2.shape[0],\n image_1.shape[1] + margin:,\n channel_idx] = image_2\n else:\n joined_image[:image_1.shape[0], :image_1.shape[1]] = image_1\n joined_image[:image_2.shape[0], image_1.shape[1] + margin:] = image_2\n\n for match in matches:\n image_1_point = (int(image_1_keypoints[match.queryIdx].pt[0]),\n int(image_1_keypoints[match.queryIdx].pt[1]))\n image_2_point = (int(image_2_keypoints[match.trainIdx].pt[0] + \\\n image_1.shape[1] + margin),\n int(image_2_keypoints[match.trainIdx].pt[1]))\n\n cv2.circle(joined_image, image_1_point, 5, (0, 0, 255), thickness = -1)\n cv2.circle(joined_image, image_2_point, 5, (0, 255, 0), thickness = -1)\n cv2.line(joined_image, image_1_point, image_2_point, (255, 0, 0), \\\n thickness = 3)\n return joined_image" ]
[ "0.75240153", "0.7496029", "0.7456625", "0.73814374", "0.7321232", "0.7184007", "0.7014539", "0.6925322", "0.68039143", "0.6791409", "0.6788328", "0.6726114", "0.66914046", "0.66845495", "0.66036594", "0.6586292", "0.6563133", "0.6560529", "0.6559361", "0.65400463", "0.64445937", "0.64431065", "0.6438713", "0.6436684", "0.64274585", "0.64119285", "0.63927186", "0.6383165", "0.6383165", "0.6380148", "0.63396347", "0.63361657", "0.6334164", "0.6322095", "0.6319499", "0.6305163", "0.62974375", "0.62926644", "0.62861484", "0.62534237", "0.6242209", "0.62221634", "0.62220377", "0.6204393", "0.61893314", "0.61654323", "0.6158288", "0.6151667", "0.6151645", "0.61428946", "0.611593", "0.6111544", "0.6109982", "0.6108336", "0.6105229", "0.60890555", "0.60782444", "0.6077595", "0.6067585", "0.6066667", "0.60656786", "0.6064542", "0.6063387", "0.6061596", "0.6056342", "0.60380995", "0.60375255", "0.60307914", "0.6029686", "0.602927", "0.6010426", "0.59986866", "0.59883296", "0.5982869", "0.5979083", "0.5974012", "0.5972482", "0.59358096", "0.59291", "0.592612", "0.5914705", "0.5909019", "0.59085953", "0.5902719", "0.5888532", "0.5884118", "0.5881003", "0.5876057", "0.58674854", "0.586514", "0.585606", "0.5851811", "0.58487606", "0.58329314", "0.5823323", "0.5815262", "0.5804502", "0.5799411", "0.5793881", "0.57932764" ]
0.67450947
11
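Several of the keypoint-matching snippets listed above repeat the same pattern: detect local features, run a k-nearest-neighbour descriptor match, then keep only matches that pass Lowe's ratio test. The short sketch below isolates that pattern for reference; the ORB detector, the 0.75 ratio and the image-path parameters are illustrative assumptions rather than values taken from any particular snippet.

import cv2

def ratio_test_matches(img_path_1, img_path_2, ratio=0.75):
    # Illustrative sketch: detect ORB keypoints in two images and keep only
    # matches whose best descriptor distance clearly beats the second best.
    img1 = cv2.imread(img_path_1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(img_path_2, cv2.IMREAD_GRAYSCALE)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)  # crossCheck stays False so knnMatch can return 2 candidates
    matches = matcher.knnMatch(des1, des2, k=2)
    good = []
    for pair in matches:
        # keep a match only if it is clearly better than the second-best candidate
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return kp1, kp2, good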
This function computes the fundamental matrix by solving Ax = 0 via SVD (8-point algorithm)
def computeFundamentalMatrix(pts1, pts2):
    """Estimate the fundamental matrix from eight point correspondences (8-point algorithm)."""
    A = np.empty((8, 9))
    # Each correspondence (x1, y1) <-> (x2, y2) contributes one row of the constraint matrix A.
    for i in range(8):
        x1 = pts1[i][0]
        x2 = pts2[i][0]
        y1 = pts1[i][1]
        y2 = pts2[i][1]
        A[i] = np.array([x1 * x2, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1])
    # Solve Af = 0 via SVD: F comes from the right singular vector with the smallest singular value
    U, S, V = np.linalg.svd(A)
    F = V[-1].reshape(3, 3)
    # Constrain F to rank 2 by zeroing its smallest singular value
    U1, S1, V1 = np.linalg.svd(F)
    S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])
    F = np.dot(np.dot(U1, S2), V1)
    return F
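A brief usage sketch for the function above, assuming it is already in scope; the eight correspondences are made-up values chosen only to show the calling convention and the enforced rank-2 structure of the result.

import numpy as np

# Hypothetical correspondences: eight (x, y) points per image (values are made up)
pts1 = np.array([[10, 12], [45, 30], [70, 81], [120, 40],
                 [160, 95], [200, 150], [230, 60], [250, 170]], dtype=float)
pts2 = pts1 + np.array([5.0, -3.0])  # pretend the second view is shifted slightly

F = computeFundamentalMatrix(pts1, pts2)
print(F.shape)              # (3, 3)
print(np.linalg.svd(F)[1])  # the last singular value is (numerically) zero, so rank(F) <= 2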
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def invert_L1_svd():", "def visualize_svd():", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return (Matrix(U), Matrix(S), Matrix(Vh))", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n (u,s,v)=np.linalg.svd(matrix)\n ### END YOUR CODE\n\n return u, s, v", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return u, s, v", "def svd(self, X): # [5pts]\n N,D = X.shape[0],X.shape[1]\n if X.ndim == 3:\n U = np.zeros((N,N,3))\n S = np.zeros((min(N,D),3))\n V = np.zeros((D,D,3))\n for i in range(3):\n U_temp,S_temp,V_temp = np.linalg.svd(X[:,:,i],compute_uv=True, full_matrices=True,hermitian=False)\n U[:,:,i] = U_temp\n S[:,i] = S_temp\n V[:,:,i] = V_temp\n else:\n U,S,V = np.linalg.svd(X,compute_uv=True,full_matrices=True, hermitian=False)\n return U,S,V", "def svd(matrix, approach):\n\n # Getting the eigenvalues and vectors of transpose(A) * A for V and Sigma\n a = mat_multiply(transpose(matrix), matrix)\n if approach == \"qr\":\n V, sigma, iterations = qr_eig(a)\n else:\n V, sigma, iterations = eig(a)\n\n # Sorting singular values and the colums of V accordingly\n V = transpose(V)\n\n singular_values = list()\n sorted_V = list()\n\n r = 0\n for i in range(rows(sigma)):\n singular_values.append([(sigma[i][i]), i])\n if sigma[i][i] > math.exp(-8):\n r += 1\n\n singular_values.sort(key=first_item, reverse=True)\n\n sigma_r = eye(r)\n sigma_r_inv = eye(r)\n\n # Constructing the sorted U and sigma matrices\n i, j = 0, 0\n for value in singular_values:\n if value[0] > math.exp(-8):\n sorted_V.append(V[value[1]])\n sigma_r[j][j] = value[0] ** (1 / 2)\n sigma_r_inv[j][j] = 1 / (value[0] ** (1 / 2))\n j += 1\n i += 1\n\n # Constructing U by multiplying V and sigma inverse\n sorted_U = mat_multiply(mat_multiply(matrix, transpose(sorted_V)), sigma_r_inv)\n\n return (sorted_U, sigma_r, sorted_V, r, iterations)", "def eight_points_algorithm(x1, x2, normalize=True):\n N = x1.shape[1]\n\n if normalize:\n # Construct transformation matrices to normalize the coordinates\n T1 = get_normalization_matrix(x1)\n T2 = get_normalization_matrix(x2)\n\n # Normalize inputs\n x1 = T1 @ x1\n x2 = T2 @ x2\n\n # Construct matrix A encoding the constraints on x1 and x2\n A = np.stack((x2[0, :] * x1[0, :],\n x2[0, :] * x1[1, :],\n x2[0, :],\n x2[1, :] * x1[0, :],\n x2[1, :] * x1[1, :],\n x2[1, :],\n x1[0, :],\n x1[1, :],\n np.ones((N,))), 1)\n\n # Solve for f using SVD\n U, S, V = np.linalg.svd(A)\n F = V.T[:, 8].reshape(3, 3)\n\n # Enforce that rank(F)=2\n U, S, V = np.linalg.svd(F)\n S[2] = 0\n F = (U[:, :len(S)] * S) @ V\n\n # Transform F back\n if normalize:\n F = T2.T @ F @ T1\n\n return F", "def singular_decomp(A):\n # Initialization\n n, m = A.shape\n U = np.zeros((n, m), dtype='float64')\n\n # Diagonalization of A^T * A\n rot, e, V = eigen.diag(np.dot(np.transpose(A), A))\n\n # Calculate U\n U = np.dot(A, V)\n for i in range(m):\n e[i] = np.sqrt(e[i])\n U[:, i] /= e[i]\n\n return U, e, V", "def invert_L2_svd():\n print('Starting SVD 
inversion')\n\n pix2avevel = np.nans(ts.size)\n pix2cumdef = np.nans(ts.size)\n\n for i in np.range(ts.WIDTH):\n print('column {0}'.format(i))\n pix2date = np.zeros(ts.LENGTH, ts.DATES)\n pix2model = np.zeros(ts.LENGTH, ts.DT)\n colPix = np.zeros(ts.LENGTH, ts.IGRAMS)\n\n # concatenate same column from each interferogram into an array\n for j, ig in enumerate(ts):\n column = np.fromfile(ig.NAME, dtype=float16, size=ts.LENGTH)\n colPix[:,j] = column\n\n pix2igram = np.isfinite(colPix)\n coverage = np.fromfile(coverage) #laod DQmap\n iterPixels = np.where(coverage >= ts.igthresh)\n\n #preform pixel-by-pixel inversion\n for k, pixel in enumerate(iterPixels):\n indIG = find(pix2igram[pixel,:])==1\n indDate = unique(ts.timeIndex[indIG,:])\n dtVector = np.diff(ts.Serial(indDate)) / 365.242 #convert years to days\n\n # Set up B matrix\n B = np.zeros(len(indIG), len(dtVector))\n\n print('Done')", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def spd_pinv(a, rcond=1e-10, square_root=False, check_stability=True):\n N, _N = a.shape\n assert N == _N, \"Matrix is not square!\"\n # get the eigen-decomposition\n # w, v = np.linalg.eigh(a)\n v, w, u = np.linalg.svd(a)\n sort_index = np.argsort(w)\n w = w[sort_index]\n v = v[:,sort_index]\n # check positive-definiteness\n ev_min = w.min()\n if ev_min <= 0:\n msg = \"Matrix is not positive-definite: min ev = {0}\"\n raise IndefiniteError(msg.format(ev_min))\n # check stability of eigen-decomposition\n if check_stability:\n # XXX use a preconditioner?\n if not np.allclose(a, np.dot(v, w[:, np.newaxis] * v.T)):\n raise NumericalError(\n \"Instability in eigh (condition number={:g})\".format(\n (w.max() / w.min())))\n\n # invert the \"large enough\" part of s\n cutoff = rcond * w.max()\n for i in range(N):\n if w[i] > cutoff:\n if square_root:\n # square root of the pseudo-inverse\n w[i] = np.sqrt(1. / w[i])\n else:\n w[i] = 1. 
/ w[i]\n else:\n w[i] = 0.\n # compute the pseudo-inverse (using broadcasting)\n res = np.real(np.dot(v, w[:, np.newaxis] * v.T))\n # check stability of pseudo-inverse\n if check_stability:\n if square_root:\n pa = np.dot(res, res)\n approx_a = np.dot(a, np.dot(pa, a))\n msg = \"Instability in square-root of pseudo-inverse\"\n else:\n approx_a = np.dot(a, np.dot(res, a))\n msg = \"Instability in pseudo-inverse\"\n if not np.allclose(a, approx_a):\n # be a bit laxist by looking at the Mean Squared Error\n mse = np.mean((a - approx_a) ** 2)\n if mse > 1e-16:\n raise NumericalError(\"{} (MSE={:g})\".format(msg, mse))\n return res", "def truncated_svd(A,k=None):", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def compact_svd(A, tol=1e-6):\n #Compute eigenvalues/vectors\n lam, V = la.eig((A.conj().T @ A))\n sig = np.sqrt(lam)\n \n #Sort results\n argB = np.argsort(sig)\n arg = []\n for i in range(0, len(argB)):\n arg.append(argB[len(argB)-1-i])\n sig = sig[arg]\n V = V[:,arg]\n #How many non-zero positive\n r = 0\n for j in range(0, len(sig)):\n if abs(sig[j]) >= tol:\n r += 1\n \n sig1 = sig[:r]\n V1 = np.array(V[:,:r])\n \n# print(np.shape(A))\n# print(np.shape(V1))\n U1 = A@V1\n U1 = U1/sig1\n \n #Return answers\n return U1, sig1, V1.conj().T\n\n raise NotImplementedError(\"Problem 1 Incomplete\")", "def nullOld(A, eps=1e-14):\n\t# Taken with gratitude from http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix\n\tu, s, vh = la.svd(A)\n\tnull_mask = (s <= eps)\n\tnull_space = scipy.compress(null_mask, vh, axis=0)\n\treturn scipy.transpose(null_space)", "def get_stain_matrix(I):", "def visualize_svd():\n A=np.array([[3,1],[1,3]])\n U,s,Vh=truncated_svd(A)\n \n twopi=np.linspace(0,2.*np.pi,360)\n one=np.reshape(np.linspace(0,1,100),(1,100))\n zeros=np.zeros((1,100))\n S=np.vstack((np.reshape(np.cos(twopi),(1,360)),np.reshape(np.sin(twopi),(1,360))))\n e1=np.vstack((zeros,one))\n e2=e1[::-1] \t\n \n s1S=Vh.dot(S)\n s1e1=Vh.dot(e1)\n s1e2=Vh.dot(e2)\n\n s2S=np.diag(s).dot(s1S)\n s2e1=np.diag(s).dot(s1e1)\n s2e2=np.diag(s).dot(s1e2)\n \n s3S=U.dot(s2S)\n s3e1=U.dot(s2e1)\n s3e2=U.dot(s2e2)\n \n \n \n \n\n \n \n plt.subplot(221)\n plt.plot(S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n \n plt.subplot(222)\n plt.plot(s1S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s1e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s1e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n plt.subplot(223)\n plt.plot(s2S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s2e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s2e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.subplot(224) \n \n plt.plot(s3S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s3e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s3e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.show()", "def test_svd(self):\n eigenvectors, eigenvalues = self.svd.run(self.test_matrix)\n\n self.assertEqual(eigenvectors.shape, (100, 100))\n self.assertEqual(eigenvalues.shape, (100,))", "def matrix_svd(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. 
Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n\n # SVD each sector at a time.\n # While doing so, also keep track of a list of all singular values, as\n # well as a heap that gives the negative of the largest singular value\n # in each sector. These will be needed later when deciding how to\n # truncate the decomposition.\n svds = {}\n dims = {}\n minus_next_sings = []\n all_sings = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=self.dtype)\n s = np.empty((m,), dtype=np.float_)\n v = np.empty((m, shp[1]), dtype=self.dtype)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n u, s, v = spsla.svds(\n v, k=maxchi, return_singular_vectors=True\n )\n order = np.argsort(-s)\n u = u[:, order]\n s = s[order]\n v = v[order, :]\n else:\n u, s, v = np.linalg.svd(v, full_matrices=False)\n svd = (s, u, v)\n svds[k] = svd\n dims[k] = 0\n sings = svd[0]\n all_sings.append(sings)\n if 0 not in sings.shape:\n heapq.heappush(minus_next_sings, (-sings[0], k))\n try:\n all_sings = np.concatenate(all_sings)\n except ValueError:\n all_sings = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_sings,\n svds,\n minus_next_sings,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n svds = {k: v for k, v in svds.items() if dims[k] > 0}\n for k, v in svds.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n svds[k] = (v[0][:d], v[1][:, :d], v[2][:d, :])\n else:\n del svds[k]\n\n # Initialize U, S, V.\n d = self.dirs[0]\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=0,\n )\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=np.float_,\n invar=False,\n charge=0,\n )\n V = type(self)(\n [new_dim, self.shape[1]],\n qhape=[new_qim, self.qhape[1]],\n dirs=[d, self.dirs[1]],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=self.charge,\n )\n\n # Set the blocks of U, S and V.\n for k, v in svds.items():\n k_U = (k[0], k[0])\n S[(k[0],)] = v[0]\n U[k_U] = v[1]\n V[k] = v[2]\n\n return U, S, V, rel_err", "def compact_svd(A, tol=1e-6):\r\n eigs, vecs = la.eig(A.conj().T@A)\r\n svs = np.sqrt(eigs)\r\n #sort eigenvalues and eigenvectors accordingly\r\n sorter = list(zip(svs,vecs.T))\r\n sorter.sort(reverse=True, key=lambda tup: tup[0])\r\n svs = [x[0] for x in sorter]\r\n vecs = [x[1] for x in sorter]\r\n #find number of nonzero eigenvalues\r\n r_not = svs.count(0)\r\n r = len(svs) - r_not\r\n svs_1 = np.array(svs[:r])\r\n vecs_1 = np.array(vecs[:r])\r\n u_1 = (A@vecs_1)/svs_1\r\n\r\n return u_1, svs_1, vecs_1.conj().T", "def get_singular_values(matrix, n):\n singular_values = None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return singular_values", "def fit_svd(self):\n\n # U has the eigenvectors of G.Gt as columns ()\n # S has 
square roots of the eigenvalues of G.Gt and Gt.G in its diagonal\n # The square roos of the eigenvalues are called singular values\n # V has the eigenvectors of Gt.G as columns ()\n # full_matrices set to false will set the Vt matrix to a shape m x n\n\n U, S, Vt = linalg.svd(self.norm_matrix, full_matrices=False)\n\n # Compute the eigenvalues\n eig_val = (S ** 2)\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = Vt[:n_components]\n print(\"The principal components have been calculated using svd\", self.components.shape)\n\n return self.components", "def cov_matrix(gx, gy, winsize, alpha):\n\n gx = edge_mirror(gx, winsize)\n gy = edge_mirror(gy, winsize)\n radius_filter = gen_gaussian(winsize)\n radius_filter = numpy.rot90(radius_filter, 2)\n\n lenth = sum(sum(radius_filter))\n\n gx = signal.convolve2d(gx, radius_filter, mode='valid')\n gy = signal.convolve2d(gy, radius_filter, mode='valid')\n\n c11 = numpy.multiply(gx, gx)\n c22 = numpy.multiply(gy, gy)\n c12 = numpy.multiply(gx, gy)\n\n\n # SVD closed form\n lambda1 = (c11 + c22 + numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n lambda2 = (c11 + c22 - numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n numer = c11 + c12 - lambda1\n denom = c22 + c12 - lambda2\n\n ev1 = numpy.zeros_like(numer)\n ev2 = numpy.zeros_like(ev1)\n\n rows, cols = numer.shape\n for r in range(rows):\n for c in range(cols):\n if abs(denom[r, c]) < _opzero:\n if abs(numer[r, c]) < _opzero:\n if abs(denom[r, c]) > abs(numer[r, c]):\n ev1[r, c] = 0\n ev2[r, c] = 1\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n theta = math.atan(-numer[r, c]/denom[r, c])\n ev1 = math.sin(theta)\n ev2 = math.cos(theta)\n\n sv1 = math.sqrt(abs(lambda1[r, c]))\n sv2 = math.sqrt(abs(lambda2[r, c]))\n p = ((sv1 * sv2 + _epsa) / lenth)**alpha\n s1 = (sv1 + 1) / (sv2 + 1)\n s2 = 1. 
/ s1\n c11[r, c] = p * (s1 * ev2 ** 2 + s2 * ev1 ** 2)\n c22[r, c] = p * (s1 * ev1 ** 2 + s2 * ev2 ** 2)\n c12[r, c] = p * (s1 - s2) * ev1 * ev2\n\n c11 = edge_mirror(c11, winsize)\n c12 = edge_mirror(c12, winsize)\n c22 = edge_mirror(c22, winsize)\n\n return c11, c12, c22", "def normalize(self, matrix):\n eigvals, eigvecs = np.linalg.eig(matrix)\n Sdiag = np.diagonal(np.linalg.inv(eigvecs)@matrix@eigvecs)\n S12diag = Sdiag**-.5\n S12 = np.zeros((len(S12diag), len(S12diag)))\n np.fill_diagonal(S12, S12diag)\n return S12", "def Sa(self, x_surface, geom):\n\n return np.zeros((0, 0), dtype=float)", "def calculate_k_SVD(smooth_spreadsheet_matrix, k):\n U_unitary_matrix, singular_value, V_unitary_matrix = linalg.svd(smooth_spreadsheet_matrix)\n S_full_squared_matrix = np.zeros((k, k))\n np.fill_diagonal(S_full_squared_matrix, np.sqrt(singular_value[:k]))\n U_unitary_matrix = U_unitary_matrix[:, :k]\n return U_unitary_matrix, S_full_squared_matrix", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def svd(a, full_matrices=1, compute_uv=1):\n return SVD(full_matrices, compute_uv)(a)", "def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)", "def svd_approx(A, k):\n U,s,Vh=la.svd(A,full_matrices=False)\n return U[:,:k].dot(np.diag(s[:k])).dot(Vh[:k,:])", "def implement_svd(data):\n u, s, v = torch.svd(data) # implement svd\n # note: the u returned by this function only includes the top values.\n # u * s will be equivalent due to the zero terms, but will run more efficiently with this implementation.\n s = torch.diag(s) # turn s into a diagonal matrix\n transformed_matrix = torch.mm(u, s) # u * s\n return l21_reg(s), transformed_matrix # return the L2,1 regularization term and matrix", "def calc_image_svd(img:list):\r\n img_r = img.swapaxes(1, 2).swapaxes(0, 1)\r\n svd_u, svd_s, svd_vh = np.linalg.svd(img_r, full_matrices=True)\r\n return [svd_u, svd_s, svd_vh]", "def svt(X, tau):\n U, S, Vt = la.svd(X,full_matrices=False)\n Xs = np.dot(U * st(S,tau), Vt)\n return Xs", "def pesudoInverse(matrix):\n\n # Calculate the SVD matrices\n U, S, Vt = svd(matrix)\n\n # A+ = V * S+ * U.T => The sigma (S) matrix shape needs to be inverted.\n pseudoSigma = S.T\n sigmaShape = np.shape(pseudoSigma)\n\n # Recalculate Sigma as Sigma+ (each value != 0 is now 1/value)\n for row in range(0, sigmaShape[0]):\n for col in range(0, sigmaShape[1]):\n # pylint: disable=E1136 # pylint/issues/3139\n if pseudoSigma[row][col] != 0:\n pseudoSigma[row][col] = 1 / pseudoSigma[row][col]\n\n # Return A+, being A+ = V * S+ * U.T\n return np.matmul(np.matmul(Vt.T, pseudoSigma), U.T)", "def estimate_fundamental_matrix(points_a, points_b):\n mean_a = np.mean(points_a, axis=0)\n mean_b = np.mean(points_b, axis=0)\n std_a = np.std(points_a, axis=0)\n std_b = np.std(points_b, axis=0)\n T_a = np.asarray([[1.0/std_a[0], 0, -mean_a[0]/std_a[0]],\n [0, 1.0/std_a[1], -mean_a[1]/std_a[1]],\n [0, 0, 1]])\n T_b = np.asarray([[1.0/std_b[0], 0, -mean_b[0]/std_b[0]],\n [0, 1.0/std_b[1], -mean_b[1]/std_b[1]],\n [0, 0, 1]])\n points_a = np.hstack((points_a, np.ones((len(points_a), 1)))).T\n points_b = np.hstack((points_b, np.ones((len(points_b), 1)))).T\n points_a = np.dot(T_a, points_a)[:2].T\n points_b = np.dot(T_b, points_b)[:2].T\n\n A = []\n for pa, pb in zip(points_a, points_b):\n ua, va = pa\n ub, vb = pb\n A.append([ua*ub, va*ub, ub, ua*vb, va*vb, vb, ua, va, 1])\n A = np.vstack(A)\n _, _, Vt = np.linalg.svd(A)\n F = 
Vt[-1, :].reshape((3, 3))\n\n # enforce the singularity constraint\n U, D, Vt = np.linalg.svd(F)\n D[-1] = 0\n F = np.dot(np.dot(U, np.diag(D)), Vt)\n\n F = np.dot(np.dot(T_b.T, F), T_a)\n\n return F", "def InverseMatrix(matrix,vector):\r\n # Unveri reversible matrix\r\n if Determinant(matrix, 1) == 0:\r\n print(\"Error,Singular Matrix\\n\")\r\n return\r\n # result matrix initialized as singularity matrix\r\n result = MakeIMatrix(len(matrix), len(matrix))\r\n # loop for each row\r\n for i in range(len(matrix[0])):\r\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\r\n # pivoting process\r\n matrix, vector = RowXchange(matrix, vector)\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[i][i] = 1/matrix[i][i]\r\n result = MultiplyMatrix(elementary, result)\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\r\n # elementary matrix and multiply with the result matrix )\r\n for j in range(i+1, len(matrix)):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n\r\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\r\n # (make elementary matrix and multiply with the result matrix )\r\n for i in range(len(matrix[0])-1, 0, -1):\r\n for j in range(i-1, -1, -1):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n return result", "def svd_approx(A, s):\n \n U, S, V = la.svd(A)\n V = V.conj().T\n if s > len(S):\n raise ValueError( str(len(S)) + \" = Rank(A) > s\" )\n \n U2 = U[:,:s]\n S2 = S[:s]\n V2 = V[:,:s]\n V2 = V2.conj().T\n \n S2 = np.diag(S2)\n \n Ag = U2@S2@V2\n ent = U2.size + len(S2) + V2.size\n return Ag, ent\n \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def eight_point(points_lst):\r\n\r\n # get H for normalization and produce normalized points\r\n points_lst = np.array(points_lst)\r\n h_l = get_h(points_lst[:, 0])\r\n h_r = get_h(points_lst[:, 1])\r\n p_l_norm = [h_l @ np.array([p[0], p[1], 1]) for p in points_lst[:, 0]]\r\n p_r_norm = [h_r @ np.array([p[0], p[1], 1]) for p in points_lst[:, 1]]\r\n\r\n # create A using normalized points\r\n a = []\r\n for p_l, p_r in zip(p_l_norm, p_r_norm):\r\n x_l, y_l = p_l[0], p_l[1]\r\n x_r, y_r = p_r[0], p_r[1]\r\n a.append([x_r * x_l, x_r * y_l, x_r, y_r * x_l, y_r * y_l, y_r, x_l, y_l, 1])\r\n a = np.array(a)\r\n\r\n u, s, vh = np.linalg.svd(a)\r\n f_mat = np.reshape(vh[-1, :], (3, 3))\r\n\r\n # enforce singularity constraint\r\n u, s, vh = np.linalg.svd(f_mat)\r\n s[-1] = 0\r\n f_unscaled = (u * s) @ vh\r\n\r\n # rescale F\r\n return np.linalg.inv(h_r) @ f_unscaled @ np.linalg.inv(h_l)", "def get_singular_values(matrix, n):\n singular_values = 
None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n singular_values=s[0:n]\n ### END YOUR CODE\n return singular_values", "def svm():", "def optimalSVHT(matrix):\n \n m, n = matrix.shape\n beta = 1.0 * m / n\n \n w = (8.0 * beta) / (beta + 1 + np.sqrt(beta**2 + 14 * beta +1))\n lambdaStar = np.sqrt(2.0 * (beta + 1) + w)\n \n omega = 0.56*beta**3 - 0.95*beta**2 + 1.82*beta + 1.43 \n uSVD, wSVD, vSVD = np.linalg.svd(matrix)\n medianSV = np.median(wSVD)\n \n thrKnownNoise = lambdaStar * np.sqrt(n)\n thrUnknownNoise = omega * medianSV\n \n muSqrt = lambdaStar / omega\n noiseEstimation = medianSV / (np.sqrt(n) * muSqrt) \n \n return thrKnownNoise, thrUnknownNoise, noiseEstimation, wSVD", "def _sigmainf(N, h, m, dW, Km0, Pm0):\n M = m*(m-1)/2\n Im = broadcast_to(np.eye(m), (N, m, m))\n IM = broadcast_to(np.eye(M), (N, M, M))\n Ims0 = np.eye(m**2)\n factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))\n factor2 = _kp2(Im, _dot(dW, _t(dW)))\n factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))\n return 2*IM + _dot(_dot(factor1, factor2), factor3)", "def update_model(X, U, S, k, n, mu,\n svdmethod='full',\n missingmethod='zero'):\n\n if len(X) == 0:\n printt(\"Error: No data in X.\")\n return None, None, None, -1, None\n #print('%d items in X' % X.shape[1])\n #print('init U:', U)\n\n # If there is no previous U, and we just got a single item in X,\n # set U to all 0's (degenerate SVD),\n # and return it with mu.\n # (PR #22 sets first value to 1; see decals implementation)\n if len(U) == 0 and X.shape[1] == 1:\n mu = X\n # Do this no matter what. Let mu get NaNs in it as needed.\n U = np.zeros_like(mu)\n U[0] = 1\n S = np.array([0])\n n = 1\n pcts = [1.0]\n return U, S, mu, n, pcts\n\n ###########################################################################\n # Do full SVD of X if this is requested, regardless of what is in U \n # Also, if n = 0 or U is empty, start from scratch\n output_k = False\n if svdmethod == 'full' or len(U) == 0 or n == 0:\n if n == 0:\n if len(U) == 0:\n printt(\"----- initial SVD -----\")\n output_k = True\n else:\n # Reshape so we don't have an empty dimension (yay python)\n U = U.reshape(-1, 1)\n elif len(U) == 0:\n printt(\"WARNING: N (number of items modeled by U) is %d, not zero, but U is empty!\" % n)\n\n # Bootstrap\n if missingmethod == 'ignore':\n printt(\"ERROR: ignore with full is not possible under ordinary circumstances.\")\n printt(\"Use --increm-brand to impute for NaNs.\")\n printt(\"For now, we are filling NaNs with 0.\")\n X = copy.deepcopy(X)\n z = np.where(np.isnan(X))\n X[z] = 0\n\n mu = np.mean(X, axis=1).reshape(-1,1)\n X = X - mu\n U, S, V = linalg.svd(X, full_matrices=False)\n printt('Just did full SVD on %d items.' % X.shape[1])\n #print('X:', X)\n #print('U:', U)\n # Reset U to all 0's if we only have one item in X (degenerate SVD)\n if X.shape[1] == 1:\n U = np.zeros_like(U)\n \n # Keep only the first k components\n S_full = S\n S = S[0:k]\n U = U[:,0:k]\n\n # Update n to number of new items in X\n n = X.shape[1]\n \n ###########################################################################\n # Incremental SVD from Ross\n elif svdmethod == 'increm-ross':\n # Incremental SVD from Ross et al. 
2008\n # \"Incremental Learning for Robust Visual Tracking\"\n # based on Lim and Ross's sklm.m implementation in MATLAB.\n\n # This method DOES NOT handle missing values.\n if missingmethod == 'ignore':\n print('ERROR: increm-ross cannot handle missing values.')\n print('If they are present, try svdmethod=increm-brand')\n print(' or use missingmethod=zero to zero-fill.')\n print('If there are no missing values, specify missingmethod=none.')\n sys.exit(1)\n\n n_new = X.shape[1]\n \n # Compute mean\n # Weirdly, the later 'X-mu_new' is MUCH faster if you reshape as shown.\n # This is because of differences in the way numpy treats a 1d array versus a 2d column.\n mu_new = np.mean(X, axis=1).reshape(-1,1)\n\n # Subtract the mean, append it as a column vector, and update mu\n # X - mu_new will be zero if X has only 1 item\n mu_old = mu\n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n B = np.hstack((X - mu,\n math.sqrt(n_new * n/float(n_new+n)) * \\\n (mu_old - mu_new)))\n printt(\"Now tracking mean for %d -> %d items; mu.min %f, mu.max %f \" % \\\n (n, n+n_new, np.nanmin(mu), np.nanmax(mu)))\n n = n + n_new\n\n if S.all() == 0:\n npcs = U.shape[1]\n diagS = np.zeros((npcs, npcs))\n else:\n diagS = np.diag(S)\n\n # I don't think this is right. At this point B is the augmented\n # matrix rather than the single observation.\n proj = np.dot(U.T, B)\n reproj_err = B - np.dot(U, proj)\n\n # to get orthogonal form of reproj_err\n # This should return q with dimensions [d(X) by n_new+1], square\n q, dummy = linalg.qr(reproj_err, mode='full')\n # print('q.shape should be 7x2: ', q.shape)\n Q = np.hstack((U, q))\n\n # From Ross and Lim, 2008\n # R = [ [ Sigma, U.T * X ] [ 0, orthog. component of reproj error ] ]\n k_now = diagS.shape[0]\n new_dim = k_now + n_new + 1\n R = np.zeros((new_dim, new_dim))\n R[0:k_now,0:k_now] = diagS\n R[0:k_now,k_now:] = proj\n orthog_reproj_err = np.dot(q.T, reproj_err)\n R[k_now:, k_now:] = orthog_reproj_err\n \n # Perform SVD of R. Then finally update U.\n U, S, V = linalg.svd(R, full_matrices=False)\n printt('Just did increm-ross SVD on %d items.' % n)\n\n U = np.dot(Q, U)\n \n # Keep only the first k components\n U = U[:,0:min([n,k])]\n S_full = S\n S = S[0:min([n,k])]\n\n ###########################################################################\n # Incremental SVD from Brand\n elif svdmethod == 'increm-brand':\n # Pulled out James's attempt to handle NaNs into\n # increm-brand-james.py. Starting over from scratch here.\n n_new = X.shape[1]\n\n if n_new != 1:\n print(\"WARNING: increm-brand will probably only work by adding one item at a time.\")\n input('\\nPress enter to continue or ^C/EOF to exit. ')\n\n if missingmethod == 'ignore':\n # 1. Update mu\n mu_old = mu\n mu_new = X\n\n # Be careful! For any pre-existing NaNs in mu,\n # let mu_new fill them in. 
Can't get any worse!\n naninds = np.where(np.isnan(mu_old))[0]\n if naninds.size > 0:\n mu_old[naninds,0] = mu_new[naninds,0]\n # And likewise for mu_new -- fill with good values from mu_old.\n naninds = np.where(np.isnan(mu_new))[0]\n if naninds.size > 0:\n mu_new[naninds,0] = mu_old[naninds,0]\n # At this point, the only NaNs that should appear are\n # values that were NaN for both mu and X to start with.\n # They will stay NaN and that's okay.\n \n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n printt(\"Now tracking mean for %d -> %d items; mu.min %f, mu.max %f \" % \\\n (n, n+n_new, np.nanmin(mu), np.nanmax(mu)))\n n = n + n_new\n\n # 2. Subtract off the mean\n X = X - mu\n\n # 3. Compute L, the projection of X onto U\n # Note: this will only work for a single item in X\n goodinds = np.where(~np.isnan(X))[0]\n #print('X: %d of %d are good.' % (len(goodinds), X.shape[0]))\n\n diagS = np.diag(S)\n # This is Brand's method, which involves S:\n L = np.dot(diagS,\n np.dot(np.linalg.pinv(np.dot(U[goodinds,:],\n diagS)),\n X[goodinds,:]))\n # Simplified version that does not use S (but is probably wrong):\n #L = np.dot(U[goodinds,:].T,\n # X[goodinds,:])\n # Top row of the Q matrix (eqn 12, Brand 2002)\n Q1 = np.hstack([diagS, L])\n\n # 4. Compute J, the orthogonal basis of H, which is\n # the component of X orthog to U (i.e., unrepresentable direction)\n # 5. Compute K, the projection of X onto J (i.e., unrep. content)\n K = linalg.norm(X[goodinds,:] - np.dot(U[goodinds,:],\n np.dot(U[goodinds,:].T,\n X[goodinds,:])))\n # H = X - UL\n J = np.zeros((U.shape[0], 1))\n J[goodinds] = np.dot(K,\n np.linalg.pinv(X[goodinds,:] -\n np.dot(U[goodinds,:],\n L))).T\n \n # Bottom row of Q matrix (eqn 12, Brand 2002)\n Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])\n Q = np.vstack([Q1, Q2])\n\n # 6. Take the SVD of Q\n Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)\n\n # 7. Update U and S (eqn 4, Brand 2002)\n # Note: Since J is zero-filled for badinds, now U is too.\n # Alternatively, we give J NaNs and let them get into U as well.\n # I think that is a worse idea though.\n U = np.dot(np.hstack([U, J]), Uq)\n S = Sq\n # Updating V requires knowing old V,\n # but we don't need the new one either so it's okay to skip.\n\n printt('Just did increm-brand SVD on %d items.' % n)\n \n ############# end ###########\n \n else: # No missing values (or not 'ignore')\n # 1. Update mu\n mu_old = mu\n mu_new = X\n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n n = n + n_new\n\n # 2. Subtract off the mean\n X = X - mu\n\n # 3. Compute L, the projection of X onto U\n L = np.dot(U.T, X)\n Q1 = np.hstack([np.diag(S), L])\n\n # 4. Compute J, the orthogonal basis of H, which is\n # the component of X orthog to U (i.e., unrepresentable direction)\n # 5. Compute K, the projection of X onto J (i.e., unrep. content)\n JK = X - np.dot(U, L)\n (J, K) = linalg.qr(JK)\n\n Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])\n Q = np.vstack([Q1, Q2])\n\n # 6. Take the SVD of Q\n Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)\n\n # 7. Update U and S (eqn 4, Brand 2002)\n U = np.dot(np.hstack([U, J]), Uq)\n S = Sq\n # V requires knowing old V,\n # but we don't need the new one either so it's okay.\n \n printt('Just did regular increm SVD on %d items.' 
% n)\n\n # Keep only the first k components\n U = U[:,0:min([n,k])]\n S = S[0:min([n,k])]\n\n Usum = U.sum(1)\n\n\n ###########################################################################\n # We have a bad svdmethod, but somehow didn't catch it earlier.\n else:\n printt(\"504: Bad Gateway in protocol <Skynet_authentication.exe>\")\n return None, None, None, None, None\n\n indivpcts = None\n\n # This only works if a full SVD was done\n if (svdmethod == 'full' and output_k and opts['k_var'] == -773038.0):\n # Calculate percent variance captured by each \n cumsum = np.cumsum(S_full)\n #print(cumsum.shape)\n if cumsum[-1] != 0:\n indivpcts = S / cumsum[-1]\n indivpcts = indivpcts[0:k] # truncate to first k\n cumpercents = cumsum / cumsum[-1]\n else:\n indivpcts = []\n\n # Calculate percent variance captured\n if k >= cumsum.shape[0]:\n printt('Cannot estimate data variance; specified k (%d) exceeds the number of SVs (%d).' % (k, cumsum.shape[0]))\n else:\n printt(\"Selected value of k=%d captures %5.2f%% of the data variance\" % \\\n (k, cumpercents[k-1] * 100))\n if opts['pause']: input(\"Press enter to continue\\n\")\n\n #print('U:', U)\n #print('mu:', mu)\n return U, S, mu, n, indivpcts", "def tsvd(A, threshold=0.99999, avoid_pathological=True):\n M,N = A.shape\n full_matrices = False\n\n if is_int(threshold):\n # Assume specific number is requested\n r = threshold\n assert 1 <= r <= max(M,N)\n if r > min(M,N):\n full_matrices = True\n r = min(M,N)\n\n U,s,VT = sla.svd(A, full_matrices)\n\n if isinstance(threshold,float):\n # Assume proportion is requested\n r = truncate_rank(s,threshold,avoid_pathological)\n\n # Truncate\n U = U [:,:r]\n VT = VT[ :r]\n s = s [ :r]\n return U,s,VT", "def svd_approx(A, s):\r\n U, Si, Vh = la.svd(A)\r\n zeros = list(Si).count(0)\r\n #raise error if there are not enough nonzero singular values\r\n if len(Si) - zeros < s:\r\n raise ValueError(\"s > rank(A)\")\r\n #Only save first s singular values for Si\r\n Si_hat = np.diag(Si[:s])\r\n #Save first s columns of U\r\n U_hat = U[:,:s]\r\n #Save first s rows of Vh\r\n Vh_hat = Vh[:s,:]\r\n\r\n # return new A and num of entries needed\r\n return U_hat@Si_hat@Vh_hat, U_hat.size+s+Vh_hat.size", "def ES_SVD(U, sigma, V, time, f_fault, f_side, PMItreshold, estimate_xi_func=get_SVDxi, estimate_xi_func_params=None):\n\n # Get the search region\n m = sigma.size\n f_fault = np.asanyarray(f_fault)\n f_side = np.asanyarray(f_side)\n dt = time[1] - time[0]\n Fs = 1.0/dt\n PMI = [] #PMI is here the envelope score\n W = []\n for i in range(0, f_fault.size):\n PMI.append(np.zeros(m))\n W.append(np.zeros(m))\n\n # Calculate PMI for each fault type\n for i in range(0, m):\n if estimate_xi_func_params is None:\n a_i = estimate_xi_func(U, sigma, V, i)\n else:\n a_i = estimate_xi_func(U, sigma, V, i, estimate_xi_func_params)\n a_i = envelope(a_i)\n Y, df = fft(a_i, Fs)\n # Calculate PMI for each fault type\n for k in range(0, f_fault.size):\n PMI[k][i] = diagnosefft(Y, df, f_fault[k], 1.0, f_side[k])\n\n # Calculate weights\n for k in range(0, f_fault.size):\n temp = 0.0\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n temp += PMI[k][i]\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n W[k][i] = PMI[k][i]/temp\n\n # Return data\n return PMI, W", "def spca(a, s, k, d):\n\n p = a.shape[0]\n X = np.zeros((p, k))\n\n for l in range(k):\n # 1\n [w, V] = linalg.eigh(a)\n idx = w.argsort()\n w = w[idx]\n V = V[:, idx]\n\n # 2\n xprime, value = spannogram(V[:, -d:], w[-d:], s=s)\n X[:, l] = xprime[:, 0]\n\n # 3\n idx = 
np.abs(xprime).argsort(axis=0)\n for i in idx[-s:]:\n a[i, :] = 0\n a[:, i] = 0\n\n return X", "def svd(T):\n try:\n U, S, V = splinalg.svd(T, full_matrices=False)\n except splinalg.LinAlgError:\n U, S, V = splinalg.svd(T, full_matrices=False, lapack_driver='gesvd')\n maxU, minU = U.max(0), U.min(0)\n maxV, minV = V.max(1), V.min(1)\n ind = (np.abs(minU) > maxU) & (np.abs(minV) > maxV)\n U[:, ind] *= -1\n V[ind] *= -1\n return U, S, V", "def svd_inverse(matrix):\n U, S, V = np.linalg.svd(matrix)\n\n dim = S.shape[0]\n S = la.diagsvd(S, dim, dim)\n V = np.matrix(V)\n U = np.matrix(U)\n\n # Compute the inverse SVD\n V_dag_S = np.dot(V.getH(), np.linalg.inv(S))\n V_dag_S_U_dag = np.dot(V_dag_S, U.getH())\n\n return V_dag_S_U_dag", "def prepare(self):\n ls=len(self.v)\n self.S=numpy.zeros(ls)\n self.A=numpy.zeros((ls,ls))\n\n for k,v in self.e.items():\n b,e=k\n bi,ei=self.rv[b],self.rv[e]\n self.A[bi,bi]-=v\n self.A[bi,ei]+=v", "def f_v(_a, _vs, _Ps, _Ps0): # _aはスカラ, _vsはベクトル, _Ps, _Ps0は3行2列の行列\n center_pos = _Ps[0]\n center_pos_0 = _Ps0[0]\n idx_iter = Index_iterator(1, 8)\n #中心点から各点へのベクトル\n x = []\n x0 = []\n for p in (_Ps):\n x.append(p - center_pos)\n for p in _Ps(_Ps0):\n x0.append(p - center_pos_0)\n\n x01 = (_Ps[1]-center_pos) \n x02 = (_Ps[2]-center_pos) \n x03 = (_Ps[3]-center_pos) \n x04 = (_Ps[4]-center_pos) \n x05 = (_Ps[5]-center_pos) \n x06 = (_Ps[6]-center_pos) \n x07 = (_Ps[7]-center_pos) \n x08 = (_Ps[8]-center_pos)\n print('p_id', center_pos, end='\\t')\n print('x01:', x01, end=\"\\t\")\n print('x03:', x03, end=\"\\t\")\n print('x05:', x05, end=\"\\t\")\n print('x07:', x07)\n x001 = (_Ps0[1]-_Ps0[0]) \n x002 = (_Ps0[2]-_Ps0[0]) \n x003 = (_Ps0[3]-_Ps0[0]) \n x004 = (_Ps0[4]-_Ps0[0]) \n x005 = (_Ps0[5]-_Ps0[0]) \n x006 = (_Ps0[6]-_Ps0[0]) \n x007 = (_Ps0[7]-_Ps0[0]) \n x008 = (_Ps0[8]-_Ps0[0]) \n \n #中心点周りの面の面積\n def calc_area(j,k,l):\n s = LA.norm(np.cross(x[j],x[k]))/2 \\\n + LA.norm(np.cross(x[k],x[l]))/2\n return s\n\n s = []\n s0 = []\n hen = [1,3,5,7]\n for i in range(4):\n j,k,l = [n for n in idx_iter.get_indexes(start_idx=hen[i], 3)]\n s[i] = calc_area(j,k,l)\n s0[i] = calc_area(j,k,l)\n\n # s0123 = LA.norm(np.cross(x[1],x[2]))/2\\\n # +LA.norm(np.cross(x[2],x[3]))/2\n # s4367 = LA.norm(np.cross(x[3],x[4]))/2\\\n # +LA.norm(np.cross(x[4],x[5]))/2\n # s4785 = LA.norm(np.cross(x[5],x[6]))/2\\\n # +LA.norm(np.cross(x[6],x[7]))/2\n # s4521 = LA.norm(np.cross(x[7],x[8]))/2\\\n # +LA.norm(np.cross(x[8],x[1]))/2\n # s04103 = LA.norm(np.cross(x0[1],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[3]))/2\n # s04367 = LA.norm(np.cross(x0[3],x0[4]))/2\\\n # +LA.norm(np.cross(x0[4],x0[7]))/2\n # s04785 = LA.norm(np.cross(x0[7],x0[8]))/2\\\n # +LA.norm(np.cross(x0[8],x0[5]))/2\n # s04521 = LA.norm(np.cross(x0[5],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[1]))/2\n \n #各方向への平均面積(ここだけ反時計回り順で設定してる)\n S_iminus = (s[1] + s[2]) / 2 #43方向\n S_Jminus = (s[1] + s[4]) / 2 #41方向\n S_iplus = (s[3] + s[4]) / 2 #45方向\n S_Jplus = (s[3] + s[2]) / 2 #47方向\n S_iminus0 = (s0[1] + s0[2]) / 2 #43方向\n S_Jminus0 = (s0[1] + s0[4]) / 2 #41方向\n S_iplus0 = (s0[3] + s0[4]) / 2 #45方向\n S_Jplus0 = (s0[3] + s0[2]) / 2 #47方向\n # 各方向への厚み\n h_iminus = h_0 / ((poisson/(1-poisson) * (S_iminus - S_iminus0) / S_iminus0) + 1) #43方向\n h_Jminus = h_0 / ((poisson/(1-poisson) * (S_Jminus - S_Jminus0) / S_Jminus0) + 1) #41方向\n h_iplus = h_0 / ((poisson/(1-poisson) * (S_iplus - S_iplus0) / S_iplus0) + 1) #45方向\n h_Jplus = h_0 / ((poisson/(1-poisson) * (S_Jplus - S_Jplus0) / S_Jplus0) + 1) #47方向\n # 各断片の重心\n g = []\n kado = 
[2,4,6,8]\n hen = [1,3,5,7]\n for i in range(len(kado)):\n _kado = kado[i]\n _hen1, _ = [idx for idx in idx_iter.get_indexes_reverse(_kado, 2)]\n _hen2, _ = [idx for idx in idx_iter.get_indexes(_kado, 2)]\n _hen = [_hen1, _hen2]\n _g1 = (center_pos + _Ps[_kado] + _Ps[_hen1])/3\n _g2 = (center_pos + _Ps[_kado] + _Ps[_hen2])/3\n g.append([_g1, _g2])\n\n g401 = (center_pos + _Ps[0] + _Ps[1]) / 3\n g430 = (center_pos + _Ps[3] + _Ps[0]) / 3\n g436 = (center_pos + _Ps[3] + _Ps[6]) / 3\n g467 = (center_pos + _Ps[6] + _Ps[7]) / 3\n g478 = (center_pos + _Ps[7] + _Ps[8]) / 3\n g485 = (center_pos + _Ps[8] + _Ps[5]) / 3\n g452 = (center_pos + _Ps[5] + _Ps[2]) / 3\n g421 = (center_pos + _Ps[2] + _Ps[1]) / 3\n g0401 = (_Ps0[4] + _Ps0[0] + _Ps0[1]) / 3\n g0430 = (_Ps0[4] + _Ps0[3] + _Ps0[0]) / 3\n g0436 = (_Ps0[4] + _Ps0[3] + _Ps0[6]) / 3\n g0467 = (_Ps0[4] + _Ps0[6] + _Ps0[7]) / 3\n g0478 = (_Ps0[4] + _Ps0[7] + _Ps0[8]) / 3\n g0485 = (_Ps0[4] + _Ps0[8] + _Ps0[5]) / 3\n g0452 = (_Ps0[4] + _Ps0[5] + _Ps0[2]) / 3\n g0421 = (_Ps0[4] + _Ps0[2] + _Ps0[1]) / 3\n \n # 各断片面積\n triangle_area = []\n kado = [2,4,6,8]\n for i in range(len(kado)):\n j, k = [idx for idx in idx_iter.get_indexes_reverse(kado[i], 1)]\n _s1 = LA.norm(np.cross(x[j],x[k]))/2\n j, k = [idx for idx in idx_iter.get_indexes(kado[i], 1)]\n _s2 = LA.norm(np.cross(x[j],x[k]))/2\n triangle_area.append([_s1, _s2])\n\n s410 = LA.norm(np.cross(x[1],x[2]))/2\n s403 = LA.norm(np.cross(x[2],x[3]))/2\n s436 = LA.norm(np.cross(x[3],x[4]))/2\n s467 = LA.norm(np.cross(x[4],x[5]))/2\n s478 = LA.norm(np.cross(x[5],x[6]))/2\n s485 = LA.norm(np.cross(x[6],x[7]))/2\n s452 = LA.norm(np.cross(x[7],x[8]))/2\n s421 = LA.norm(np.cross(x[8],x[1]))/2\n s0410 = LA.norm(np.cross(x0[1],x0[2]))/2\n s0403 = LA.norm(np.cross(x0[2],x0[3]))/2\n s0436 = LA.norm(np.cross(x0[3],x0[4]))/2\n s0467 = LA.norm(np.cross(x0[4],x0[5]))/2\n s0478 = LA.norm(np.cross(x0[5],x0[6]))/2\n s0485 = LA.norm(np.cross(x0[6],x0[7]))/2\n s0452 = LA.norm(np.cross(x0[7],x0[8]))/2\n s0421 = LA.norm(np.cross(x0[8],x0[1]))/2\n # 四角の重心\n\n center_g_square = []\n for i in range(len(g)):\n _g = (triangle_area[i][0]*g[i][0] + triangle_area[i][1]*g[i][1])/(triangle_area[i][0] + triangle_area[i][1])\n center_g.append(_g)\n g4103 = (s410*g401 + s403*g430) / (s410 + s403)\n g4367 = (s436*g436 + s467*g467) / (s436 + s467)\n g4785 = (s478*g478 + s485*g485) / (s478 + s485)\n g4521 = (s452*g452 + s421*g421) / (s452 + s421)\n g04103 = (s0410*g0401 + s0403*g0430) / (s0410 + s0403)\n g04367 = (s0436*g0436 + s0467*g0467) / (s0436 + s0467)\n g04785 = (s0478*g0478 + s0485*g0485) / (s0478 + s0485)\n g04521 = (s0452*g0452 + s0421*g0421) / (s0452 + s0421)\n # 各重心間の距離\n Lj82 = LA.norm(g4521 - g4103)\n Lj24 = LA.norm(g4103 - g4367)\n Lj46 = LA.norm(g4367 - g4785)\n Lj68 = LA.norm(g4785 - g4521)\n \n # ひずみ\n eps_i41 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J41 = (LA.norm(g4521 - g4103) - LA.norm(g04521 - g04103)) / LA.norm(g04521 - g04103)\n eps_i43 = (LA.norm(x03) - LA.norm(x043)) / LA.norm(x043)\n eps_J43 = (LA.norm(g4103 - g4367) - LA.norm(g04103 - g04367)) / LA.norm(g04103 - g04367)\n eps_i47 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J47 = (LA.norm(g4367 - g4785) - LA.norm(g04367 - g04785)) / LA.norm(g04367 - g04785)\n eps_i45 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J45 = (LA.norm(g4785 - g4521) - LA.norm(g04785 - g04521)) / LA.norm(g04785 - g04521)\n # 張力\n F_T1 = (young_modulus * h_Jminus * Lj82 * (eps_i41 + poisson * eps_J41) / (1 - poisson**2))*x01/LA.norm(x01)\n F_T3 = 
(young_modulus * h_iminus * Lj24 * (eps_i43 + poisson * eps_J43) / (1 - poisson**2))*x03/LA.norm(x03)\n F_T5 = (young_modulus * h_Jplus * Lj46 * (eps_i47 + poisson * eps_J47) / (1 - poisson**2))*x05/LA.norm(x05)\n F_T7 = (young_modulus * h_iplus * Lj68 * (eps_i45 + poisson * eps_J45) / (1 - poisson**2))*x07/LA.norm(x07)\n # せん断ひずみ\n gamma513 = (math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041)))\\\n + math.acos((np.dot(x03,x01))/(LA.norm(x03)*LA.norm(x01))) - math.acos((np.dot(x043,x041))/(LA.norm(x043)*LA.norm(x041))))/2\n gamma137 = (math.acos((np.dot(x01,x03))/(LA.norm(x01)*LA.norm(x03))) - math.acos((np.dot(x041,x043))/(LA.norm(x041)*LA.norm(x043)))\\\n + math.acos((np.dot(x03,x05))/(LA.norm(x03)*LA.norm(x05))) - math.acos((np.dot(x043,x047))/(LA.norm(x043)*LA.norm(x047))))/2\n gamma375 = (math.acos((np.dot(x05,x03))/(LA.norm(x05)*LA.norm(x03))) - math.acos((np.dot(x047,x043))/(LA.norm(x047)*LA.norm(x043)))\\\n + math.acos((np.dot(x07,x05))/(LA.norm(x07)*LA.norm(x05))) - math.acos((np.dot(x045,x047))/(LA.norm(x045)*LA.norm(x047))))/2\n gamma751 = (math.acos((np.dot(x05,x07))/(LA.norm(x05)*LA.norm(x07))) - math.acos((np.dot(x047,x045))/(LA.norm(x047)*LA.norm(x045)))\\\n + math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041))))/2\n # せん断力\n F_S41 = ((young_modulus * h_Jminus * LA.norm(x01) * gamma513)/(2 * (1 + poisson)))*x01/LA.norm(x01)\n F_S43 = ((young_modulus * h_Jminus * LA.norm(x03) * gamma137)/(2 * (1 + poisson)))*x03/LA.norm(x03)\n F_S47 = ((young_modulus * h_Jminus * LA.norm(x05) * gamma375)/(2 * (1 + poisson)))*x05/LA.norm(x05)\n F_S45 = ((young_modulus * h_Jminus * LA.norm(x07) * gamma751)/(2 * (1 + poisson)))*x07/LA.norm(x07)\n \n # J方向の曲げ力\n n_j_cross = np.cross(x05, x01)\n if any(n_j_cross):\n n_J = n_j_cross/LA.norm(n_j_cross)\n else: \n\n l_Jalfa = LA.norm(_Ps[1] - _Ps[7])\n cos_Jalfa = (LA.norm(x01)**2 + LA.norm(x05)**2 - l_Jalfa**2) / (2 * LA.norm(x01) * LA.norm(x05))\n if cos_Jalfa > 1.0:\n cos_Jalfa = 1.0\n elif cos_Jalfa < -1.0:\n cos_Jalfa = -1.0\n sin_Jalfa = math.sqrt(1 - cos_Jalfa**2)\n CJa2 = math.sqrt((cos_Jalfa + 1)/2)\n SJa2 = math.sqrt((1 - cos_Jalfa)/2)\n zJC = (_Ps[7][2]-_Ps[1][2])/(_Ps[7][0]-_Ps[1][0]) * (center_pos[0]-_Ps[1][0]) + _Ps[1][2] #曲げ力の方向の場合わけに必要\n if center_pos[2] > zJC:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) + n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) - n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) - n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) + n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) + n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) - n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) - n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) + n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) + n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) - n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) - n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) + n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n d_etha_J = (2 * sin_Jalfa / l_Jalfa) - (2 * math.sqrt(1 - np.dot(x041,x047)**2/(LA.norm(x041)*LA.norm(x047))**2)/(LA.norm(x041 - x047)))\n\n n_i = np.cross(x07,x03)/LA.norm(np.cross(x03,x07)) \n cos_ialfa = 
np.dot(x03,x07) / (LA.norm(x03) * LA.norm(x07))\n sin_ialfa = math.sqrt(1 - cos_ialfa**2)\n Cia2 = math.sqrt((cos_ialfa + 1)/2)\n Sia2 = math.sqrt((1 - cos_ialfa)/2)\n ziC = (_Ps[5][2]-_Ps[3][2])/(_Ps[5][0]-_Ps[3][0]) * (center_pos[0]-_Ps[3][0]) + _Ps[3][2]\n if center_pos[2] > ziC:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) + n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) - n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) - n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) + n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) + n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) - n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) - n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) + n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) + n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) - n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) - n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) + n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[5] - center_pos)/LA.norm(_Ps[5] - center_pos))\n d_etha_i = (2 * sin_ialfa / LA.norm(x07 - x03)) - (2 * math.sqrt(1 - np.dot(x043,x045)**2/(LA.norm(x043)*LA.norm(x045))**2)/(LA.norm(x043 - x045)))\n\n\n l_J = (Lj20 + Lj06 + Lj68 + Lj82) / 4\n h = (h_iminus + h_iplus + h_Jminus + h_Jplus) / 4\n I = (l_J * h**3) / 12\n M_i = (young_modulus * I * (d_etha_i + poisson * d_etha_J)/(1 - poisson**2))\n M_J = (young_modulus * I * (d_etha_J + poisson * d_etha_i)/(1 - poisson**2))\n #曲げ力\n F_Bi = M_i / LA.norm(x03) + M_i / LA.norm(x07) * e_i\n F_BJ = M_J / LA.norm(x01) + M_J / LA.norm(x05) * e_j\n #空気力\n # S = (S_iminus + S_iplus + S_Jminus + S_Jplus) / 4\n # F_A = p * S\n F_A = np.array([0.0, 0.0, -0.1]) * _a\n\n # 運動方程式(支配方程式)\n S_0 = (S_iminus0 + S_iplus0 + S_Jminus0 + S_Jplus0) / 4\n F_T = F_T41 + F_T43 + F_T45 + F_T47\n F_S = F_S41 + F_S43 + F_S45 + F_S47\n F_B = F_Bi + F_BJ\n return (F_T + F_S + F_B + F_A) / (rho * h_0 * S_0) - c * _vs", "def to_s_matrix(w,v):\n pass", "def ssa_decompose(y, dim):\n n = len(y)\n t = n - (dim - 1)\n\n yy = linalg.hankel(y, np.zeros(dim))\n yy = yy[:-dim + 1, :] / np.sqrt(t)\n\n # here we use gesvd driver (as in Matlab)\n _, s, v = linalg.svd(yy, full_matrices=False, lapack_driver='gesvd')\n\n # find principal components\n vt = np.matrix(v).T\n pc = np.matrix(yy) * vt\n\n return np.asarray(pc), s, np.asarray(vt)", "def visualize_svd(A):\r\n theta = np.linspace(0,2*np.pi,200)\r\n #Set S as unit circle\r\n S = np.array([np.cos(theta), np.sin(theta)])\r\n #Set E as orthogonal basis\r\n E = np.array([[1,0,0],[0,0,1]])\r\n U,Si,Vh = la.svd(A)\r\n Si = np.diag(Si)\r\n\r\n #plot original S and E\r\n first = plt.subplot(221)\r\n first.plot(S[0], S[1])\r\n first.plot(E[0], E[1])\r\n first.axis(\"equal\")\r\n\r\n #rotate S,E and plot S,E\r\n second = plt.subplot(222)\r\n vhs = Vh@S\r\n vhe = Vh@E\r\n second.plot(vhs[0], vhs[1])\r\n second.plot(vhe[0], vhe[1])\r\n second.axis(\"equal\")\r\n\r\n #scale S,E and plot S,E\r\n third = plt.subplot(223)\r\n sivhs = Si@vhs\r\n sivhe = Si@vhe\r\n third.plot(sivhs[0],sivhs[1])\r\n third.plot(sivhe[0],sivhe[1])\r\n third.axis([-4,4,-4,4])\r\n\r\n #rotate S,E and plot S,E\r\n fourth = plt.subplot(224)\r\n usivhs = U@sivhs\r\n usivhe = U@sivhe\r\n fourth.plot(usivhs[0],usivhs[1])\r\n fourth.plot(usivhe[0],usivhe[1])\r\n fourth.axis([-4,4,-4,4])\r\n\r\n plt.show()", "def 
_compute_s_matrix(self, system_std_dev: tf.Tensor) -> None:\n self.s_matrix_inv = self._kronecker_product(\n tf.diag(tf.reshape(tf.ones_like(system_std_dev, dtype=tf.float64)\n / system_std_dev, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def S1(A,B):\n C = np.subtract(A,B)\n s = np.linalg.svd(C)[1]\n return (np.sum(s))", "def spcaold(a, s, k, d):\n\n p = a.shape[0]\n X = np.zeros((p, k))\n\n for l in range(k):\n # 1\n [w, V] = linalg.eigh(a)\n idx = w.argsort()\n w = w[idx]\n V = V[:, idx]\n\n # 2\n xprime, value = spannogram(V[:, -d:], w[-d:])\n\n # 4\n idx = np.abs(xprime).argsort(axis=0)\n for i in idx[:-s]:\n xprime[i] = 0\n\n X[:, l] = xprime[:, 0]\n\n # 5\n for i in idx[-s:]:\n a[i, :] = 0\n a[:, i] = 0\n\n return X", "def __init__(self, A, rank=0):\r\n _u, _s, _v = np.linalg.svd(A, full_matrices=0)\r\n \r\n self.rank = rank\r\n\r\n self.U = _u[:,:self.rank].copy()\r\n self.S = _s[:self.rank].copy()\r\n self.SI = np.matrix(np.diag(self.S)).getI()\r\n self.VT = _v[:self.rank,:].copy()\r\n \r\n self._var = [ e/(_s**2).sum() for e in (_s**2).cumsum() ][self.rank-1]", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def compute_matrix(self):\n\n fac = self.a / self.dx ** 2\n\n diagonal = np.ones(self.nx) * 2 * fac\n lower = np.ones(self.nx - 1) * -fac\n upper = np.ones(self.nx - 1) * -fac\n\n matrix = sp.diags(\n diagonals=[diagonal, lower, upper],\n offsets=[0, -1, 1], shape=(self.nx, self.nx),\n format='csr')\n\n return matrix", "def decompress_svd(size:tuple, svd_u, svd_s, svd_vh):\r\n m, n = size[0:2]\r\n u = np.zeros((3, m, m), dtype=np.float64)\r\n s = np.zeros((3, min(m, n)), dtype=np.float64)\r\n vh 
= np.zeros((3, n, n), dtype=np.float64)\r\n\r\n _,p = svd_s.shape\r\n u[:, 0:m, 0:p] = svd_u[:, :, :]\r\n s[:, 0:p] = svd_s[:, :]\r\n vh[:, 0:p, 0:n] = svd_vh[:, :, :]\r\n\r\n # SVD equation: A = U * D * VH\r\n img_svd = np.zeros(size, dtype=np.uint8)\r\n for k in range(3):\r\n d = np.zeros((m, n), dtype=np.float64)\r\n d[:min(m, n), :min(m, n)] = np.diag(s[k, :])[:, :]\r\n img_svd[:,:,k] = np.dot(np.dot(u[k,:,:], d), vh[k,:,:])\r\n return img_svd", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def svd_shrink(X, tau):\n U,s,V = np.linalg.svd(X, full_matrices=False)\n return np.dot(U, np.dot(np.diag(shrink(s, tau)), V))", "def calculate_posvij_matrices(main_tetrad_ark):\n\n\t# Import all the possible solutions to the Vij matrices\n\tvij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n\tvij_matrices = []\n\n\tprint(\"\t\t\t\t\t\t\t\")\n\tprint(\"\tCalculating Vij matrices\")\n\tprint(\"\t\t\t\t\t\t\t\")\n\t# for i in range(0, len(main_tetrad_ark)):\n\tfor i in range(0, len(vij_possibilities)):\n\t\ttet_i = [x[1] for x in main_tetrad_ark[i]]\n\t\ttri_tet = [np.transpose(i) for i in tet_i]\n\t\tprint(\"# ********************************\")\n\t\t# print(\"\t\t\t\t\t\t\t\t \")\n\t\tprint(\"MATRIX i: \", i)\n\t\tprint(\"\t\t\t\t\t\t\t\t \")\n\t\tfor j in range(0, len(main_tetrad_ark)):\n\t\t\ttet_j = [x[1] for x in main_tetrad_ark[j]]\n\t\t\ttrj_tet = [np.transpose(j) for j in tet_j]\n\t\t\tvij_temp = []\n\t\t\t# print(\"# ********************************\")\n\t\t\tprint(\"\t\t\")\n\t\t\tprint(\"MATRIX j: \", j)\n\t\t\ttemp_zero = np.zeros((4,4), dtype=int)\n\t\t\tfor x in range(0,len(tet_i)):\n\t\t\t\ttest_1half = np.dot(tri_tet[x],tet_j[x])\n\t\t\t\ttest_2half = np.dot(trj_tet[x],tet_i[x])\n\t\t\t\ttest_difs = np.subtract(test_1half, test_2half)\n\t\t\t\t# print(\" \")\n\t\t\t\t# print(test_difs)\n\t\t\t\ttemp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n\t\t\t\tvij_temp.append(temp_mat)\n\t\t\t\t# print(\"\")\n\t\t\ttemp_add1 = np.add(vij_temp[0], vij_temp[1])\n\t\t\ttemp_add2 = np.add(temp_add1, vij_temp[2])\n\t\t\ttempf = np.add(temp_add2, vij_temp[3])\n\t\t\t# tempf = np.divide(temp_add3, 2)\n\t\t\tfor ijx in vij_possibilities:\n\t\t\t\tif np.array_equal(temp_addf, ijx[0]):\n\t\t\t\t\tprint(\"*************$$$$$$$$$$$$$$$$$$***************** 
\")\n\t\t\t\t\tprint(\"l-solution found:\", ijx[1])\n\t\t\t\t\tprint(temp_addf)\n\t\t\t\t\tprint(\"\")\n\t\t\t\t\tprint(ijx[0])\n\t\t\tif np.array_equal(temp_addf, temp_zero):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tvij_matrices.append(temp_addf)\n\t\t\t# print(\"\")\n\t\t\tprint(temp_addf)\n\t\t\t# vij_matrices.append(temp_addf)\n\t\tvijmats_size = sys.getsizeof(vij_matrices)\n\t\tprint(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n\tprint(\"Length of Vij Matrices\")\n\tprint(len(vij_matrices))\n\tpass", "def prepare4iCSD(self, index=0):\n if (\n self.surveys[index].x0_ini_guess == True\n or self.surveys[index].x0_prior == True\n ):\n self._estimateM0_(index=index)\n\n # CONSTRAINSTED INVERSION\n # -----------------------\n # Create vector with weight (data weigth, constrainsts weight and regularisation weigth)\n if self.surveys[index].x0_prior == True: # if relative smallness\n self.reg_w_0_b, self.reg_w_0_A = regularize_w(\n self.surveys[index].reg_A,\n self.wr,\n self.surveys[index].x0_prior,\n x0=self.x0,\n )\n\n # stack weight matrix\n self.W_s_A, self.W_s_b = stack_w(\n self.surveys[index].obs_w,\n self.surveys[index].con_w,\n self.surveys[index].x0_prior,\n reg_w_0_A=self.reg_w_0_A,\n reg_w_0_b=self.reg_w_0_b,\n )\n\n # apply weight to A and b (data weigth, constrainsts weight and regularisation weigth)\n self.A_w = weight_A(\n self.surveys[index].x0_prior, self.surveys[index].A_s, W_s_A=self.W_s_A\n )\n self.b_w = weight_b(\n self.surveys[index].x0_prior, self.surveys[index].b_s, W_s_b=self.W_s_b\n )\n\n # UNCONSTRAINSTED INVERSION\n # -------------------------\n else:\n self.reg_w = regularize_w(self.surveys[index].reg_A, self.wr, self.x0_prior)\n\n self.W_s = stack_w(\n self.surveys[index].obs_w,\n self.surveys[index].con_w,\n self.x0_prior,\n reg_w=self.reg_w,\n )\n self.A_w = weight_A(self.x0_prior, self.surveys[index].A_s, W_s=self.W_s)\n self.b_w = weight_b(self.x0_prior, self.surveys[index].b_s, W_s=self.W_s)", "def get_prox_nuclear(self, x_matrix, scale_factor, prev_u0=None):\n if self.gamma_num_s is None or self.gamma_num_s > 18:\n u, s, vt = np.linalg.svd(x_matrix)\n else:\n tol = scale_factor/10.\n try:\n k = max(1, self.gamma_num_s)\n if prev_u0 is not None:\n u, s, vt = sp.sparse.linalg.svds(x_matrix, v0=prev_u0, k=k, which=\"LM\", tol=tol)\n else:\n u, s, vt = sp.sparse.linalg.svds(x_matrix, k=k, which=\"LM\", tol=tol)\n u = np.matrix(u)\n vt = np.matrix(vt)\n except ValueError as e:\n print \"value error svd\", e\n u, s, vt = np.linalg.svd(x_matrix)\n\n num_nonzero_orig = (np.where(s > scale_factor))[0].size\n thres_s = np.maximum(s - scale_factor, 0)\n nuc_norm = np.linalg.norm(thres_s, ord=1)\n self.gamma_num_s = (np.where(thres_s > 0))[0].size\n\n if s.size > 0:\n prev_u0 = u[:,0]\n else:\n prev_u0 = None\n\n return u * np.diag(thres_s) * vt, nuc_norm, prev_u0", "def svd_images(imagear):\n n = np.shape(imagear)[1]\n u, s, v = np.linalg.svd(imagear/np.sqrt(n-1),full_matrices=False)\n \n return(u, s, v)", "def get_sigmazinv(self):\n\n try:\n out = np.diag(1 / self.eigen_x)\n except AttributeError:\n self.get_eigen(predictor=True)\n out = np.diag(1 / self.eigen_x)\n return out", "def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), 
eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None", "def lowest_rank_approx(A,e):\n \n \n U,s,Vh=la.svd(A,full_matrices=False)\n t=s.copy()\n t[t>e]=0\n i=t.nonzero()[0][0]\n \n return U[:,:i].dot(np.diag(s[:i])).dot(Vh[:i,:])", "def compute_svd(self,data,k):\n m, n =data.shape\n n = self.comm1.allreduce(n)\n print(m,n)\n if k==-1:\n k = min(m,n)\n args = parse()\n args.m,args.n,args.k,args.comm = m,n,k,self.comms\n args.eps = np.finfo(data.dtype).eps\n if args.m<args.n: args.p_r,args.p_c = 1,self.size\n dsvd = DistSVD(args, data)\n singularValues, U, V = dsvd.svd()\n rel_error = dsvd.rel_error(U, np.diag(singularValues), V)\n if self.global_rank==0: print('relative error is:', rel_error )\n return singularValues,U,V,rel_error", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def TR_algo8(self, h):\n ve = 0\n vd = self._vd\n k = 0\n p = [0,]*self._N\n m = max(self._compact_M)\n vM = sum(self._compact_M)\n for i in range(m-1, -1, -1):\n mu = self.extract_mask(i)\n mu_norm = sum([bit_component(mu, j) for j in range(self._N)])\n mu = rotate_right(mu, vd+1)\n pi = rotate_right(ve, vd+1) & (~mu & 2**self._N-1)\n r = [bit_component(h, vM - k - (j+1)) for j in range(mu_norm)][::-1]\n r = sum( [rx*2**j for j, rx in enumerate(r)] )\n k = k + mu_norm\n w = gcr_inv(r, mu, pi)\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(self._N):\n p[j] |= bit_component(l, j) << i\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % self._N\n return p", "def svd(matrix, rank=None):\n if matrix.ndim != 2:\n raise ValueError('Input should be a two-dimensional array. 
matrix.ndim is {} != 2'.format(matrix.ndim))\n dim_1, dim_2 = matrix.shape\n if dim_1 <= dim_2:\n min_dim = dim_1\n else:\n min_dim = dim_2\n\n if rank is None or rank >= min_dim:\n # Default on standard SVD\n U, S, V = scipy.linalg.svd(matrix)\n U, S, V = U[:, :rank], S[:rank], V[:rank, :]\n return U, S, V\n\n else:\n # We can perform a partial SVD\n # First choose whether to use X * X.T or X.T *X\n if dim_1 < dim_2:\n S, U = scipy.sparse.linalg.eigsh(np.dot(matrix, matrix.T), k=rank, which='LM')\n S = np.sqrt(S)\n V = np.dot(matrix.T, U * 1 / S[None, :])\n else:\n S, V = scipy.sparse.linalg.eigsh(np.dot(matrix.T, matrix), k=rank, which='LM')\n S = np.sqrt(S)\n U = np.dot(matrix, V) * 1 / S[None, :]\n\n # WARNING: here, V is still the transpose of what it should be\n U, S, V = U[:, ::-1], S[::-1], V[:, ::-1]\n return U, S, V.T", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. 
/ tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def getEssentialMatrix(K, F):\n E = np.dot(K.T, np.dot(F, K))\n u, s, v = np.linalg.svd(E)\n\n # We correct the singular values of the E matrix\n s_new = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]]).reshape(3, 3)\n final_E = np.dot(u, np.dot(s_new, v))\n return final_E", "def normal_modes(self, finite_step):\n\n # Get the mass weighted hessian matrix in amu\n hessian = self.calculate_hessian(finite_step)\n\n # Now get the eigenvalues and vectors\n e_vals, e_vectors = np.linalg.eig(hessian)\n print(e_vals)\n print(e_vectors)", "def _set_ls_matrices(self):\n zz_t = self.z_matrix * self.z_matrix.transpose()\n l, s, l_t = np.linalg.svd(zz_t)\n s[self.p:] = 0\n self.l_matrix = np.matrix(l)\n self.s_matirx = np.matrix(np.diag(s))", "def truncated_svd(A,k=None):\n \n \n \n AHA=np.conj(A).T.dot(A)\n evals,evecs=la.eig(AHA)\n order=np.argsort(evals)\n\n evals=evals[order][::-1].copy()\n evecs=evecs.T[order][::-1].copy()\n m,n=AHA.shape\n \n tol=1e-12\n Vh=[]\n for i in xrange(0,m):\n\t\t if np.abs(evals[i])>=tol:\n\t \t\tVh+=[evecs[i]]\n \n Vh=np.array(Vh)\n s=np.sqrt(evals[:Vh.shape[0]])\n U=[]\n for i in xrange(0,len(s)):\n U+=[(1./s[i])*A.dot(Vh[i])]\n U=np.array(U).T\n \n return U,s,Vh", "def check_non_singular(self, Am):\r\n det = self.detf(Am)\r\n if det != 0:\r\n return det\r\n else:\r\n raise ArithmeticError(\"Singular Matrix!\")", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. 
We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def project_Lnuc_ball(X: \"fasta.linalg.Matrix\", t: float) -> \"fasta.linalg.Matrix\":\n U, s, V = la.svd(X)\n\n # Construct the diagonal matrix of singular values, S, as a shrunken version of the original signal values\n S = np.zeros(X.shape)\n S[:len(s),:len(s)] = np.diag(shrink(s, t))\n return U @ S @ V", "def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)", "def initialize (A, S):\n ind = get_indicator_vector(A[S,:][:,S])\n x_v = np.zeros(shape=A.shape[0])\n x_v[S] = ind\n # Candidate set to contain only the edges on the periphery of As\n C = []\n for v in S:\n for u in A[v, :].nonzero()[1]:\n if (x_v[u] == 0):\n C.append((v, u))\n return x_v, C", "def fir(timeseries, design):\r\n X = np.matrix(design)\r\n y = np.matrix(timeseries)\r\n h = np.array(linalg.pinv(X.T * X) * X.T * y.T)\r\n return h", "def 
zsx_0(self):\n return self.glb[iz0]/self.glb[ix0]", "def smat(v):\n \n k = len(v)\n n = TriLengthToSquare(k)\n \n A = np.zeros([n,n])\n A[np.triu_indices(n)] = v\n A[np.triu_indices(n,1)] *= 2 / np.sqrt(2)\n return (A + A.T) / 2", "def _vect_matrix_inverse(A):\n identity = np.identity(A.shape[2], dtype=A.dtype)\n return np.array([np.linalg.solve(x, identity) for x in A])", "def initialize(self):\n self.U = range(self.K)\n self.H = np.identity(self.rank)\n temp = 0\n self.S = np.zeros([self.rank, self.rank, self.K])\n for k in range(self.K):\n self.S[:, :, k] = np.identity(self.rank)\n temp += self.X[k].T.dot(self.X[k])\n [eigval, eigvec] = np.linalg.eig(temp)\n self.V = eigvec[:, range(self.rank)]", "def _compute_terms_to_make_leading_submatrix_singular(hessian_info, k):\n hessian_plus_lambda = hessian_info.hessian_plus_lambda\n upper_triangular = hessian_info.upper_triangular\n n = len(hessian_plus_lambda)\n\n delta = (\n np.sum(upper_triangular[: k - 1, k - 1] ** 2)\n - hessian_plus_lambda[k - 1, k - 1]\n )\n\n v = np.zeros(n)\n v[k - 1] = 1\n\n if k != 1:\n v[: k - 1] = solve_triangular(\n upper_triangular[: k - 1, : k - 1], -upper_triangular[: k - 1, k - 1]\n )\n\n return delta, v", "def svd_S(T):\n try:\n S = splinalg.svd(T, full_matrices=False, compute_uv=False)\n except splinalg.LinAlgError:\n S = splinalg.svd(T, full_matrices=False, lapack_driver='gesvd', compute_uv=False)\n return S", "def Sinv(self):\n Wplus = np.matrix(np.diag(self.w**2 + 1.0))\n return self.priorSinvh * self.V.T * Wplus * self.V * self.priorSinvh", "def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # SVD Factorization\n U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n\n # Remove dimensions related with very small singular values\n U = U[:, s > tol]\n Vt = Vt[s > tol, :]\n s = s[s > tol]\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(z)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n z = U.dot(aux2)\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = V 1/s U.T x\n aux1 = U.T.dot(x)\n aux2 = 1/s*aux1\n z = Vt.T.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def svd(a, full_matrices=False, compute_uv=True):\n #-> gesvd\n a, cv, isMatrix = get_computation_matrix(a)\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (u, s, vt, _) = gesvd(a, compute_uv=compute_uv, \\\n full_matrices=full_matrices, lwork=0,\\\n overwrite_a=1, dtype=t_dtype)\n if not compute_uv:\n if cv:\n return s.to_numpy_array() # ndarray\n else:\n return s # FrovedisVector\n else:\n if cv and isMatrix:\n return (u.to_numpy_matrix(), s.to_numpy_array(),\\\n vt.to_numpy_matrix())\n elif cv and not isMatrix:\n return (u.to_numpy_array(), s.to_numpy_array(),\\\n vt.to_numpy_array())\n else:\n return (u, s, vt)", "def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small 
number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s", "def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)" ]
[ "0.66048837", "0.6466162", "0.6259937", "0.6250825", "0.62505597", "0.62274474", "0.6104567", "0.6089218", "0.6025379", "0.5982765", "0.597328", "0.590215", "0.58907986", "0.58582675", "0.58575904", "0.584388", "0.58408606", "0.58376825", "0.581499", "0.58008623", "0.5792866", "0.57560784", "0.5754681", "0.573956", "0.5721776", "0.5710557", "0.56707674", "0.5659692", "0.5650968", "0.5639219", "0.5630885", "0.5629088", "0.5626873", "0.5615836", "0.5613928", "0.55852765", "0.5566613", "0.5562814", "0.55594915", "0.5549646", "0.55289227", "0.55209863", "0.5490764", "0.54743916", "0.5464799", "0.54568446", "0.5453528", "0.5440958", "0.5440758", "0.54392815", "0.54341847", "0.5433504", "0.54313093", "0.54305494", "0.5421267", "0.54145354", "0.54045105", "0.53959453", "0.5387335", "0.53868663", "0.53835815", "0.5366867", "0.5357951", "0.53560305", "0.53364813", "0.5334518", "0.5326498", "0.53012824", "0.5294708", "0.5294111", "0.5280381", "0.52732414", "0.5269597", "0.52694637", "0.5258422", "0.52538425", "0.5249476", "0.52431375", "0.52352005", "0.52111375", "0.5202409", "0.51951665", "0.518155", "0.5176293", "0.5174969", "0.5166565", "0.51636213", "0.5161851", "0.5161104", "0.51589423", "0.5157191", "0.51358396", "0.51332694", "0.5129083", "0.51276344", "0.5126066", "0.51243526", "0.5123419", "0.5115548", "0.5110807" ]
0.68444854
0
Leverages the 8-point algorithm and implements RANSAC to find the inliers and the best fundamental matrix
def getInlierRANSAC(pts1, pts2): # global finalFundamentalMatrix iterations = 50 threshold = 0.01 max_count = 0 n = len(pts1) finalFundamentalMatrix = np.zeros((3, 3)) for i in range(iterations): count = 0 idx = random.sample(range(n - 1), 8) left_pts = pts1[idx] right_pts = pts2[idx] F = computeFundamentalMatrix(left_pts, right_pts) left_feature_inlier = [] right_feature_inlier = [] # print("Sample index: ", len(idx)) for j in range(0, n): homogeneous_right = np.array([pts2[j, 0], pts2[j, 1], 1]) homogeneous_left = np.array([pts1[j, 0], pts1[j, 1], 1]) fit = np.dot(homogeneous_right.T, np.dot(F, homogeneous_left)) # print("Fit for iteration ", i," ", np.abs(fit)) if np.abs(fit) < threshold: left_feature_inlier.append(pts1[j]) right_feature_inlier.append(pts2[j]) count = count + 1 # print('Inlier count', count) inlier_Left = np.array(left_feature_inlier) inlier_Right = np.array(right_feature_inlier) if count > max_count: max_count = count finalFundamentalMatrix = F final_inlier_Left = inlier_Left final_inlier_Right = inlier_Right return finalFundamentalMatrix, final_inlier_Left, final_inlier_Right
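The document above calls a computeFundamentalMatrix helper that is not defined in the row itself (one of the negatives further down contains a variant). For reference, a minimal sketch of such a helper is given below, assuming the plain, unnormalized 8-point algorithm: stack the epipolar constraints x2^T F x1 = 0 into a linear system, take the right singular vector associated with the smallest singular value as F, then enforce the rank-2 constraint. The function name and the absence of coordinate normalization are illustrative assumptions, not part of the dataset row.

import numpy as np

def compute_fundamental_matrix_sketch(pts1, pts2):
    # pts1, pts2: (N, 2) arrays of matched pixel coordinates, N >= 8.
    assert len(pts1) == len(pts2) and len(pts1) >= 8
    A = np.zeros((len(pts1), 9))
    for i, ((x1, y1), (x2, y2)) in enumerate(zip(pts1, pts2)):
        # Each row encodes x2^T F x1 = 0 with F flattened row-major.
        A[i] = [x2 * x1, x2 * y1, x2,
                y2 * x1, y2 * y1, y2,
                x1, y1, 1.0]
    # Least-squares solution: right singular vector of the smallest singular value.
    _, _, Vt = np.linalg.svd(A)
    F = Vt[-1].reshape(3, 3)
    # Enforce rank 2 by zeroing the smallest singular value of F.
    U, S, Vt = np.linalg.svd(F)
    S[2] = 0.0
    return U @ np.diag(S) @ Vt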
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ransac(data, hypothesis, metric, sample_size, num_iter, inlier_thresh):\n N,d = data.shape\n best_frac, best_hypothesis, best_mask = 0, None, None\n for i in range(num_iter):\n js = np.random.choice(N,size=sample_size,replace=False)\n hypothesis_elements = data[js,:]\n H = hypothesis(hypothesis_elements)\n badness = np.array([metric(row,H) for row in data])\n inlier_mask = (badness<inlier_thresh)\n inlier_frac = inlier_mask.mean()\n if inlier_frac>best_frac:\n best_frac, best_hypothesis, best_mask = inlier_frac,H,inlier_mask\n # print(H)\n # print(inlier_mask)\n return best_hypothesis, best_mask", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def estimate_F_ransac(corr, num_iter, inlier_thresh):\n _, inlier_mask = ransac(corr, estimate_F, sym_epipolar_dist, 8, num_iter, inlier_thresh)\n # inlier_mask = np.ones(9)\n # inlier_mask[0] = 0\n F = estimate_F(corr[inlier_mask.astype(np.bool)])\n return F", "def ransac(matches, kp1, kp2, s=4, threshold=3, maxIterations=2000, returnMatches=False, inlierRatio=0.05, ransacRatio=0.6):\n\n sizes_kp1 = [kp1[dt[0].queryIdx].size for dt in matches]\n sizes_kp2 = [kp1[dt[0].trainIdx].size for dt in matches]\n tup_matches_kp1 = [kp1[dt[0].queryIdx].pt for dt in matches]\n tup_matches_kp2 = [kp2[dt[0].trainIdx].pt for dt in matches]\n matches_kp1 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp1])\n matches_kp2 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp2])\n\n cnt_matches = len(matches)\n\n max_matches = []\n max_p1, max_p2 = [], []\n max_p1_sizes, max_p2_sizes = [], []\n max_total = 0\n\n for iter in range(maxIterations):\n # Find Homography based on random sample\n data = random.sample(matches, s)\n data_p1 = np.array([matches_kp1[dt[0].queryIdx] for dt in data])\n data_p2 = np.array([matches_kp2[dt[0].trainIdx] for dt in data])\n homography = homomat(data_p1[:, :2], data_p2[:, :2])\n\n # Find P1 projection from the homography matrix\n projected_p2 = np.dot(homography, matches_kp1.transpose())\n projected_p2 = projected_p2[0:3] / projected_p2[2] # make sure w' is 1\n projected_p2 = projected_p2.transpose()\n\n # Initialize Current Matches\n current_matches = []\n current_p1, current_p2 
= [], []\n current_p1_sizes, current_p2_sizes = [], []\n current_total = 0\n\n # Check for inliers and outliers for each matches\n for i, (match) in enumerate(matches):\n # normalize the error\n error = np.linalg.norm(matches_kp2[i] - projected_p2[i])\n\n # Check for inliers\n if error < threshold:\n current_matches.append([cv.DMatch(current_total, current_total, match[0].distance)])\n current_p1.append(matches_kp1[i][0:2])\n current_p2.append(matches_kp2[i][0:2])\n current_p1_sizes.append(sizes_kp1[i])\n current_p2_sizes.append(sizes_kp2[i])\n current_total += 1\n\n # If\n if current_total > max_total and current_total >= np.round(inlierRatio*cnt_matches):\n max_matches = current_matches\n max_p1 = current_p1\n max_p2 = current_p2\n max_p1_sizes = current_p1_sizes\n max_p2_sizes = current_p2_sizes\n max_total = current_total\n\n # # we are done in case we have enough inliers\n if current_total > cnt_matches * ransacRatio:\n break\n\n\n # Re-evaluate the Homography based on the best inliers\n max_homography = homomat(np.array(max_p1), np.array(max_p2))\n\n if returnMatches:\n max_kp1 = [cv.KeyPoint(d[0], d[1], max_p1_sizes[i]) for i, d in enumerate(max_p1)]\n max_kp2 = [cv.KeyPoint(d[0], d[1], max_p2_sizes[i]) for i, d in enumerate(max_p2)]\n return max_homography, max_matches, max_kp1, max_kp2\n\n return max_homography", "def create_cands(data):\n\n best = np.zeros(data.dim+1)\n best[0:data.dim] = data.xbest\n best[data.dim] = 1-np.sum(data.xbest)\n\n # Ncand times the best value\n cp_e = np.kron(np.ones((data.Ncand, 1)), np.asmatrix(best))\n # This generates random perturbations\n # need dim+1 to account for the \"missing\" value\n r = np.random.rand(data.Ncand, data.dim+1)\n a = r < data.pertP\n idx = np.where(np.sum(a, axis=1) == 0)\n for ii in range(len(idx[0])):\n f = np.random.permutation(data.dim+1)\n a[idx[0][ii], f[0]] = True\n randnums = np.random.randn(data.Ncand, data.dim+1)\n randnums[a == False] = 0\n pv = randnums*data.sigma_stdev\n # Create new points by adding random fluctucations to best point\n new_pts = cp_e+pv\n\n # Iterative, column wise procedure to force the randomly\n # sampled point to be in [0,1]\n for ii in range(data.dim+1):\n vec_ii = new_pts[:, ii]\n adj_l = np.where(vec_ii < data.xlow)\n vec_ii[adj_l[0]] = data.xlow + (data.xlow - vec_ii[adj_l[0]])\n adj_u = np.where(vec_ii > data.xup)\n vec_ii[adj_u[0]] = data.xup - (vec_ii[adj_u[0]]-data.xup)\n stillout_u = np.where(vec_ii > data.xup)\n vec_ii[stillout_u[0]] = data.xlow\n stillout_l = np.where(vec_ii < data.xlow)\n vec_ii[stillout_l[0]] = data.xup\n new_pts[:, ii] = copy.copy(vec_ii)\n\n new_pts = new_pts / np.sum(new_pts, axis=1)\n\n cp_e = copy.copy(new_pts)\n rand_pts = np.asmatrix(np.random.uniform(0, 1, [data.Ncand, data.dim + 1]))\n cp_r = rand_pts/np.sum(rand_pts, axis=1)\n\n CandPoint = np.concatenate((cp_e, cp_r), axis=0)\n # return only data.dim candidate points\n CandPoint_out = CandPoint[:, 0:data.dim]\n\n return CandPoint_out", "def ransac(cloud_s, cloud_t, \n depth_s, depth_t,\n A_prev, b_prev,\n n_iter, n_inlier_cutoff, d_cutoff):\n import random\n n_s = len(cloud_s)\n n_t = len(cloud_t)\n n_inliers = [0] * n_iter\n# Initialization\n A_init = A_prev\n b_init = b_prev\n pred_t = A_init.dot(cloud_s.T).T + b_init\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n max_inliers = sum(inliers)\n print(\"Have \" + str(n_s) + \" features that could be inliers\")\n print(\"Starting with \" 
+ str(max_inliers) + \" inliers\")\n for iter in range(n_iter):\n assert n_s == n_t, \"clouds not of equal size in ransac()\"\n # TODO: replace this random choice with 3 corresponding feature descriptors\n points_inds = random.sample(range(n_s), 3)\n x_vals = np.array([cloud_s[i] for i in points_inds])\n y_vals = np.array([cloud_t[i] for i in points_inds])\n\n # Using Horn 1987, Closed-form solution of absolute orientation\n # using unit quaternions.\n A_init_tmp, b_init_tmp = horn_adjust(x_vals, y_vals)\n\n # TODO: find inliers to the transformation T\n pred_t = A_init_tmp.dot(cloud_s.T).T + b_init_tmp\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n n_inliers = sum(inliers)\n\n # TODO: do we want to refit on the inliers?\n if n_inliers > max_inliers:\n A_init = A_init_tmp\n b_init = b_init_tmp\n max_inliers = n_inliers\n print(\"Adjusting A and b again!\")\n print(A_init)\n print(b_init)\n\n # TODO: are we using n_inlier_cutoff in this way? Check the paper!\n if max_inliers < n_inlier_cutoff:\n raise Exception('insufficient inliers! Want ' + str(n_inlier_cutoff) +\n ' but got ' + str(max_inliers))\n #max_index = n_inliers.index(max(n_inliers)) \n # Compute the best transformation T_star\n# TODO: actually optimize over the depth field!! using spatial.KDTree and spatial.KDTree.query\n# Need to shift depth1XYZ by our initial transformation first\n depth1XYZ = A_init.dot(depth_s.T).T + b_init\n depth2XYZ = depth_t\n tree = spatial.KDTree(depth2XYZ)\n tree_q = tree.query(depth1XYZ)\n# Keep only matches within the cutoff.\n# depth_pair_inds has indeces for depth1XYZ and depth2XYZ\n cutoff = 0.01\n depth_pair_inds = [(i,tree_q[1][i]) for i in range(len(tree_q[0]))\n if tree_q[0][i] < cutoff]\n #depth_cloud_s = np.array([depth1XYZ[k[0]] for k in depth_pair_inds])\n depth_cloud_s = np.array([depth_s[k[0]] for k in depth_pair_inds])\n depth_cloud_t = np.array([depth2XYZ[k[1]] for k in depth_pair_inds])\n\n# A_d = list(range(n_s))\n# A, b = find_argmin_T(cloud_s, cloud_t, A_d,\n# A_init, b_init)\n A_d = list(range(depth_cloud_s.shape[0]))\n A, b = find_argmin_T(depth_cloud_s, depth_cloud_t, A_d,\n A_init, b_init)\n print(\"A_init value:\")\n print(A_init)\n print(\"b_init value:\")\n print(b_init)\n \n print(\"Returning A, b\")\n print(\"A value:\")\n print(A)\n print(\"b value:\")\n print(b)\n print(\"inliers:\")\n print(max_inliers)\n return(A, b)", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. 
Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def pareto_frontier(cmrf,featlist) :\n\tQ = []\n\ttaboodict = {}\n\tnStates = len(featlist)\n\tfeat1,feat2 = featlist\n\tEaxa,Xa = cmrf.decode(feat1)\n\tEbxb,Xb = cmrf.decode(feat2)\n\tif Xa == Xb : \n\t\treturn [Xa],[(Eaxa,Ebxb)]\n\tEaxb = cmrf.score(Xb,feat1)\n\tEbxa = cmrf.score(Xa,feat2)\n\tQ.append((Xa,Xb))\n\tfrontier,frontier_energy = [],[]\n\tfrontier.extend([Xa,Xb])\n\tfrontier_energy.extend([(Eaxa,Ebxa),(Eaxb,Ebxb)])\n\ttaboodict[(Eaxa,Ebxa)] = 1;\n\ttaboodict[(Eaxb,Ebxb)] = 1;\n\twhile len(Q) > 0 :\n\t\t### Optimize \n\t\tXa,Xb = Q[0]\n\t\tQ = Q[1:] # Dequeue\n\t\tEaxb = cmrf.score(Xb,feat1)\n\t\tEbxa = cmrf.score(Xa,feat2)\t\n\t\tEaxa = cmrf.score(Xa,feat1)\n\t\tEbxb = cmrf.score(Xb,feat2)\t\n\t\tm = (Ebxa - Ebxb)/(Eaxa-Eaxb)\n\t\tif m > 0 : \n\t\t\t#stop()\n\t\t\tsys.stderr.write(\"### WARNING : Slope > 0. Cvxhull failed\")\n\t\t\treturn frontier,frontier_energy\n\t\tthetaa = -m/(1-m)\n\t\tthetab = 1/(1-m)\n\t\ttmrf = TMRF(cmrf,[thetaa,thetab],[feat1,feat2])\n\t\tXab = tmrf.decode()[1]\n\t\tEaxab = cmrf.score(Xab,feat1)\n\t\tEbxab = cmrf.score(Xab,feat2)\n\t\tif Xab != Xa and Xab != Xb and \\\n\t\t\tnot taboodict.has_key((Eaxab,Ebxab)) :\n\t\t\t# Check almost equal condition\n\t\t\tif any(map(lambda(x):almost_eq(Eaxab,x[0] or \\\n\t\t\t\talmost_eq(Ebxab,x[1])),taboodict.keys())) : \n\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tfrontier.append(Xab)\n\t\t\tfrontier_energy.append((Eaxab,Ebxab))\n\t\t\ttaboodict[(Eaxab,Ebxab)]=1\n\t\t\tQ.extend([(Xa,Xab),(Xab,Xb)])\n\t# Calculate energy of frontier elements\t\n\treturn frontier,frontier_energy", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 
two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n 
np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def fit_plane_ransac(pts, neighbors=None,z_pos=None, dist_inlier=0.05, \n min_inlier_frac=0.60, nsample=3, max_iter=100):\n n,_ = pts.shape\n ninlier,models = [],[]\n for i in range(max_iter):\n if neighbors is None:\n p = pts[np.random.choice(pts.shape[0],nsample,replace=False),:]\n else:\n p = pts[neighbors[:,i],:]\n m = fit_plane(p,z_pos)\n ds = np.abs(pts.dot(m[:3])+m[3])\n nin = np.sum(ds < dist_inlier)\n if nin/pts.shape[0] >= min_inlier_frac:\n ninlier.append(nin)\n models.append(m)\n\n if models == []:\n print (\"RANSAC plane fitting failed!\")\n return #None\n else: #refit the model to inliers:\n ninlier = np.array(ninlier)\n best_model_idx = np.argsort(-ninlier)\n n_refit, m_refit, inliers = [],[],[]\n for idx in best_model_idx[:min(10,len(best_model_idx))]:\n # re-estimate the model based on inliers:\n dists = np.abs(pts.dot(models[idx][:3])+models[idx][3])\n inlier = dists < dist_inlier\n m = fit_plane(pts[inlier,:],z_pos)\n # compute new inliers:\n d = np.abs(pts.dot(m[:3])+m[3])\n inlier = d < dist_inlier/2 # heuristic\n n_refit.append(np.sum(inlier))\n m_refit.append(m)\n inliers.append(inlier)\n best_plane = np.argmax(n_refit)\n return m_refit[best_plane],inliers[best_plane]", "def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q > 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. 
Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def scoreCirc_PassiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_SUHAD(gen, indi)#TODO\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5)# if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping)# if damping < 60 else 0\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n \n score = 10*r + 
g + 10*d\n\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def ws06(adp1, adp2):\n # print sum(adp1[:3])/3. - sum(adp2[:3])/3.\n adp1 = get_matrix(adp1)\n adp2 = get_matrix(adp2)\n adp1i = np.linalg.inv(adp1)\n adp2i = np.linalg.inv(adp2)\n a = 2 ** 1.5\n b = np.dot(adp1i, adp2i)\n c = np.linalg.det(b)\n\n # if c <= 0:\n # c *= -1\n d = c ** 0.25\n up = a * d\n\n x = adp1i + adp2i\n y = np.linalg.det(x)\n # if y <= 0:\n # y *= -1\n z = y ** 0.5\n R = up / z\n return 100 * (1 - R)", "def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. 
Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = (R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - 
B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset", "def computeFundamentalMatrix(pts1, pts2):\n A = np.empty((8, 9))\n for i in range(len(pts1)-1):\n x1 = pts1[i][0]\n x2 = pts2[i][0]\n y1 = pts1[i][1]\n y2 = pts2[i][1]\n A[i] = np.array([x1 * x2, x2 * y1, x2,\n y2 * x1, y2 * y1, y2,\n x1, y1, 1])\n # Compute F matrix by evaluating SVD\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # Constrain the F matrix to rank 2\n U1, S1, V1 = np.linalg.svd(F)\n # print('Old S', S)\n # S[2] = 0\n S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])\n # print('New S', S)\n F = np.dot(np.dot(U1, S2), V1)\n\n return F", "def ransac(matches, kp1, kp2, sample_points=4, iterations=5, inlier_tolerance=3, inlier_ratio=0.45, check=True, return_max_x=False):\n\n best_inlier_count = 0\n best_h = None\n best_inlier_indices = None\n\n # Get all the corresponing matching pairs for both the images\n pts1 = np.array([kp1[m.queryIdx].pt for m in matches])\n pts2 = np.array([kp2[m.trainIdx].pt for m in matches])\n\n # Re-usable variables for all iterations\n homogeneous_pts1 = np.hstack((pts1, np.ones((pts1.shape[0], 1)))).T\n indices = np.arange(len(pts1))\n num_pts = pts1.shape[0]\n required_inliers = inlier_ratio * num_pts\n\n # For number of iterations\n for _ in range(iterations):\n\n # Sample a small set of points from the point match pairs\n indices_to_sample = np.random.choice(indices, sample_points)\n pts1_sample = pts1[indices_to_sample]\n pts2_sample = pts2[indices_to_sample]\n\n # Get the homography matrix\n h = get_homography_matrix(pts1_sample, pts2_sample)\n\n # Find the new points using the homography matrix\n transformed_points = np.dot(h, homogeneous_pts1).T\n\n # Convert it to world coordinates\n last_col = np.copy(transformed_points[:, -1])\n last_col = last_col[:, np.newaxis]\n transformed_points /= last_col\n transformed_points = transformed_points[:, :-1]\n\n # Find the distance between the actual and the mapped points\n distance = np.linalg.norm(pts2 - transformed_points, axis=1)\n inlier_indices = distance < inlier_tolerance\n inlier_count = inlier_indices.sum()\n\n # Update the best_h if the current h has more inliers\n if inlier_count > best_inlier_count:\n best_h = h\n best_inlier_indices = inlier_indices\n best_inlier_count = inlier_count\n\n # If required inliers is reached break\n if 
inlier_count > required_inliers:\n break\n\n # Verbose mode - Print the number of inliers\n if check:\n transformed_points = np.dot(best_h, homogeneous_pts1).T\n # Convert it to world coordinates\n last_col = np.copy(transformed_points[:, -1])\n last_col = last_col[:, np.newaxis]\n transformed_points /= last_col\n transformed_points = transformed_points[:, :-1]\n distance = np.linalg.norm(pts2 - transformed_points, axis=1)\n inlier_count = len(distance[distance < inlier_tolerance])\n print('%2.2f of the points are inliers' %\n (inlier_count / num_pts * 100))\n\n # If x coordinates are needed\n if return_max_x:\n max_x_inlier_1 = ceil(pts1[best_inlier_indices].max(axis=0)[0])\n max_x_inlier_2 = ceil(pts2[best_inlier_indices].max(axis=0)[0])\n return best_h, max_x_inlier_1, max_x_inlier_2\n return best_h", "def ransac(data, model, n, k, t, d, debug=False, return_all=False):\n iterations = 0\n bestfit = None\n # besterr = np.inf\n best_inlier_idxs = None\n while iterations < k:\n maybe_idxs, test_idxs = random_partition(n, data.shape[0])\n maybeinliers = data[maybe_idxs, :]\n test_points = data[test_idxs, :]\n maybemodel = model.fit(maybeinliers)\n test_err = model.get_error(test_points, maybemodel)\n # select indices of rows with accepted points\n also_idxs = test_idxs[test_err < t]\n alsoinliers = data[also_idxs, :]\n if len(alsoinliers) > d:\n betterdata = np.concatenate((maybeinliers, alsoinliers))\n bestfit = model.fit(betterdata)\n # better_errs = model.get_error(betterdata, bettermodel)\n # thiserr = np.mean(better_errs)\n # if thiserr < besterr:\n # bestfit = bettermodel\n # besterr = thiserr\n best_inlier_idxs = np.concatenate((maybe_idxs, also_idxs))\n break\n iterations += 1\n if bestfit is None:\n raise ValueError(\"did not meet fit acceptance criteria\")\n if return_all:\n return bestfit, {'inliers': best_inlier_idxs}\n else:\n return bestfit", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def _cce(func, s, sf, bl, bu, mask, icall, maxn, alpha, beta, maxit, printit):\n\n \"\"\"\n List of local variables\n sb(.) = the best point of the simplex\n sw(.) = the worst point of the simplex\n w2(.) = the second worst point of the simplex\n fw = function value of the worst point\n ce(.) = the centroid of the simplex excluding wo\n snew(.) = new point generated from the simplex\n iviol = flag indicating if constraints are violated\n = 1 , yes\n = 0 , no\n \"\"\"\n # Assign the best and worst points:\n sb = s[0,:]\n fb = sf[0]\n sw = s[-1,:]\n fw = sf[-1]\n\n # Compute the centroid of the simplex excluding the worst point:\n ce = np.mean(s[:-1,:],axis=0)\n\n # Attempt a reflection point\n snew = ce + alpha*(ce-sw)\n snew = np.where(mask, snew, sb) # sb should have initial params at mask==False\n\n # Check if is outside the bounds:\n ibound = 0\n # s1 = snew-bl\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 1\n if np.ma.any(np.ma.array(snew-bl, mask=~mask) < 0.): ibound = 1\n\n # s1 = bu-snew\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 2\n if np.ma.any(np.ma.array(bu-snew, mask=~mask) < 0.): ibound = 2\n\n if ibound >= 1:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Reflection failed; now attempt a contraction point:\n if fnew > fw:\n snew = sw + beta*(ce-sw)\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Both reflection and contraction have failed, attempt a random point;\n if fnew > fw:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # end of _cce\n return snew, fnew, icall", "def scoreCirc_ActiveFilter_3(circuit, gen, indi, makeRedundancyInMatrix):\n\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = 
float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n score = 0\n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_2(gen, indi)\n\n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(20 - damping) if damping < 20 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0)# if gain < 10 else 0.01\n \n THD_Lf = np.array(results['THD_Lf']['nominal'], dtype=float)\n if np.isnan(THD_Lf):\n disfCount = disfCount + 1\n thd_lf = 0\n else:\n thd_lf = THD_Lf-1 if THD_Lf > 1 else 0\n \n THD_Hf = np.array(results['THD_Hf']['nominal'], dtype=float)\n if np.isnan(THD_Hf):\n disfCount = disfCount + 1\n thd_hf = 0\n else:\n thd_hf = THD_Hf-1 if THD_Hf > 1 else 0\n \n #RIN = np.array(results['rin_meas']['nominal'], dtype=float) #--------not in use\n #if np.isnan(RIN):\n # disfCount = disfCount + 1\n # rin = 0\n #else:\n # rin = 1/RIN*1e6 if RIN < 1e7 else 0\n\n isLP = np.array(results['is_LP']['nominal'], dtype=float)\n if np.isnan(isLP):\n disfCount = disfCount + 1\n islp = 0\n else:\n islp = 0 if isLP>0 else 100# np.abs(isLP)\n \n #slope = np.array(results['maxDampingSlope']['nominal'], dtype=float)\n #print slope\n #if np.isnan(slope):\n # disfCount = disfCount + 1\n # slo = 0\n #else:\n # slo = 0 if slope>60 else 60-slope\n \n maxSlope = results['maxDampingSlope']['nominal']\n if type(np.nan) == type(maxSlope) or type(None) == type(maxSlope):\n disfCount = disfCount + 2\n slo = 0\n slof = 0 \n else:\n if len(maxSlope)==2:\n\tslo = 0 if maxSlope[0]>60 else 60-maxSlope[0]\n\tslof = np.log10(abs(maxSlope[1]-1000))\n else:\n\tslo = 0\n\tslof = 0\n\tdisfCount = disfCount + 1 \n \n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n #disfCount = disfCount + 1\n bandwidth = 0\n bw = abs(bandwidth-1000)\n \n StaticOut = not results['isOutVNonStationary']['nominal']\n score = 10*slo + 10*r + (100*StaticOut + 10*(thd_lf + thd_hf) + 1*islp + g)#rin!\n\n #print disfCount\n if disfCount > 0:\n score = 0 + np.exp(disfCount) * 1e3\n #print \"disfCount was there\"\n\n #score = score + (IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n print \"\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n #circuit.objectivesScore = copy(score)\t#id does not work with mpirun since mpirun works with copies\n #circuit.matrixDensity = matrixDensity\n return score, matrixDensity, matrixQuaziID, results", "def question26():\n n = 10\n n2 = n**2\n A = construct_matrix_A(n)\n x0 = np.random.randn(n2)\n b = np.random.randn(n2)\n\n # Compute optimal gamma:\n M, N = construct_M_N(n)\n\n # 
Eigenvalues of M and N are the same, so just use M for this now\n mu_max = scipy.sparse.linalg.eigsh(M, k=1, which='LM', return_eigenvectors=False)[0]\n mu_min = scipy.sparse.linalg.eigsh(M, k=1, which='SM', return_eigenvectors=False)[0]\n\n optimal_gamma_theoretical = np.sqrt(mu_min * mu_max)\n\n # We now verify this using our code:\n gamma_search = np.linspace(0.1, 4, 500)\n iters_array = np.zeros(500, dtype=int)\n\n for i, g in enumerate(gamma_search):\n iters_array[i] = alternative_iterative_method(x0, n, g, b)[1]\n\n min_graph = np.argmin(iters_array)\n min_iter = np.min(iters_array)\n min_gamma = gamma_search[min_graph]\n\n fig260 = plt.figure(figsize=(13, 8))\n plt.plot(gamma_search, iters_array)\n plt.plot(min_gamma, min_iter, 'ro',\n label=f\"Theoretical Gamma = {optimal_gamma_theoretical:.3f}\\n\" \\\n f\"Min Iterations at (Gamma={min_gamma:.3f}, Iters={min_iter})\")\n plt.axvline(x=optimal_gamma_theoretical)\n plt.legend()\n plt.grid()\n plt.xlabel(\"Gamma\")\n plt.ylabel(\"Iterations til Convergence\")\n plt.title(\"Figure 260 - Convergence Steps for Varying Gamma (N=10)\")\n plt.savefig(\"figures/figure260.png\")\n plt.show()\n return", "def scoreCirc_ActiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n #FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_SUHAD(gen, indi)#TODO\n \n \n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 10) if gain < 10 else 0\n \n THD = np.array(results['THD']['nominal'], dtype=float)\n if np.isnan(THD):\n disfCount = disfCount + 1\n thd = 0\n else:\n thd = THD-1 if THD > 1 else 0\n\t \n StaticOut = not results['isOutVNonStationary']['nominal']\n \n score = 5*r + 4*d + 2*g + (100*StaticOut + 10*thd)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n \n ##add a little salt!\n #score = score + random.uniform(0.0, 1)\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n return 
score, matrixDensity, matrixQuaziID, results", "def find_inlier(self):\n len_of_matches = len(self.match)\n # The last line of W stores the whole number of consistency of this match\n self.W = np.zeros((len_of_matches+1, len_of_matches))\n for i in np.arange(len_of_matches):\n for j in np.arange(len_of_matches):\n if i >= j:\n continue\n\n # ASSUMPTION : the index of descriptor is the same with the index of image\n wa = self.featureFrameA[self.match[i].queryIdx].pt[0]-self.featureFrameA[self.match[j].queryIdx].pt[0]\n wb = self.featureFrameA[self.match[i].queryIdx].pt[1]-self.featureFrameA[self.match[j].queryIdx].pt[1]\n wa_ = self.featureFrameB[self.match[i].trainIdx].pt[0]-self.featureFrameB[self.match[j].trainIdx].pt[0]\n wb_ = self.featureFrameB[self.match[i].trainIdx].pt[1]-self.featureFrameB[self.match[j].trainIdx].pt[1]\n\n # Compare and complete the matrix W\n if abs(wa-wa_) + abs(wb-wb_) <= INLIER_DIST_THRE:\n self.W[i, j] = 1\n self.W[j, i] = 1\n self.W[len_of_matches, j] += 1\n\n # Choose the best inlier features\n self.best_matches = []\n candidate = np.arange(len_of_matches)\n while True:\n best_matchIdx = self.find_most_compatible_match(candidate)\n if not best_matchIdx or best_matchIdx == -1: # in case no best match is found\n break\n else:\n self.best_matches.append(self.match[best_matchIdx])\n candidate = np.delete(candidate, np.where(candidate == best_matchIdx), axis=0)", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def __finalize(self,final_data):\n\t\tcopy_input_data = copy.deepcopy(self.matrix)\n\t\tbest_matrix = self.__set_format_info(copy_input_data,0)\n\t\tbest_matrix = self.__fill_data(best_matrix,final_data,0)\n\t\tmin_penalty = lost_point(best_matrix)\n\t\tbest_mask_pattern = 0\n\t\tfor i in range(1,8):\n\t\t\tcopy_input_data = copy.deepcopy(self.matrix)\n\t\t\ttemp_matrix = self.__set_format_info(copy_input_data,i)\n\t\t\ttemp_matrix = self.__fill_data(temp_matrix,final_data,i)\n\t\t\tpenalty = lost_point(temp_matrix)\n\n\t\t\tif penalty < min_penalty:\n\t\t\t\tbest_matrix = copy.deepcopy(temp_matrix)\n\t\t\t\tbest_mask_pattern = i\n\t\t\t\tmin_penalty = penalty\n\n\t\treturn best_matrix,best_mask_pattern", "def ransac(cloud, sacmodel):\n # Create the segmentation object\n seg = cloud.make_segmenter()\n\n # Set the model you wish to fit \n seg.set_model_type(sacmodel)\n seg.set_method_type(pcl.SAC_RANSAC)\n\n # Max distance for a point to be considered fitting the model\n # Experiment with different values for max_distance \n # for segmenting the table\n max_distance = 0.01\n seg.set_distance_threshold(max_distance)\n\n # Call the segment function to obtain set of inlier indices and model coefficients\n inliers, coefficients = seg.segment()\n return inliers, coefficients", "def scoreCirc_PassiveFilter_2(circuit, gen, indi, makeRedundancyInMatrix):\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = 
float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_2(gen, indi)\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n\n slope = np.array(results['dumpingSlope']['nominal'], dtype=float)\n if np.isnan(slope):\n disfCount = disfCount + 1\n slo = 0\n else:\n slo = 0 if slope>60 else 60-slope\n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n disfCount = disfCount + 1\n bw = 0\n else:\n bw = abs(bandwidth-1000)/100\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n #print 10*r, g, d, slo, bw\n score = 10*r + g + d + slo + bw\n\n if disfCount > 0:\n score += np.exp(disfCount) * 1e3\n\n #score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def calcIntrasec(self, gui):\n count = len(self.points)\n numpoints = (self.chessSize[0]*self.chessSize[1])\n\n #create matrices that are needed to compute calibration\n mat = cv.CreateMat(3,3,cv.CV_32FC1)\n distCoeffs = cv.CreateMat(4,1,cv.CV_32FC1)\n p3d = cv.CreateMat(count,3,cv.CV_32FC1) #compute 3D points\n p2d = cv.CreateMat(count,2,cv.CV_32FC1) #compute 2D points\n pointCounts = cv.CreateMat( self.nframe ,1,cv.CV_32SC1) #give numpoints per images\n cv.Set(pointCounts,numpoints)\n rvecs = cv.CreateMat(self.nframe,3,cv.CV_32FC1)\n tvecs = cv.CreateMat(self.nframe,3,cv.CV_32FC1)\n\n i = 0\n row = 0\n col = 0\n cv.Set(p3d,0.0) #to set every values to 0.0... 
and not set Z value\n\n #this compute points in row and cols...\n for p in self.points:\n p2d[i,0] = p[0]\n p2d[i,1] = p[1]\n \n p3d[i,0] = col\n p3d[i,1] = row\n col+=1\n if col >= self.chessSize[0]: \n row+=1\n col=0\n if row >= self.chessSize[1]:\n row = 0\n i+=1\n\n #and now, calibrate...\n cv.CalibrateCamera2(p3d, p2d, pointCounts, self.framesize, mat, distCoeffs, rvecs, tvecs, flags=0)\n gui.setMessage(\"Intrasinc camera parameters checked\")\n\n return (mat, distCoeffs)", "def RANSAC(kp1, kp2, iterations):\n k_max = 0\n m_foe = (0, 0)\n m_inliers = []\n m_outliers = []\n for k in range(iterations):\n # random select 2 different points as sample\n sample = np.random.randint(0, len(kp1), 2)\n if sample[0] == sample[1]:\n continue\n\n # calculate the line through the 2 points\n p1 = kp1[sample[0]], kp2[sample[0]]\n p2 = kp1[sample[1]], kp2[sample[1]]\n\n # the intersection\n foe = get_intersect(p1, p2)\n if foe == (np.inf, np.inf):\n continue\n\n # calculate the inliers and outliers\n inliers, outliers = get_inliers(kp1, kp2, foe, 5)\n\n # update the best feo\n if len(inliers) > k_max:\n k_max = len(inliers)\n m_foe = foe\n m_inliers = inliers\n m_outliers = outliers\n\n return k_max, m_foe, m_inliers, m_outliers", "def scoreCirc_CmosVoltageReference(circuit, gen, indi, makeRedundancyInMatrix): #TODO 6.9.2016 napisi cost function ki se sklada z evaluateCmosVoltageRef\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n disfCount = 0\n \n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 
0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = vdd_s_t1 + vdd_s_t1_d + \\\n\t vdd_s_t2 + vdd_s_t2_d + \\\n\t vdd_s_t3 + vdd_s_t3_d + \\\n\t vdd_s_r1 + vdd_s_r1_d + \\\n\t vdd_s_r2 + vdd_s_r2_d + \\\n\t vdd_s_r3 + vdd_s_r3_d + \\\n\t (100*powe)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def ani1_feature(Z, R):\n assert len(Z) == R.shape[1]\n assert R.shape[0] == 3\n atom_Z_arr = np.asarray(Z)\n atom_R_arr = R.T\n '''constants'''\n rad_cutoff = 4.6\n ang_cutoff = 3.1\n rad_shifts = np.asarray([\n 5.0000000e-01, 7.5625000e-01, 1.0125000e+00, 1.2687500e+00,\n 1.5250000e+00, 1.7812500e+00, 2.0375000e+00, 2.2937500e+00,\n 2.5500000e+00, 2.8062500e+00, 3.0625000e+00, 3.3187500e+00,\n 3.5750000e+00, 3.8312500e+00, 4.0875000e+00, 4.3437500e+00])\n rad_eta = 1.6000000e+01\n zeta = 8.0000000e+00\n ang_theta_shifts = np.asarray([\n 0.0000000e+00, 7.8539816e-01, 1.5707963e+00, 2.3561945e+00,\n 3.1415927e+00, 3.9269908e+00, 4.7123890e+00, 5.4977871e+00])\n ang_eta = 6.0000000e+00\n ang_shifts = np.asarray([\n 5.0000000e-01, 1.1500000e+00, 1.8000000e+00, 2.4500000e+00])\n\n\n '''compute'''\n ref_Z_list = [1, 6, 7, 8]\n ref_Z_indices_list = [atom_Z_arr == ref_Z for ref_Z in ref_Z_list]\n atom_feat_list = []\n for atom_i, (atom_Z, atom_R) in enumerate(zip(atom_Z_arr, atom_R_arr)):\n atom_feat = []\n for ref_Z_i, ref_Z_indices1 in enumerate(ref_Z_indices_list):\n env_indices1 = 
ref_Z_indices1.copy()\n env_indices1[atom_i] = False\n env_R_arr1 = atom_R_arr[env_indices1]\n dist1 = calc_dist(atom_R, env_R_arr1)\n rad_func = piece_wise_cutoff(dist1, rad_cutoff)\n rad_exp = np.exp(-rad_eta * (dist1 - rad_shifts[:, np.newaxis])) # num_shifts by num_env\n rad_vec = rad_exp.dot(rad_func)\n atom_feat.append(rad_vec)\n for ref_Z_indices2 in ref_Z_indices_list[ref_Z_i:]:\n env_indices2 = ref_Z_indices2.copy()\n env_indices2[atom_i] = False\n env_R_arr2 = atom_R_arr[env_indices2]\n dist2 = calc_dist(atom_R, env_R_arr2)\n ang_func1 = piece_wise_cutoff(dist1, ang_cutoff)\n ang_func2 = piece_wise_cutoff(dist2, ang_cutoff)\n prod_func12 = ang_func1[:, np.newaxis] * ang_func2\n angle = calc_angle(atom_R, env_R_arr1, env_R_arr2)\n\n cos_factor = (1.0 + np.cos(angle[np.newaxis, :, :] - ang_theta_shifts[:, np.newaxis, np.newaxis]))**zeta # num_theta_shifts by num_env1 by num_env2\n ang_exp = np.exp(-ang_eta * (((dist1[:, np.newaxis] + dist2) / 2)[np.newaxis, :, :] - ang_shifts[:, np.newaxis, np.newaxis])**2) # num_shifts by num_env1 by num_env2\n cos_exp = cos_factor[:, None, :, :] * ang_exp[None, :, :, :]\n\n ang_vec = (cos_exp * prod_func12[np.newaxis, np.newaxis, :, :]).sum(axis=-1).sum(axis=-1).ravel()\n ang_vec *= 2**(1.0 - zeta)\n atom_feat.append(ang_vec)\n atom_feat_list.append(np.concatenate(atom_feat))\n return np.stack(atom_feat_list)", "def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n badSweep = 0\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateVoltageRef(gen, indi)\n disfCount = 0\n \n vdd_sweep = np.array(results['vout_vdd']['nominal'], dtype=float) #This line changes Nones to np.nans\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep)):\n disfCount = disfCount + 1\n vdd_s = 0\n vdd_s_d = 0\n #print \"tukej!\", vdd_sweep_scale\n else:\n x = np.median(vdd_sweep)\n vdd_s = abs(x - VREF) #if x > VREF else 0\n vdd_s_d = np.max(vdd_sweep) - np.min(vdd_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n #print \"tukiii\", vdd_sweep_scale\n if (vdd_sweep_scale[-1]<20): #20V\n\tbadSweep = badSweep + 1\n \n rload_sweep = np.array(results['vout_rload']['nominal'], dtype=float)\n rload_sweep_scale = np.array(results['vout_rload_scale']['nominal'], dtype=float)\n # if measurement is empty\n if np.any(np.isnan(rload_sweep)):\n disfCount = disfCount + 1\n rload_s = 0\n rload_s_d = 0\n else:\n x = np.median(rload_sweep)\n rload_s = abs(x - VREF) #if x > VREF else 0\n rload_s_d = np.max(rload_sweep) - np.min(rload_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n if (rload_sweep_scale[-1]<100e3): #100kOhm\n\tbadSweep = badSweep + 1\n \n temp_sweep = np.array(results['vout_temp']['nominal'], dtype=float)\n temp_sweep_scale = np.array(results['vout_temp_scale']['nominal'], dtype=float)\n # if 
measurement is empty OR sweep did not finish completely - check last scale value in runme2!!\n if np.any(np.isnan(temp_sweep)):\n disfCount = disfCount + 1\n temp_s = 0\n temp_s_d = 0\n else:\n x = np.median(temp_sweep)\n temp_s = abs(x - VREF) #if x > VREF else 0\n temp_s_d = np.max(temp_sweep) - np.min(temp_sweep)\n if (temp_sweep_scale[-1]<120): #120 deg celsius\n\tbadSweep = badSweep + 1\n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = (vdd_s) + (vdd_s_d) + 5*(rload_s) + 5*(rload_s_d) + (100*temp_s) + (100*temp_s_d) + (100*powe) + badSweep*100\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, temp_s, temp_s_d, powe\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 100*temp_s, 100*temp_s_d, 100*powe\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def question5():\n \n null_dist = {38: 1, 39: 1, 40: 6, 41: 8, 42: 17, 43: 31, 44: 41, 45: 47, 46: 65, 47: 76,\n 48: 66, 49: 63, 50: 62, 51: 69, 52: 69, 53: 57, 54: 49, 55: 37, 56: 26, 57: 30,\n 58: 33, 59: 23, 60: 25, 61: 20, 62: 5, 63: 11, 64: 11, 65: 7, 66: 5, 67: 4,\n 68: 5, 69: 4, 70: 5, 71: 3, 72: 3, 73: 4, 74: 1, 75: 2, 80: 3, 81: 1, 82: 1,\n 85: 1, 87: 1, 92: 1}\n \n # find mean\n mean = 0\n for key in null_dist.keys():\n mean += key / float(1000) * null_dist[key]\n \n print mean\n \n # find standard deviation\n variance = 0\n for key in null_dist.keys():\n variance += (key - mean) ** 2 / float(1000) * null_dist[key]\n stdev = variance ** 0.5\n \n print stdev\n \n # find z-score\n score = 875\n z_score = (score - mean) / stdev\n \n print z_score", "def get_stain_matrix(I):", "def fit_greedy(data, nnbr=10, threshold=0.05, refit=refit_pll):\n n,m = data.shape;\n L = np.zeros((n,n)) # initialize parameters\n scores = np.zeros(n) \n data = data.astype(int)\n for i in range(n):\n Ni = []\n while (len(Ni)<nnbr):\n Vi = (0*data[i,:] + sum(data[j,:]*(2**jj) for jj,j in enumerate(Ni))).astype(int)\n Vsz = int(Vi.max()+1)\n for j in range(n):\n if j==i or j in Ni: scores[j]=0.; continue\n pIJV = Factor( [Var(0,2),Var(1,2),Var(2,Vsz)] , 0.)\n # pIJV[data[i,:],data[j,:],Vi] += 1. 
# Test??\n for k in range(m): pIJV[data[i,k],data[j,k],Vi[k]] += 1.\n pV = pIJV.marginal([2]); pV /= (pV.sum()+1e-20);\n pIJV /= (pIJV.sum([0])+1e-20)\n scores[j] = ((pIJV.condition({0:1,1:1})-pIJV.condition({0:1,1:0})).abs()*pV).sum()\n jmax = int(np.argmax(scores))\n if scores[jmax] < threshold: break\n Ni.append(jmax)\n # TODO: prune back each list?\n #print(i,\" : \",Ni)\n L[i,Ni] = 1.\n L = L*L.T # \"and\" connectivity: keep only if edges (i,j) and (j,i) present?\n model = Ising(L);\n refit(model,data)\n return model", "def _calc_corrections(self): \n searchIter= self._niter-1\n while searchIter > 0:\n trySavefilename= self._createSavefilename(searchIter)\n if os.path.exists(trySavefilename):\n trySavefile= open(trySavefilename,'rb')\n corrections= sc.array(pickle.load(trySavefile))\n trySavefile.close()\n break\n else:\n searchIter-= 1\n if searchIter == 0:\n corrections= sc.ones((self._npoints,2))\n for ii in range(searchIter,self._niter):\n if ii == 0:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta)\n else:\n currentDF= self._dftype(surfaceSigma=self._surfaceSigmaProfile,\n beta=self._beta,\n corrections=corrections,\n npoints=self._npoints,\n rmax=self._rmax,\n savedir=self._savedir,\n interp_k=self._interp_k)\n newcorrections= sc.zeros((self._npoints,2))\n for jj in range(self._npoints):\n thisSurface= currentDF.surfacemass(self._rs[jj],\n use_physical=False)\n newcorrections[jj,0]= currentDF.targetSurfacemass(self._rs[jj],use_physical=False)/thisSurface\n newcorrections[jj,1]= currentDF.targetSigma2(self._rs[jj],use_physical=False)*thisSurface\\\n /currentDF.sigma2surfacemass(self._rs[jj],\n use_physical=False)\n #print(jj, newcorrections[jj,:])\n corrections*= newcorrections\n #Save\n picklethis= []\n for arr in list(corrections):\n picklethis.append([float(a) for a in arr])\n save_pickles(self._savefilename,picklethis) #We pickle a list for platform-independence)\n return corrections", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. 
Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def solve_least_squares(train_feat, res_feat):\n\n training = train_feat\n\n result_training = res_feat\n\n trans = get_transpose(training)\n\n mat_mul_trans = matrix_mul(trans, training)\n\n mat_mul_trans = get_inverse(mat_mul_trans)\n\n second_prod = matrix_mul(mat_mul_trans, trans)\n\n return matrix_mul(second_prod, result_training)", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to permute x according to permutation matrix P\n \n return numpy.dot( P, x )", "def ransac(keypoints1, keypoints2, matches, sampling_ratio=0.5, n_iters=500, threshold=20):\n N = matches.shape[0]\n n_samples = int(N * sampling_ratio)\n\n # Please note that coordinates are in the format (y, x)\n matched1 = pad(keypoints1[matches[:,0]])\n matched2 = pad(keypoints2[matches[:,1]])\n matched1_unpad = keypoints1[matches[:,0]]\n matched2_unpad = keypoints2[matches[:,1]]\n\n max_inliers = np.zeros(N)\n n_inliers = 0\n\n # RANSAC iteration start\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n return H, matches[max_inliers]", "def estimate_lsq(fp, tp):\n\n fp = 
asarray(fp, float)\n tp = asarray(tp, float)\n\n if fp.shape[0] != 3:\n raise RuntimeError, 'number of rows in fp must be 3 (there were %d)' % fp.shape[0]\n\n if tp.shape[0] != 3:\n raise RuntimeError, 'number of rows in tp must be 3 (there were %d)' % tp.shape[0]\n\n if fp.shape[1] != tp.shape[1]:\n raise RuntimeError, 'number of points do not match'\n\n #condition points (important for numerical reasons)\n #--from points--\n m = mean(fp[:2], axis=1)\n maxstd = max(std(fp[:2], axis=1))\n if abs(maxstd) < 1e-8:\n # This is a degenerate configuration\n raise linalg.LinAlgError\n\n C1 = diag([1/maxstd, 1/maxstd, 1]) \n C1[0][2] = -m[0]/maxstd\n C1[1][2] = -m[1]/maxstd\n fp = dot(C1,fp)\n\n #--to points--\n m = mean(tp[:2], axis=1)\n #C2 = C1.copy() #must use same scaling for both point sets\n maxstd = max(std(tp[:2], axis=1))\n if abs(maxstd) < 1e-8:\n # This is a degenerate configuration\n raise linalg.LinAlgError\n\n C2 = diag([1/maxstd, 1/maxstd, 1])\n C2[0][2] = -m[0]/maxstd\n C2[1][2] = -m[1]/maxstd\n tp = dot(C2,tp)\n\n #create matrix for linear method, 2 rows for each correspondence pair\n nbr_correspondences = fp.shape[1]\n A = zeros((2*nbr_correspondences,9))\n for i in range(nbr_correspondences): \n A[2*i] = [-fp[0][i],-fp[1][i],-1,0,0,0,tp[0][i]*fp[0][i],tp[0][i]*fp[1][i],tp[0][i]]\n A[2*i+1] = [0,0,0,-fp[0][i],-fp[1][i],-1,tp[1][i]*fp[0][i],tp[1][i]*fp[1][i],tp[1][i]]\n\n U,S,V = linalg.svd(A)\n\n H = V[8].reshape((3,3)) \n\n #decondition and return\n return dot(linalg.inv(C2),dot(H,C1))", "def SCF(N, R, Zeta1, Zeta2, Za, Zb, G):\n Crit = 1e-11 # Convergence critera\n Maxit = 250 # Maximum number of iterations\n Iter = 0\n\n ######## STEP 1. Guess an initial density matrix ########\n # Use core hamiltonian for initial guess of F, I.E. (P=0)\n P = np.zeros([2, 2])\n\n Energy = 0.0\n\n while (Iter < Maxit):\n Iter += 1\n print(Iter)\n\n ######## STEP 2. calculate the Fock matrix ########\n # Form two electron part of Fock matrix from P\n G = np.zeros([2, 2]) # This is the two electron contribution in the equations above\n for i in range(2):\n for j in range(2):\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])\n\n # Add core hamiltonian H^CORE to get fock matrix\n F = H + G\n\n # Calculate the electronic energy\n Energy = np.sum(0.5 * P * (H + F))\n\n print('Electronic energy = ', Energy)\n\n ######## STEP 3. Calculate F' (remember S^-1/2 is X and S^1/2 is X.T) ########\n G = np.matmul(F, X)\n Fprime = np.matmul(X.T, G)\n\n ######## STEP 4. Solve the eigenvalue problem ########\n # Diagonalise transformed Fock matrix\n Diag(Fprime, Cprime, E)\n\n ######## STEP 5. Calculate the molecular orbitals coefficients ########\n # Transform eigen vectors to get matrix C\n C = np.matmul(X, Cprime)\n\n ######## STEP 6. Calculate the new density matrix from the old P ########\n Oldp = np.array(P)\n P = np.zeros([2, 2])\n\n # Form new density matrix\n for i in range(2):\n for j in range(2):\n # Save present density matrix before creating a new one\n for k in range(1):\n P[i, j] += 2.0 * C[i, k] * C[j, k]\n\n ######## STEP 7. 
Check to see if the energy has converged ########\n Delta = 0.0\n # Calculate delta the difference between the old density matrix Old P and the new P\n Delta = (P - Oldp)\n Delta = np.sqrt(np.sum(Delta ** 2) / 4.0)\n print(\"Delta\", Delta)\n\n # Check for convergence\n if (Delta < Crit):\n # Add nuclear repulsion to get the total energy\n Energytot = Energy + Za * Zb / R\n print(\"Calculation converged with electronic energy:\", Energy)\n print(\"Calculation converged with total energy:\", Energytot)\n print(\"Density matrix\", P)\n print(\"Mulliken populations\", np.matmul(P, S))\n print(\"Coeffients\", C)\n\n break", "def screening_graph_estimate(S, lambdaL, p, maxdf, idx_scr, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n nscr = idx_scr.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(nscr).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.copy(idx_scr[:, m])\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext > 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(nscr):\n w_idx = idx_i[j]\n if w_idx != -1:\n r = S[m, w_idx]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n idx_a[size_a] = w_idx\n size_a += 1\n idx_i[j] = -1\n else:\n w1[w_idx] = 0\n w0[w_idx] = w1[w_idx]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 1e-4\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x", "def test_main_interior_sparse():\n\n bound = None\n Name, obj_Netlib = benchmark()\n Name_work = benchmark_work()\n name_benchmark = {}\n\n # Dict name\n j = 0\n for name in Name:\n name_benchmark[name] = obj_Netlib[j]\n j += 1\n\n line = open(\"conclusion1.txt\", \"w\")\n line.write(\n \"{0:17s} {2:>17s} {3:>20s} {1:>20s} {4:>20s} {5:>20s}\\r\\n\".format(\n \"Name\", \"Obj fun\", \"Interi time\", \"Scipy time\", \"Interi\", \"Scipy\"\n )\n )\n line.close()\n\n for i in Name_work[1:12]:\n # REMARK usually use Name_work[1:12]\n print(\"\\n\\nProblem name: {}\".format(i))\n c, Aineq, bineq, Aeq, beq, lb, ub = create_problem_from_mps_matlab(i)\n\n # Scipy\n start_time1 = time.time()\n bounds = create_bound_for_scipy(lb, ub)\n res = linprog(\n c=c,\n A_ub=Aineq,\n b_ub=bineq,\n A_eq=Aeq,\n b_eq=beq,\n bounds=bounds,\n method=\"interior-point\",\n options={\"disp\": True},\n )\n # res = np.nan\n end_time1 = time.time()\n\n # Interior\n start_time2 = time.time()\n # obj_fun = interior_sparse(A=A, b=b, c=c, cTlb=cTb, tol=1e-8)\n obj_fun = new_interior_sparse(\n c=c, Aineq=Aineq, bineq=bineq, Aeq=Aeq, beq=beq, lb=lb, ub=ub, tol=1e-6\n )\n end_time2 = time.time()\n\n # information\n print(\"File name : {}\".format(i))\n 
print(\"obj fun Netlib: {0}\".format(name_benchmark[i]))\n print(\"obj fun interi: {0}\".format(obj_fun))\n print(\"obj fun scipy : {0}\".format(res.fun))\n # print(\"obj fun scipy : {0}\".format(np.nan))\n print(\"interior time : {}\".format(end_time2 - start_time2))\n print(\"scipy time : {}\".format(end_time1 - start_time1))\n\n # save to text file\n line = open(\"conclusion1.txt\", \"a\")\n line.write(\n \"{0:17s} {2:17.2f} {3:>20.2f} {1:20.2f} {4:20.2f} {5:20.2f}\\r\\n\".format(\n i,\n name_benchmark[i],\n end_time2 - start_time2,\n end_time1 - start_time1,\n obj_fun,\n res.fun,\n # np.nan\n )\n )\n line.close()", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + 
points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def one_against_rest_all(expi,X,Y):\n pp=expi.pp\n Ntot=Y.size\n numfeat=X.shape[1]\n numclass=len(expi.ddtr)\n W=np.zeros((numfeat,numclass))\n II=range(Ntot)\n # Initial stepsize\n td=1\n # Check stepsize factor isn't too large\n eps=.01\n locltp=pp.pltp\n if pp.stoch<0:\n locltp=min(pp.pltp,abs(1./pp.stoch)-eps)\n print('Time Step', pp.pltp, locltp)\n up=0\n dup=0\n # Iterations.\n for it in range(pp.numit):\n np.random.shuffle(II)\n up=0\n dup=0\n fac1=0\n # Loop over data\n for i in II:\n # Penalized weights\n fac2=locltp/td\n if pp.stoch<0:\n fac1=(1.+pp.stoch*fac2)\n W*=fac1\n\n tempx=X[i,:]\n tempy=Y[i]\n h=np.dot(tempx,W)\n # We are using binary features so updates in the direction of the example occurs only when the features are nonzero\n # This is NOT the general case.\n pi=tempx>0\n # Update perceptrons for each class.\n for c in range(numclass):\n # A different weight constrain version -Max weight contraint\n if pp.stoch==0:\n if tempy==c:\n tempw=W[:,c]<=pp.Jmax-fac2\n if np.sum(tempw)<len(tempw):\n print('hit upper bound')\n tempw.shape=pi.shape\n pi=np.logical_and(pi, tempw)\n else:\n tempw=W[:,c]>=-pp.Jmax+fac2\n if np.sum(tempw)<len(tempw):\n print('hit lower bound')\n tempw.shape=pi.shape\n pi=np.logical_and(pi,tempw)\n # Update weights on class\n if (tempy==c and h[c]<=pp.deltaP):\n dup+=1\n # Count number of updates.\n up+=np.count_nonzero(pi)\n W[pi,c]=W[pi,c]+fac2\n # Update weight for off class examples.\n elif (tempy!=c and h[c]>=-pp.deltaD):\n dup+=1\n # Count number of updates.\n up+=np.count_nonzero(pi)\n W[pi,c]=W[pi,c]-fac2\n # Reduce time step after a sweep through all data.\n if pp.stoch<0:\n td=td+1\n # Show energy value.\n if (np.mod(it,pp.showing)==0):\n DD=0\n for c in range(numclass):\n Yc=2*(Y.T==c)-1\n DD+=np.sum(np.maximum(np.zeros(Ntot),pp.deltaP-np.dot(X,W[:,c])*Yc));\n PP=-.5*pp.stoch*np.sum(W*W)\n EE=DD+PP\n print('td ', td, 'it ', it, 'Number of syn changes ', up, ' at ', dup, ' Data term ', DD, 'Prior term ', PP, 'Total ', EE)\n # Nothing is changing - stop the algorithm.\n if up==0:\n break\n DD=0\n # Final energy.\n for c in range(numclass):\n DD+=np.sum(np.maximum(np.zeros(Ntot),pp.deltaP-np.dot(X,W[:,c])*(2*(Y.T==c)-1)));\n PP=-.5*pp.stoch*np.sum(W*W)\n EE=DD+PP\n print('td ', td, 'it ', it, 'Number of syn changes ', up, ' at ', dup, ' Data term ', DD, 'Prior term ', PP, 'Total ', EE)\n return W", "def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples 
data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}", "def 
reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,\r\n thresh=.01,L=5): \r\n \r\n# if type(fx) is list:\r\n# fx=np.array(fx)\r\n# try:\r\n# fn,fm=fx.shape\r\n# if fm>fn:\r\n# fm,fn=fx.shape\r\n# except ValueError:\r\n# fn=len(fx)\r\n# fm=1\r\n# if fm>1:\r\n# print 'computing cross spectra'\r\n# #compute the analytic signal of function f and dctrend\r\n# #fa=sps.hilbert(dctrend(fx[0]))\r\n# #fb=sps.hilbert(dctrend(fx[1]))\r\n# fa=fx[0]\r\n# fb=fx[1]\r\n# fa=fa.reshape(fn)\r\n# fb=fb.reshape(fn)\r\n# else:\r\n# fa=fx\r\n# fa=fa.reshape(fn)\r\n# fb=fa.copy()\r\n\r\n \r\n nx=len(fx) \r\n \r\n #compute gaussian window\r\n h=gausswin(nh,alpha=alpha)\r\n #h=np.hanning(nh)\r\n lh=(nh-1)/2\r\n \r\n #compute ramp window\r\n th=h*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n #compute derivative of window\r\n dh=dwindow(h)\r\n \r\n #make a time list of indexes\r\n tlst=np.arange(start=0,stop=nx,step=tstep)\r\n nt=len(tlst)\r\n \r\n #make frequency list for plotting\r\n flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]\r\n \r\n #initialize some time-frequency arrays\r\n tfh=np.zeros((nfbins,nt),dtype='complex128')\r\n tfth=np.zeros((nfbins,nt),dtype='complex128')\r\n tfdh=np.zeros((nfbins,nt),dtype='complex128')\r\n \r\n #compute components for reassignment\r\n for ii,tt in enumerate(tlst):\r\n #create a time shift list\r\n tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),\r\n stop=min([np.round(nx/2.),lh,nx-tt-1])+1)\r\n #compute the frequency spots to be calculated\r\n ff=np.remainder(nfbins+tau,nfbins)\r\n #make lists of data points for each window calculation\r\n xlst=tt+tau\r\n hlst=lh+tau\r\n normh=np.sqrt(np.sum(abs(h[hlst])**2))\r\n tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh\r\n tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh\r\n tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh\r\n \r\n #compute Fourier Transform\r\n spech=np.fft.fft(tfh,axis=0)\r\n specth=np.fft.fft(tfth,axis=0)\r\n specdh=np.fft.fft(tfdh,axis=0)\r\n \r\n #get only positive frequencies\r\n spech=spech[nfbins/2:,:]\r\n specth=specth[nfbins/2:,:]\r\n specdh=specdh[nfbins/2:,:]\r\n \r\n #check to make sure no spurious zeros floating around\r\n szf=np.where(abs(spech)<1.E-6)\r\n spech[szf]=0.0+0.0j\r\n zerofind=np.nonzero(abs(spech))\r\n twspec=np.zeros((nfbins/2,nt),dtype='float')\r\n dwspec=np.zeros((nfbins/2,nt),dtype='float')\r\n twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))\r\n dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/\r\n spech[zerofind])/(np.pi))\r\n \r\n #get shape of spectrogram\r\n nf,nt=spech.shape\r\n \r\n #-----calculate s-method-----\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n\r\n #make and empty array of zeros\r\n sm=np.zeros_like(spech)\r\n \r\n #put values where L cannot be value of L, near top and bottom\r\n sm[0:L/2,:]=abs(spech[0:L/2,:])**2\r\n sm[-L/2:,:]=abs(spech[-L/2:,:])**2\r\n\r\n #calculate s-method\r\n for ff in range(L/2,nf-L/2-1):\r\n sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),\r\n axis=0))/L\r\n \r\n #------compute reassignment----- \r\n\r\n \r\n rtfarray=np.zeros((nfbins/2,nt))\r\n \r\n threshold=thresh*np.max(abs(sm))\r\n \r\n for nn in range(nt):\r\n for kk in range(nf):\r\n if abs(spech[kk,nn])>threshold:\r\n #get center of gravity index in time direction from spectrogram \r\n nhat=int(nn+twspec[kk,nn])\r\n nhat=int(min([max([nhat,1]),nt-1]))\r\n #get center of gravity index in frequency direction from spec\r\n khat=int(kk-dwspec[kk,nn])\r\n 
khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,\r\n nfbins/2))\r\n rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])\r\n else:\r\n rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]\r\n\r\n #place values where L cannot be L \r\n rtfarray[:L/2,:]=abs(sm[:L/2,:])\r\n rtfarray[-L/2:,:]=abs(sm[-L/2:,:])\r\n \r\n tz=np.where(rtfarray==0)\r\n rtfarray[tz]=1.0\r\n \r\n tz=np.where(sm==0.0)\r\n sm[tz]=1.0 \r\n \r\n #scale\r\n rtfarray=abs(rtfarray)\r\n \r\n return rtfarray,tlst,flst,sm", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. 
fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def cp_apr(X, Y1, R, Minit=None, tol=1e-4, maxiters=1000, maxinner=50,\n epsilon=1e-10, kappatol=1e-10, kappa=1e-2):\n N = X.ndims()\n \n ## Random initialization\n if Minit == None:\n F = tensorTools.randomInit(X.shape, R)\n Minit = ktensor.ktensor(np.ones(R), F);\n nInnerIters = np.zeros(maxiters);\n\n ## Initialize M and Phi for iterations\n M = Minit\n M.normalize(1)\n Phi = [[] for i in range(N)]\n kktModeViolations = np.zeros(N)\n kktViolations = -np.ones(maxiters)\n nViolations = np.zeros(maxiters)\n\n lambda2=0.1\n lambda3=0.1\n sita=np.random.rand(R+1,1);\n ## statistics\n cpStats = np.zeros(7)\n '''\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n print M.U[0][1,:]\n print M.U[0].shape\n print Demog[1]\n print DemoU[1]\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n '''\n for iteration in range(maxiters):\n startIter = time.time()\n isConverged = True;\n for n in range(N):\n startMode = time.time()\n ## Make adjustments to M[n] entries that violate complementary slackness\n if iteration > 0:\n V = np.logical_and(Phi[n] > 1, M.U[n] < kappatol)\n if np.count_nonzero(V) > 0:\n nViolations[iteration] = nViolations[iteration] + 1\n #print 'V:',V.shape,V.dtype\n #print 'M.U[n]',M.U[n].shape,M.U[n].dtype\n M.U[n][V > 0] = M.U[n][V > 0] + kappa\n if n==0:\n sita=__solveLinear(M.U[n],Y1,lambda3)\n # lr=LogisticRegression()\n #sita=lr.fit(M.U[n],Y1).coef_\n #print 'sita'\n #print sita\n #print 'demoU'\n #print DemoU[0]\n M, Phi[n], inner, kktModeViolations[n], isConverged = __solveSubproblem1(X, M, n, maxinner, isConverged, epsilon, tol,sita,Y1, lambda2)\n else:\n M, Phi[n], inner, kktModeViolations[n], isConverged = __solveSubproblem0(X, M, n, maxinner, isConverged, epsilon, tol)\n elapsed = time.time() - startMode\n # only write the outer iterations for now\n #cpStats = np.vstack((cpStats, np.array([iteration, n, inner, tensorTools.lsqrFit(X,M), tensorTools.loglikelihood(X,[M]), kktModeViolations[n], elapsed])))\n\n kktViolations[iteration] = np.max(kktModeViolations)\n elapsed = time.time()-startIter\n #cpStats = np.vstack((cpStats, np.array([iter, -1, -1, kktViolations[iter], __loglikelihood(X,M), elapsed])))\n print(\"Iteration {0}: Inner Its={1} with KKT violation={2}, nViolations={3}, and elapsed time={4}\".format(iteration, nInnerIters[iteration], kktViolations[iteration], nViolations[iteration], elapsed))\n if isConverged:\n break\n\n cpStats = np.delete(cpStats, (0), axis=0) # delete the first row which was superfluous\n ### Print the statistics\n #fit = tensorTools.lsqrFit(X,M)\n #ll = tensorTools.loglikelihood(X,[M])\n print(\"Number of iterations = {0}\".format(iteration))\n #print(\"Final least squares fit = {0}\".format(fit))\n #print(\"Final log-likelihood = {0}\".format(ll))\n print(\"Final KKT Violation = {0}\".format(kktViolations[iteration]))\n print(\"Total inner iterations = {0}\".format(np.sum(nInnerIters)))\n \n #modelStats = {\"Iters\" : iter, \"LS\" : fit, \"LL\" : ll, \"KKT\" : kktViolations[iteration]}\n return M, 
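# Hedged sketch of the sparse-assembly pattern used by _matrix_store_smooth_downhill above:
# collect COO triplets, row-normalise so each row sums to one, and store CSR for fast
# repeated application. The triplet values below are placeholders, not the slope weights.
import numpy as np
from scipy import sparse

rows = np.array([0, 0, 1, 2, 2])
cols = np.array([1, 2, 2, 0, 1])
vals = np.array([0.2, 0.8, 1.0, 0.5, 0.5])
mat = sparse.coo_matrix((vals, (rows, cols)), shape=(3, 3)).tocsr()
row_sums = np.asarray(mat.sum(axis=1)).ravel()
row_sums[row_sums == 0.0] = 1.0                 # avoid division by zero on empty rows
downhill = sparse.diags(1.0 / row_sums).dot(mat)  # mass-conserving: each row sums to 1
downhill.eliminate_zeros()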
cpStats", "def determine_analytic_solution(self):\n\n self._Janalytic = np.where(self.xr <= self.xint, self.S, 0.5 * self.S)\n self._Hanalytic = np.where(self.xr <= self.xint, 0, 0.25 * self.S)\n self._Kanalytic = np.where(self.xr <= self.xint, 1./3. * self.S,\n 1./6. * self.S)", "def calc_F1_cv(SOM_data_cv, GTD_cv, best_cluster_set, persis_thresh, nodes_arr, seas):\r\n blocked_days = GTD_cv.values\r\n SOM_data_node_list_cv = [(SOM_data_cv[:,:,i]*nodes_arr).sum(axis=0).sum(axis=0) for i in range(SOM_data_cv.shape[2])]\r\n SOM_nodenum_cv = xr.concat(SOM_data_node_list_cv, dim = \"node_num\") \r\n \r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodenum_cv, best_cluster_set)\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims = {\"time\": GTD_cv['time']})\r\n blocked_days_clus_xr['time'] = GTD_cv['time']\r\n blocked_days_clus_sel = blocked_days_clus_xr.sel(time = np.isin(blocked_days_clus_xr['time.season'], seas))\r\n GTD_cv_seas = GTD_cv.sel(time = np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_cv_seas, blocked_days_clus_sel) \r\n if F1 == np.nan:\r\n F1 = 0\r\n return F1, prec, recall", "def get_score_matrix(self) -> int:", "def GramSchmidt(A):\r\n n = len(A)\r\n # Finds the number of lists in the list, which is also the number of rows\r\n m = len(A[0])\r\n # Finds the number of elements in list one, which is also the number of columns\r\n V = A\r\n R = [[0]*n for i in range(n)]\r\n # creates an empty list R with dimensions of n rows and n columns\r\n Q = [[0]*m for i in range(n)]\r\n # creates an empty list Q with dimensions of n rows and m columns\r\n inputStatus = True\r\n # inputStatus is true at this point until proven otherwise\r\n for i in range(n):\r\n for j in range(m):\r\n if ((type(A[i][j]) != int) and (type(A[i][j]) != float) and (type(A[i][j]) != complex)):\r\n inputStatus = False\r\n print(\"Invalid Input\")\r\n # this checks each value in the matrix A to make sure it is some time of number, if it isnt a number then the input status will be false \r\n # if the input status is false then an error message will be displayed stating that this is an invalid input\r\n if inputStatus == True:\r\n # if the given list does not fall under the previous if statement then the input status will continue to be true and we can continue to find the QR factorization \r\n for i in range(n):\r\n # for loop which continues as long as there are still lists in A \r\n R[i][i] = norm(V[i])\r\n # Creates the border for the upper triangle matrix R, where each value in the diagonal is the 2 norm of the corresponding vector in the original matrix A \r\n Q[i] = unit(V[i])\r\n # Each vector in Q is the unit vector of the corresponding vector in A \r\n for j in range(i+1,n):\r\n # the position j will be 1 more than the position i \r\n R[j][i] = dot(Q[i],V[j])\r\n # The element in R[i+1][i] is the dot product of Q[i] and V[i+1] \r\n temp = scalarmul(R[j][i],Q[i])\r\n # This is the scalar multiplication of R[i+1][i] and Q[i] which will be labeled as temp \r\n V[j] = subtract(V[j],temp)\r\n # V[j] is the difference between the original V[j] and temp \r\n return[Q,R]", "def __init__(self, M, rat):\n self.M = M\n xc0, _ = np.polynomial.chebyshev.chebgauss(M-0)\n xc1, _ = np.polynomial.chebyshev.chebgauss(M-1)\n xc2, _ = np.polynomial.chebyshev.chebgauss(M-2)\n # vandermonde and inverse vandermonde matrices\n self.V0 = np.polynomial.chebyshev.chebvander(xc0, M-1)\n self.V1 = 
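# Compact NumPy version (a sketch under the same row-vector convention) of the
# GramSchmidt(A) routine above, which depends on helper functions norm/unit/dot/
# scalarmul/subtract not shown there. Rows of Q come out orthonormal and A is
# recovered as R @ Q with R lower triangular.
import numpy as np

def gram_schmidt_rows(A):
    A = np.array(A, dtype=float)
    n = A.shape[0]
    Q = np.zeros_like(A)
    R = np.zeros((n, n))
    V = A.copy()
    for i in range(n):
        R[i, i] = np.linalg.norm(V[i])
        Q[i] = V[i] / R[i, i]
        for j in range(i + 1, n):
            R[j, i] = Q[i] @ V[j]
            V[j] = V[j] - R[j, i] * Q[i]
    return Q, R

A = np.array([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
Q, R = gram_schmidt_rows(A)
assert np.allclose(Q @ Q.T, np.eye(3))   # orthonormal rows
assert np.allclose(R @ Q, A)             # factorisation reproduces A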
np.polynomial.chebyshev.chebvander(xc1, M-2)\n self.V2 = np.polynomial.chebyshev.chebvander(xc2, M-3)\n self.VI0 = np.linalg.inv(self.V0)\n self.VI1 = np.linalg.inv(self.V1)\n self.VI2 = np.linalg.inv(self.V2)\n # differentiation matrices\n DC01 = np.polynomial.chebyshev.chebder(np.eye(M-0)) / rat\n DC12 = np.polynomial.chebyshev.chebder(np.eye(M-1)) / rat\n DC00 = np.row_stack([DC01, np.zeros(M)])\n self.D00 = self.V0.dot(DC00.dot(self.VI0))\n self.D01 = self.V1.dot(DC01.dot(self.VI0))\n self.D12 = self.V2.dot(DC12.dot(self.VI1))\n # boundary condition operators\n self.ibc_dirichlet = np.polynomial.chebyshev.chebvander(1, M-1).dot(self.VI0)\n self.obc_dirichlet = np.polynomial.chebyshev.chebvander(-1, M-1).dot(self.VI0)\n self.ibc_neumann = self.ibc_dirichlet.dot(self.D00)\n self.obc_neumann = self.obc_dirichlet.dot(self.D00)\n # rank reduction operators\n temp = np.zeros([M-1, M-0], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R01 = self.V1.dot(temp.dot(self.VI0))\n temp = np.zeros([M-2, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R12 = self.V2.dot(temp.dot(self.VI1))\n self.R02 = self.R12.dot(self.R01)\n # get poof operator from M-1 --> M\n temp = np.zeros([M, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.P10 = self.V0.dot(temp.dot(self.VI1))", "def solve(self):", "def kkt_check_redund(c, A, x, basis, i, tol=1e-8, threshold=1e-3, max_iter=100000, verbose=True):\n improvement = False\n init_actives = [i]\n ab = np.arange(A.shape[0])\n a = np.arange(A.shape[1])\n\n maxupdate = 10\n B = BGLU(A, basis, maxupdate, False)\n iteration = 0\n while True:\n bl = np.zeros(len(a), dtype=bool)\n bl[basis] = 1\n xb = x[basis]\n\n try:\n l = B.solve(c[basis], transposed=True) # similar to v = linalg.solve(B.T, c[basis])\n except LinAlgError:\n np.set_printoptions(threshold=np.inf)\n mp_print('This matrix seems to be singular:', PRINT_IF_RANK_NONZERO=True)\n mp_print(B.B, PRINT_IF_RANK_NONZERO=True)\n mp_print('Iteration:' + str(iteration), PRINT_IF_RANK_NONZERO=True)\n mp_print('u:', PRINT_IF_RANK_NONZERO=True)\n mp_print(u, PRINT_IF_RANK_NONZERO=True)\n print(\"LinAlgError in B.solve\")\n np.set_printoptions(threshold=1000)\n return True, 1\n\n sn = c - l.dot(A) # reduced cost\n sn = sn[~bl]\n\n if np.all(sn >= -tol): # in this case x is an optimal solution\n return True, 0\n\n entering = a[~bl][np.argmin(sn)]\n u = B.solve(A[:, entering])\n\n i = u > tol # if none of the u are positive, unbounded\n if not np.any(i):\n mp_print(\"Warning: unbounded problem in KKT_check\")\n return True, 0\n\n th = xb[i] / u[i]\n l = np.argmin(th) # implicitly selects smallest subscript\n if basis[i][l] in init_actives: # if either plus or minus leaves basis, LP has made significant improvement\n improvement = True\n\n step_size = th[l] # step size\n\n # Do pivot\n x[basis] = x[basis] - step_size * u\n x[entering] = step_size\n x[abs(x) < 10e-20] = 0\n B.update(ab[i][l], entering) # modify basis\n basis = B.b\n\n # if np.dot(c, x) < -threshold: # found a better solution, so not adjacent\n if improvement:\n if not np.dot(c, x) < -threshold:\n mp_print('Original way of finding non-adjacents does not say these are non-adjacent', True)\n # if verbose:\n # mp_print(\"Did %d steps in kkt_check, found False - c*x %.8f\" % (iteration, np.dot(c, x)))\n return False, 0\n\n iteration += 1\n if iteration % 10000 == 0:\n print(\"Warning: reached %d iterations\" % iteration)\n if iteration % max_iter == 0:\n mp_print(\"Cycling? 
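# Minimal illustration of the coefficient-space Chebyshev differentiation that the
# Vandermonde-based operators above are built from, using only the standard NumPy
# chebyshev API (chebgauss/chebfit/chebder/chebval); values and degree are arbitrary.
import numpy as np
from numpy.polynomial import chebyshev as C

M = 16
x, _ = C.chebgauss(M)                 # Chebyshev-Gauss nodes on [-1, 1]
f = np.sin(3.0 * x)
coeffs = C.chebfit(x, f, M - 1)       # interpolating coefficients
dcoeffs = C.chebder(coeffs)           # differentiate in coefficient space
err = np.max(np.abs(C.chebval(x, dcoeffs) - 3.0 * np.cos(3.0 * x)))
print(err)                            # small (spectral accuracy)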
Starting again with new perturbation.\")\n return True, 2\n\n return True, 1", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def _fit_ridge_alpha(trn_fs,trn_data,val_fs,val_data,alphas=DEFAULT_ALPHAS,\n chunk_sz=5000,is_efficient=True,dtype=np.single, is_verbose=False, pthr=0.005,\n square_alpha=False,return_resids=False): \n n_tps,n_voxels = trn_data.shape\n n_chunks = np.ceil(n_voxels/np.float(chunk_sz)).astype(np.int32)\n cc = np.zeros((n_voxels,len(alphas)),dtype=dtype)\n if return_resids:\n resids = np.zeros((n_tps,n_voxels,len(alphas)),dtype=dtype)\n pred_A = []\n if is_efficient:\n # Efficient Ridge regression from A. Huth, Part (1):\n # Full multiplication for validation (here, random split of\n # training data) prediction is: \n # pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk) # NOTE: pinv(Ux) = Ux'\n # We will pre-compute the first and third terms in parentheses:\n # pred = XvalVx * Dx * UxYchunk\n if is_verbose: \n print('->Doing SVD of stimulus design matrix')\n t0 = time.time()\n #time.sleep(.01); # To ensure printing?\n m,n = trn_fs.shape\n if m>n:\n Ux,Sx,Vx = _utils._svd(trn_fs,full_matrices=False)\n else:\n Vx,Sx,Ux = _utils._svd(trn_fs.T,full_matrices=False)\n # Switcheroo of Vx and Ux due to transpose of input matrix\n Ux = Ux.T\n Vx = Vx.T\n\n if is_verbose:\n t1 = time.time()\n print('->Done with SVD in %0.2f sec'%(t0-t1))\n # For more efficient computation:\n #k = len(Sx) \n ## OR: \n ## singcutoff = (XX);\n ## k = sum(sx > singcutoff);\n ## sx = sx(1:k);\n XvalVx = val_fs.dot(Vx.T) # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!\n else:\n raise NotImplementedError(\"Sorry, not done yet!\")\n\n for iChunk in range(n_chunks):\n print('Running chunk %d of %d...\\n'%(iChunk+1,n_chunks))\n ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk\n ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.\n Ychunk = trn_data[:,ChIdx]\n\n # Fit model with all lambdas (for subset of voxels)\n if not is_efficient:\n raise Exception('LAME! no slow reliable ridge implemented.')\n #[Wt L] = ridgemulti(X,Ychunk,params.lambdas);\n else:\n # Efficient Ridge regression from A. Huth, part (2)\n # NOTE: weights are never explicitly computed!\n UxYchunk = Ux.T.dot(Ychunk)\n \n if is_verbose:\n print('Checking model predictions...')\n for iA,A in enumerate(alphas):\n if not is_efficient:\n pred = np.cast(np.single)[Xval.dot(Wt[:,:,iA])]\n else:\n # Efficient Ridge regression from A. 
Huth, part (3)\n # Normalize lambda by Frobenius norm for stim matrix\n aX = A # * norm(X,'fro'); # ... or not\n # Need to decide for final whether aX**2 or not\n if square_alpha:\n Dx = Sx/(Sx**2 + aX**2) \n else:\n Dx = Sx/(Sx**2 + aX) \n # Compute predicitons (XvalVx and UxYchunk computed above)\n # (mult diag is slightly faster than matrix multiplication in timing tests)\n pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk) \n # Compute prediction accuracy (correlations)\n cc[ChIdx,iA]=_sutils.column_corr(pred,val_data[:,ChIdx])\n if return_resids:\n resids[:,ChIdx,iA] = val_data[:,ChIdx]-pred\n if return_resids:\n return cc,resids\n else:\n return cc", "def test_SIS():\r\n def is_symmetric_mode(beta, k0, g, a_over_d, h):\r\n \"\"\"\r\n Eq (7i) of paper\r\n beta is what I call kx\r\n k0 is vacuum angular wavenumber\r\n g is thickness of air layer\r\n h is thickness of corrugated layer\r\n a_over_d is the fraction of corrugated layer which is air\r\n \"\"\"\r\n lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)\r\n * cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))\r\n rhs = a_over_d * cmath.tan(k0 * h)\r\n return floats_are_equal(lhs, rhs, tol=1e-4)\r\n\r\n def is_antisymmetric_mode(beta, k0, g, a_over_d, h):\r\n \"\"\"\r\n Eq (7ii) of paper\r\n \"\"\"\r\n lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)\r\n / cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))\r\n rhs = a_over_d * cmath.tan(k0 * h)\r\n return floats_are_equal(lhs, rhs, tol=1e-4)\r\n # Choose some parameters (can be anything, these are from Fig. 3 caption)\r\n w = 2 * pi * (4 * nu.THz)\r\n h = 50 * nu.um\r\n g = 50 * nu.um\r\n a_over_d = 0.1\r\n \r\n # Now run analysis\r\n k0 = w / nu.c0\r\n d_over_a = a_over_d**-1\r\n # epsilon of a PEC (perfect electric conductor) is -infinity, but code\r\n # doesn't allow that. Use big value instead...\r\n PEC_eps = -1e11\r\n params = {'d_list': [inf, h, g, h, inf],\r\n 'ex_list': [PEC_eps, d_over_a, 1, d_over_a, PEC_eps],\r\n 'ez_list': [PEC_eps, PEC_eps, 1, PEC_eps, PEC_eps],\r\n 'mu_list': [1, a_over_d, 1, a_over_d, 1],\r\n 'w': w}\r\n \r\n kx_list = find_kx(params, grid_points=30, iterations=11, reduction_factor=14,\r\n plot_full_region=True,\r\n search_domain=[-1e5 * nu.m**-1, 1e5 * nu.m**-1, 0, 1e5 * nu.m**-1])\r\n \r\n print('kx_list -- ' + str(len(kx_list)) + ' entries...')\r\n print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n # Here, I'm only interested in solutions on the positive real axis\r\n kx_list = [kx for kx in kx_list if abs(kx.real) > 1e5 * abs(kx.imag)]\r\n kx_list = [-kx if kx.real < 0 else kx for kx in kx_list]\r\n # Delete repeats with tolerance 1e-4\r\n kx_list_norepeat = []\r\n for kx in kx_list:\r\n if not any(floats_are_equal(kx, kx2, tol=1e-4) for kx2 in kx_list_norepeat):\r\n kx_list_norepeat.append(kx)\r\n kx_list = kx_list_norepeat\r\n print('kx_list (cleaned up) -- ' + str(len(kx_list)) + ' entries...')\r\n print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n found_sym_mode = False\r\n for kx in kx_list:\r\n if is_symmetric_mode(kx, k0, g, a_over_d, h):\r\n found_sym_mode = True\r\n print('Found symmetric mode! ',\r\n '(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))\r\n params2 = deepcopy(params)\r\n params2['kx'] = kx\r\n params2 = find_all_params_from_kx(params2)\r\n if check_mode(params2) is not True:\r\n print('Not a real mode? ... 
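# Hedged sketch of the SVD identity the efficient ridge path above exploits: with
# X = U S V^T, the ridge weights are W = V diag(s / (s^2 + alpha)) U^T Y, so sweeping
# many alphas reuses a single SVD. Shapes and data here are placeholders.
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 50))
Y = rng.standard_normal((200, 3))
U, s, Vt = np.linalg.svd(X, full_matrices=False)
for alpha in (0.1, 1.0, 10.0):
    d = s / (s ** 2 + alpha)
    W = Vt.T @ (d[:, None] * (U.T @ Y))                                  # SVD route
    W_direct = np.linalg.solve(X.T @ X + alpha * np.eye(50), X.T @ Y)    # normal equations
    print(alpha, np.allclose(W, W_direct))                               # True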
Error code:')\r\n print(check_mode(params2))\r\n else:\r\n plot_mode(params2)\r\n assert found_sym_mode\r\n found_anti_mode = False\r\n for kx in kx_list:\r\n if is_antisymmetric_mode(kx, k0, g, a_over_d, h):\r\n found_anti_mode = True\r\n print('Found antisymmetric mode! ',\r\n '(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))\r\n params2 = deepcopy(params)\r\n params2['kx'] = kx\r\n params2 = find_all_params_from_kx(params2)\r\n if check_mode(params2) is not True:\r\n print('Not a real mode? ... Error code:')\r\n print(check_mode(params2))\r\n else:\r\n plot_mode(params2)\r\n assert found_anti_mode\r\n \r\n print('Congratulations, the solver found the correct kx for both the')\r\n print('symmetric and antisymmetric mode of the structure, consistent')\r\n print('with the analytical formula in the literature.')", "def find_optimal_low_rank_matrix( self, orig_similarity_matrix, orig_rank, u, s, v, singular_reduction ):\n '''rank_list = list()\n sum_singular_values = list()\n for rank in range( 0, orig_rank ):\n compute_result = self.compute_low_rank_matrix( u, s, v, rank + 1 )\n rank_list.append( ( rank + 1 ) / float( orig_rank ) )\n sum_singular_values.append( compute_result[ 1 ] )\n utils._plot_singular_values_rank( rank_list, sum_singular_values )'''\n return self.compute_low_rank_matrix( u, s, v, int( singular_reduction * orig_rank ) )", "def _rdm12_lowfilling(self, bradata: Optional['FqeData'] = None\n ) -> Tuple['Nparray', 'Nparray']:\n norb = self.norb()\n nalpha = self.nalpha()\n nbeta = self.nbeta()\n lena = self.lena()\n lenb = self.lenb()\n nlt = norb * (norb + 1) // 2\n\n outpack = numpy.zeros((nlt, nlt), dtype=self.coeff.dtype)\n outunpack = numpy.zeros((norb, norb, norb, norb),\n dtype=self.coeff.dtype)\n if nalpha - 2 >= 0:\n alpha_map, _ = self._core.find_mapping(-2, 0)\n alpha_array = self._to_array1(alpha_map, norb)\n\n def compute_intermediate0(coeff):\n tmp = numpy.zeros((nlt, int(binom(norb, nalpha - 2)), lenb),\n dtype=self.coeff.dtype)\n _apply_array12_lowfillingaa(self.coeff, alpha_array, tmp)\n return tmp\n\n inter = compute_intermediate0(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate0(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n if self.nalpha() - 1 >= 0 and self.nbeta() - 1 >= 0:\n alpha_map, beta_map = self._core.find_mapping(-1, -1)\n inter = numpy.zeros((norb, norb, int(binom(\n norb, nalpha - 1)), int(binom(norb, nbeta - 1))),\n dtype=self._dtype)\n\n alpha_array = self._to_array2(alpha_map, norb)\n beta_array = self._to_array2(beta_map, norb)\n\n alpha_map, beta_map = self._core.find_mapping(-1, -1)\n _apply_array12_lowfillingab(self.coeff, alpha_array, beta_array,\n nalpha, nbeta, inter)\n\n if bradata is None:\n inter2 = inter\n else:\n inter2 = numpy.zeros((norb, norb, int(binom(\n norb, nalpha - 1)), int(binom(norb, nbeta - 1))),\n dtype=self._dtype)\n _apply_array12_lowfillingab(bradata.coeff, alpha_array, beta_array, \\\n nalpha, nbeta, inter2)\n\n # 0.25 needed since _apply_array12_lowfillingab adds a factor 2\n outunpack += numpy.tensordot(\n inter2.conj(), inter, axes=((2, 3), (2, 3))) * 0.25\n\n if self.nbeta() - 2 >= 0:\n _, beta_map = self._core.find_mapping(0, -2)\n beta_array = self._to_array1(beta_map, norb)\n\n def compute_intermediate2(coeff):\n tmp = numpy.zeros((nlt, lena, int(binom(norb, nbeta - 2))),\n dtype=self.coeff.dtype)\n _apply_array12_lowfillingaa(self.coeff,\n beta_array,\n tmp,\n alpha=False)\n\n return tmp\n\n inter = 
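# Sketch of the truncated-SVD approximation behind find_optimal_low_rank_matrix /
# compute_low_rank_matrix above: keep the k leading singular triplets; by the
# Eckart-Young theorem the Frobenius error equals the norm of the discarded ones.
import numpy as np

A = np.random.rand(60, 40)
k = 10
U, s, Vh = np.linalg.svd(A, full_matrices=False)
A_k = (U[:, :k] * s[:k]) @ Vh[:k, :]          # rank-k approximation
err = np.linalg.norm(A - A_k)                 # Frobenius norm
print(np.isclose(err, np.sqrt(np.sum(s[k:] ** 2))))   # True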
compute_intermediate2(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate2(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n out = numpy.zeros_like(outunpack)\n for i in range(norb):\n for j in range(norb):\n ij = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n parityij = 1.0 if i < j else -1.0\n for k in range(norb):\n for l in range(norb):\n parity = parityij * (1.0 if k < l else -1.0)\n out[i, j, k,\n l] -= outunpack[i, j, k, l] + outunpack[j, i, l, k]\n mnkl, mxkl = min(k, l), max(k, l)\n work = outpack[ij, mnkl + mxkl * (mxkl + 1) // 2]\n out[i, j, k, l] -= work * parity\n\n return self.rdm1(bradata)[0], out", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def SwissRollWithConstrain(nei = [5,25,50]):\n n_samples = 4000\n n_neighbor = 60\n noise = 0\n X, _ = make_swiss_roll(n_samples, noise=noise, random_state=42)\n X = X*2 #scaling ths Swiss\n\n neigh = NearestNeighbors(n_neighbors=n_neighbor).fit(X)\n _, indxes = neigh.kneighbors(X)\n\n SwissConstrain = np.delete(X,indxes[1500,:], axis=0)\n SwissConstrainNoisy = SwissConstrain + np.random.normal(0,1,[n_samples-n_neighbor,3])\n\n elevation = 10\n azimoth = 60\n fig = plt.figure(figsize=(21,7))\n ax1 = fig.add_subplot(131, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(X[:, 0], X[:, 1], X[:, 2], c=np.linalg.norm((X[:, 0], X[:, 1]), axis=0))\n ax1.set_title('Swiss Roll')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(132, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrain[:, 0], SwissConstrain[:, 1], SwissConstrain[:, 2],\n c=np.linalg.norm((SwissConstrain[:, 0], SwissConstrain[:, 1]), axis=0))\n ax1.set_title('Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(133, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1], SwissConstrainNoisy[:, 2],\n c=np.linalg.norm((SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1]), axis=0))\n ax1.set_title('Noisy Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n plt.savefig('Swiss Roll with different petubations')\n\n DataToPlot = [X,SwissConstrain,SwissConstrainNoisy]\n DataName = ['Swiss ISOMAP','Swiss with constrain ISOMAP', 'Swiss with constrain and noise ISOMAP']\n\n # Ploting Swiss Isomapping\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_isomap = Isomap(j, 2, neighbors)\n method = DataName[i]\n ax = fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_isomap[:, 0], Swiss_isomap[:, 1],\n c=np.linalg.norm((Swiss_isomap[:, 0], Swiss_isomap[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_isomap, pallete=Swiss_isomap[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss ISOMAP embbeding for {} neighbour'.format(neighbors))\n\n DataName = ['Swiss LLE', 'Swiss with constrain LLE', 'Swiss with constrain and noise LLE']\n # Ploting Swiss LLE\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_LLE = LLE(j, 2, neighbors)\n method = DataName[i]\n ax = 
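# For reference only: the SciPy KD-tree query that problem2 above grades against
# (scipy.spatial.cKDTree is the standard fast nearest-neighbour structure); the data
# and query point below are placeholders.
import numpy as np
from scipy.spatial import cKDTree

pts = np.random.rand(1000, 3)
tree = cKDTree(pts)
dist, idx = tree.query(np.array([0.5, 0.5, 0.5]), k=5)   # 5 nearest neighbours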
fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_LLE[:, 0], Swiss_LLE[:, 1],\n c=np.linalg.norm((Swiss_LLE[:, 0], Swiss_LLE[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_LLE, pallete=Swiss_LLE[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss LLE embbeding for {} neighbour'.format(neighbors))\n return", "def perf_adaptive(R2_true,converted_adaptive_file,epsilon):\n assert(len(converted_adaptive_file)==2)\n pop = converted_adaptive_file[1][0]\n region = converted_adaptive_file[1][1]\n eps_adapt = converted_adaptive_file[1][2]\n n_adapt = converted_adaptive_file[1][3]\n blen_adapt = converted_adaptive_file[1][4]\n assert((epsilon > 0) & (epsilon <= 1.0))\n n = R2_true.shape[0]\n mat = converted_adaptive_file[0][:n,:n]\n m = mat.shape[0]\n assert(n==m)\n R2_x = np.tril(R2_true,k=-1)\n nonzero_r2_true = R2_x[R2_x > 0.0]\n nonzero_r2_adaptive = mat[mat > 0.0]\n corrcoef = corrcoef_PxP(R2_true,mat)\n return (pop,region,eps_adapt,n_adapt,blen_adapt,epsilon,np.sum(nonzero_r2_adaptive > epsilon),np.sum(nonzero_r2_true > epsilon),\n np.count_nonzero(nonzero_r2_adaptive),np.count_nonzero(nonzero_r2_true),corrcoef)", "def find_results(data,weight_matrix,params):\r\n \r\n data = data.astype(np.float32)\r\n weight_matrix = weight_matrix.astype(np.float32)\r\n \r\n rank = params['rank']\r\n lamb = params['lambda']\r\n lr = params['lr']\r\n hidden_pairs = params['hidden_pairs']\r\n cost_functions.lamb = lamb\r\n\r\n f = cost_functions.frobenius \r\n V_masked = create_mask(data,hidden_pairs)\r\n bool_mask = V_masked.notnull().values\r\n tf_mask = tf.Variable(bool_mask)\r\n \r\n V = tf.constant(V_masked.values)\r\n laplacian_matrix = laplacian(weight_matrix).astype(np.float32)\r\n W, H = init_W_H(V.shape, rank=rank)\r\n WH = tf.matmul(W, H)\r\n L = tf.constant(laplacian_matrix)\r\n WTLW = tf.matmul(tf.matmul(tf.transpose(W), L), W)\r\n\r\n cost = f(V, tf_mask, WH, WTLW)\r\n train_step = tf.train.ProximalGradientDescentOptimizer(lr).minimize(cost)\r\n init = tf.global_variables_initializer()\r\n clip = get_clip(W, H)\r\n\r\n sess = tf.Session()\r\n sess.run(init)\r\n\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n initial_difference = previous_cost - sess.run(cost)\r\n\r\n matrix_errors = []\r\n graph_errors = []\r\n imputation_error = []\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n \r\n i = 0\r\n while np.isfinite(sess.run(cost)) and previous_cost-sess.run(cost) > TARGET_DIFFERENCE * initial_difference and i<=max_iterations:\r\n previous_cost = sess.run(cost)\r\n sess.run(train_step)\r\n sess.run(clip)\r\n matrix_errors.append(sess.run(cost_functions.matrix_cost))\r\n graph_errors.append(sess.run(cost_functions.graph_cost))\r\n i+=1\r\n\r\n learnt_W = sess.run(W).astype(np.float32)\r\n learnt_H = sess.run(H).astype(np.float32)\r\n\r\n imputation_norm = np.linalg.norm((data - learnt_W.dot(learnt_H))[~bool_mask])\r\n imputation_error.append(imputation_norm)\r\n\r\n return {'imputation_error':imputation_norm,'W':sess.run(W),'H':sess.run(H),\r\n 'graph_error':graph_errors,'matrix_error':matrix_errors,'imputation_error_list':imputation_error}", "def fAVM(RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# 
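# Hedged alternative to the hand-rolled Isomap/LLE calls in SwissRollWithConstrain above,
# using scikit-learn's built-in manifold learners (assumes scikit-learn is installed);
# sample size and neighbour count are illustrative.
import numpy as np
from sklearn.datasets import make_swiss_roll
from sklearn.manifold import Isomap, LocallyLinearEmbedding

X, color = make_swiss_roll(n_samples=2000, random_state=42)
emb_iso = Isomap(n_neighbors=12, n_components=2).fit_transform(X)
emb_lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2).fit_transform(X)
print(emb_iso.shape, emb_lle.shape)   # (2000, 2) (2000, 2)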
--------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n\tVk=0.000 # Initially assumme no kerogen\n\tDh=Df\n#\n#\t5.1.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.5.3.1 Organic and Inorganic Component Density Values:\n# -------------------------------------------------------\n\t\tDBI=(1-PHIc1)*Dc1+(PHIc1*Dw) # Bulk Density of Inorganic Component\n\t\tDBO=(1-PHIk)*Dk+(PHIk*Dh)# Bulk Density of Organic Component\n#\n# 5.1.3.2 Compute Volume of Organic and Inorganic Component:\n# ----------------------------------------------------------\n\t\tVOR=(DBI-RHOB)/(DBI-DBO)\n\t\tVOR=ImposeLimits(VOR,0,1)\n\t\tVIN=(1-VOR)\n#\n# 5.1.3.3 Compute Volumetrics, Total & Effective Porosity and Total & Effective Water Saturation:\n# ---------------------------------------\t-------------------------------------------------------\n\t\tVc1=VIN*(1-PHIc1)\n\t\tVc2=0.000\n\t\tVc3=0.000\n\t\tVk=VOR*(1-PHIk)\n\t\tPHIt=VIN*PHIc1+VOR*PHIk\n\t\tPHIe=VOR*PHIk\n\t\tSwt=1-((VOR*PHIk)/PHIt)\n\t\tSwt=ImposeLimits(Swt,0,1)\n\t\tSwe=0.000\n\t\tSxot=Swt\n\t\tSxoe=Swe\n#\n# 5.1.3.4 Compute Bulk Volume of Water, Hydrocarbon Pore Volume and Pore Space Fluid Properties:\n# ---------------------------------------\t------------------------------------------------------\n\t\tBVW=PHIe*Swe\n\t\tHCPV=PHIe*(1-Swe)\n\t\tVs=RSK*Vk # Estimate volume of adsorbed (sorbed) hydrocarbon\n\t\tVs=ImposeLimits(Vs,0,HCPV)\n\t\tVf=(HCPV-Vs)\n\t\tVf=ImposeLimits(Vf,0,(HCPV-Vs))\n#\n# 5.1.3.5 Recompute hydrocarbon properties in the pore space:\n# -----------------------------------------------------------\n\t\tSum=Vs+Vf\n\t\tif(Sum<=0.000):\n\t\t\tDh=Df\n\t\telse:\n\t\t\tDh=(Ds*Vs+Df*Vf)/(Vs+Vf)\n#\n# 5.1.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.1.6 Preoutput computations:\n# ------------------------------\n\tQc=MissingValue\n\tDc2=0.00\n\tDc3=0.00\n\tCBW=PHIt-PHIe # The assumption is that all microporosity can be considered to be clay bound water.\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw)\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n#\n# 5.5.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def principal_strain(strain_tensor_data, k, sample_ID, initial_step, ch_list):\n\n\n k = str(k)\n it = int(initial_step)\n dir = [\"xx\",\"yy\",\"zz\",\"xy\",\"yz\",\"zx\"]\n ch = ch_list.loc[\"ch\",:]\n\n\n\n \"\"\" ~~~~~~~~~~input from data file~~~~~~~~~~~~~~~~~ \"\"\"\n\n sdata = strain_tensor_data\n time_p = sdata.loc[:,\"Elapsed Time\"] \n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n time_n = time_p.values\n t = len(sdata.index)\n\n\n \"\"\" ~~~~~~~~~~Create strain tensor ~~~~~~~~~~~~~~~~~ \"\"\"\n\n stensor = np.empty((t,3,3))\n for i in range(0,t):\n strain = sdata.loc[i+1, dir]\n\n s1 = strain.at[\"xx\"]\n s2 = strain.at[\"xy\"]\n s3 = strain.at[\"zx\"]\n s4 = strain.at[\"yy\"]\n s5 = strain.at[\"yz\"]\n s6 = strain.at[\"zz\"]\n\n stensor[i,:,:] = np.array([[s1,s2,s3],\n [s2,s4,s5],\n [s3,s5,s6]])\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n w,v = LA.eigh(stensor) #calculate eigen vectors & eigenvalues\n\n\n \"\"\" ~~~~~~~~~~ Output data ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n time = time_n[it:]\n\n w = w[it:,:]\n v = v[it:,:,:]\n\n\n v1 = v[:,:,2]\n v2 = v[:,:,1]\n v3 = v[:,:,0]\n\n\n w_ave = np.mean(w, axis=0)\n v_ave = np.mean(v, axis=0)\n\n v1_ave = v_ave[:,2]\n v2_ave = v_ave[:,1]\n v3_ave = v_ave[:,0]\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n def plunge_trend(n):\n \n norm = np.linalg.norm(n)\n n = n/norm\n \n x = n[0]\n y = n[1]\n z = n[2]\n \n plunge = np.arcsin(z) \n \n if x == 0 and y > 0:\n trend = pi*0.5\n elif x == 0 and y < 0:\n trend = pi*1.5\n elif x > 0 and y == 0:\n trend = 0\n elif x < 0 and y == 0:\n trend = pi\n elif x == 0 and y == 0:\n trend = 0\n else:\n trend = np.arctan(abs(y/x))\n \n if x > 0 and y>0:\n trend = trend \n elif x > 0 and y< 0:\n trend = 2*pi - trend\n elif x <0 and y <0:\n trend = 1.5*pi - trend\n elif x <0 and y >0:\n trend = trend + 0.5*pi\n \n plunge = np.rad2deg(plunge)\n trend = np.rad2deg(trend)\n return plunge, trend\n\n\n def plot_schmidt(ax, plunge, trend, style, label = \"\", markersize = 30, alpha = 1):\n if plunge >= 0:\n ax.line(plunge, trend, style,label = label, markersize = markersize, alpha = alpha)\n elif plunge < 0:\n ax.line(-plunge, trend, style,label = label, markerfacecolor = \"#ffffff\", markersize = markersize, alpha = alpha)\n\n\n fig = plt.figure(figsize=(30,30))\n ax = fig.add_subplot(3,1,1,projection=\"stereonet\")\n ax.set_azimuth_ticklabels([\"N\",\"\",\"E\",\"\",\"S\",\"\",\"W\"])\n ax.grid(which=\"both\")\n \"\"\" ~~~~~~~~~~ Lower-himisphere Schmidt net plot of principal strain directions ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n for i in range(1, len(time)):\n plunge111, trend111 = plunge_trend(v1[i,:])\n plot_schmidt(ax,plunge111,trend111, \"ro\", markersize=5)\n\n plunge112, trend112 = plunge_trend(v2[i,:])\n plot_schmidt(ax,plunge112,trend112, \"go\", markersize=5)\n\n plunge113, trend113 = plunge_trend(v3[i,:])\n plot_schmidt(ax,plunge113,trend113, \"bo\", markersize=5)\n\n\n plunge1, trend1 = plunge_trend(v1[0,:])\n plot_schmidt(ax,plunge1,trend1, \"r^\",markersize =20)\n\n plunge2, trend2 = plunge_trend(v2[0,:])\n plot_schmidt(ax,plunge2,trend2, \"g^\",markersize =20)\n\n plunge3, trend3 = plunge_trend(v3[0,:])\n plot_schmidt(ax,plunge3,trend3, \"b^\",markersize =20)\n\n\n plunge1, trend1 = plunge_trend(v1[-1,:])\n plot_schmidt(ax,plunge1,trend1, 
\"ro\",markersize =20)\n\n plunge2, trend2 = plunge_trend(v2[-1,:])\n plot_schmidt(ax,plunge2,trend2, \"go\",markersize =20)\n\n plunge3, trend3 = plunge_trend(v3[-1,:])\n plot_schmidt(ax,plunge3,trend3, \"bo\",markersize =20)\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\" ~~~~~~~~~~ Lower-himisphere Schmidt net plot of averaged principal strain directions ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n plunge1, trend1 = plunge_trend(v1_ave)\n plot_schmidt(ax,plunge1,trend1, \"r*\",markersize =20, label = \"$\\sigma_1$\")\n\n plunge2, trend2 = plunge_trend(v2_ave)\n plot_schmidt(ax,plunge2,trend2, \"g*\",markersize =20,label = \"$\\sigma_2$\")\n\n plunge3, trend3 = plunge_trend(v3_ave)\n plot_schmidt(ax,plunge3,trend3, \"b*\", markersize =20,label = \"$\\sigma_3$\")\n\n ax.legend(bbox_to_anchor = (1.2, 1), loc=\"upper left\")\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n \n fig.text(0.15,0.7,ch)\n\n\n \"\"\" ~~~~~~~~~~ Plot of max & min horizontal strain directions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n \n zr = np.empty((360,1))\n for i in range(0,360):\n th_deg = i\n th = th_deg*pi*180**(-1) \n\n vector = np.array([[np.cos(th)],[np.sin(th)],[0]])\n sstensor = stensor[-1,:,:]\n z = sstensor.dot(vector)\n zz = vector.T.dot(z)\n zr[i] = zz\n\n th_max = zr.argmax()\n th_min = zr.argmin()\n\n #th_max = th_max*pi*180**(-1) \n #th_min = th_min*pi*180**(-1) \n\n #n_max_1 = np.array([[np.cos(th_max)],[np.sin(th_max)],[0]])\n #n_max_2 = np.array([[np.cos(th_max+pi)],[np.sin(th_max+pi)],[0]])\n\n #n_min_1 = np.array([[np.cos(th_min)],[np.sin(th_min)],[0]])\n #n_min_2 = np.array([[np.cos(th_min+pi)],[np.sin(th_min+pi)],[0]])\n\n plunge11, trend11 = 0, th_max\n plunge12, trend12 = 0, th_max+180\n #plunge11, trend11 = plunge_trend(n_max_1)\n #plunge12, trend12 = plunge_trend(n_max_2)\n plot_schmidt(ax,plunge11,trend11, \"rD\",markersize =30)\n plot_schmidt(ax,plunge12,trend12, \"rD\",markersize =30)\n\n plunge22, trend22 = 0, th_min\n plunge23, trend23 = 0, th_min + 180\n #plunge22, trend22 = plunge_trend(n_min_1)\n #plunge23, trend23 = plunge_trend(n_min_2)\n plot_schmidt(ax,plunge22,trend22, \"bD\",markersize =30)\n plot_schmidt(ax,plunge23,trend23, \"bD\",markersize =30)\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n \"\"\" ~~~~~~~~~~ Plot of time change of principal strain magnitudes ~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n ax1 = fig.add_subplot(3,1,2)\n w1 = w[:,2]-w[0,2]\n w2 = w[:,1]-w[0,1]\n w3 = w[:,0]-w[0,0]\n time = time[:]-time[0]\n\n\n ax1.plot(time,w1,label=\"$\\epsilon_1$\")\n ax1.plot(time,w2,label=\"$\\epsilon_2$\")\n ax1.plot(time,w3,label=\"$\\epsilon_3$\")\n ax1.set(xlabel=\"Elapsed Time[h]\",ylabel=\"Strain[$\\mu$strain]\")\n ax1.legend()\n\n \"\"\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n \"\"\" ~~~~~~~~~~ Plot of time change of principal strain magnitudes ratios ~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n ax2 = fig.add_subplot(3,1,3)\n w1 = w1[1:]\n w2 = w2[1:]\n w3 = w3[1:]\n time1 = time[1:]\n \n w21 = w2/w1\n w31 = w3/w1\n\n ax2.plot(time1,w21,label=\"$\\epsilon_2$/$\\epsilon_1$\")\n ax2.plot(time1,w31,label=\"$\\epsilon_3$/$\\epsilon_1$\")\n ax2.set(xlabel=\"Elapsed Time[h]\")\n ax2.legend()\n\n \"\"\" 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n fig.suptitle(sample_ID+\"_\"+k,fontsize=\"large\", fontweight=\"bold\")\n fig.savefig(\"result_\"+sample_ID+\"_\"+k+\".png\")\n plt.close(fig)\n\n return w, v", "def estimate_fundamental_matrix(points_a, points_b):\n mean_a = np.mean(points_a, axis=0)\n mean_b = np.mean(points_b, axis=0)\n std_a = np.std(points_a, axis=0)\n std_b = np.std(points_b, axis=0)\n T_a = np.asarray([[1.0/std_a[0], 0, -mean_a[0]/std_a[0]],\n [0, 1.0/std_a[1], -mean_a[1]/std_a[1]],\n [0, 0, 1]])\n T_b = np.asarray([[1.0/std_b[0], 0, -mean_b[0]/std_b[0]],\n [0, 1.0/std_b[1], -mean_b[1]/std_b[1]],\n [0, 0, 1]])\n points_a = np.hstack((points_a, np.ones((len(points_a), 1)))).T\n points_b = np.hstack((points_b, np.ones((len(points_b), 1)))).T\n points_a = np.dot(T_a, points_a)[:2].T\n points_b = np.dot(T_b, points_b)[:2].T\n\n A = []\n for pa, pb in zip(points_a, points_b):\n ua, va = pa\n ub, vb = pb\n A.append([ua*ub, va*ub, ub, ua*vb, va*vb, vb, ua, va, 1])\n A = np.vstack(A)\n _, _, Vt = np.linalg.svd(A)\n F = Vt[-1, :].reshape((3, 3))\n\n # enforce the singularity constraint\n U, D, Vt = np.linalg.svd(F)\n D[-1] = 0\n F = np.dot(np.dot(U, np.diag(D)), Vt)\n\n F = np.dot(np.dot(T_b.T, F), T_a)\n\n return F", "def explanation(self, instance):\r\n \"\"\"\r\n Args:\r\n instance: [numpy.array or sparse matrix] instance on which \r\n to explain the model prediction\r\n \r\n Returns:\r\n A tuple (explanation_set[0:self.max_explained], number_active_elements, \r\n number_explanations, minimum_size_explanation, time_elapsed, where:\r\n \r\n explanation_set: explanation(s) ranked from high to low change in predicted score or probability.\r\n The number of explanations shown depends on the argument max_explained.\r\n \r\n number_active_elements: number of active elements of the instance of interest.\r\n \r\n number_explanations: number of explanations found by algorithm.\r\n \r\n minimum_size_explanation: number of features in the smallest explanation.\r\n \r\n time_elapsed: number of seconds passed to generate explanation(s).\r\n \r\n explanations_score_change: change in predicted score/probability when removing\r\n the features in the explanation, ranked from high to low change.\r\n \"\"\"\r\n \r\n# *** INITIALIZATION ***\r\n \r\n time_max=0\r\n tic=time.time()\r\n instance=lil_matrix(instance)\r\n iteration=0\r\n nb_explanations=0\r\n minimum_size_explanation=np.nan\r\n explanations=[]\r\n explanations_sets=[]\r\n explanations_score_change=[]\r\n \r\n class_index = np.argmax(self.classifier_fn_multiclass(instance))\r\n score_predicted = self.classifier_fn_multiclass(instance)[class_index] \r\n #a tuple of predicted scores of one vs rest\r\n #get predicted score for the class that is predicted\r\n \r\n indices_active_elements=np.nonzero(instance)[1]\r\n number_active_elements=len(indices_active_elements)\r\n indices_active_elements=indices_active_elements.reshape((number_active_elements,1))\r\n threshold=-1\r\n stop=0\r\n expanded_combis=[]\r\n \r\n #use orderedset() \r\n combinations_to_expand=[]\r\n for features in indices_active_elements:\r\n combinations_to_expand.append(OrderedSet(features))\r\n #in the first iteration, the new combinations to explore\r\n #whether it are explanations are the combinations_to_expand\r\n new_combinations=combinations_to_expand.copy() \r\n \r\n #indices of active features are the feature set to explore\r\n feature_set=[]\r\n for features in indices_active_elements:\r\n 
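# Reminder of the eigen-decomposition convention relied on by principal_strain above:
# numpy.linalg.eigh returns eigenvalues in ascending order, so for a symmetric strain
# tensor the principal values are w[2] >= w[1] >= w[0] with directions v[:, 2], v[:, 1],
# v[:, 0]. The tensor below is a made-up symmetric example.
import numpy as np

eps = np.array([[ 2.0, 0.5, 0.0],
                [ 0.5, 1.0, 0.3],
                [ 0.0, 0.3, -1.0]])
w, v = np.linalg.eigh(eps)
e1, e2, e3 = w[2], w[1], w[0]              # principal strains, largest first
n1, n2, n3 = v[:, 2], v[:, 1], v[:, 0]     # corresponding unit directions
print(np.allclose(eps @ n1, e1 * n1))      # True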
feature_set.append(frozenset(features))\r\n \r\n time_max += (time.time()-tic)\r\n \r\n print('Initialization complete.')\r\n print('\\n Elapsed time %d \\n' %(time.time()-tic))\r\n\r\n while (iteration < self.max_iter) and (nb_explanations < self.max_explained) and (len(combinations_to_expand)!=0) and (len(new_combinations)!=0) and (time_max<(self.time_maximum)): \r\n \r\n time_extra=time.time()\r\n \r\n iteration+=1\r\n print('\\n Iteration %d \\n' %iteration)\r\n \r\n new_combinations_to_expand=[]\r\n scores_new_combinations_to_expand=[]\r\n for combination in new_combinations: #verify each set in new_combinations if it is an explanation or not\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination: \r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] != np.max(self.classifier_fn_multiclass(perturbed_instance))): #if class_index has no longer the top predicted score, an explanation is found.\r\n explanations.append(combination)\r\n explanations_sets.append(set(combination))\r\n explanations_score_change.append(score_predicted - score_new)\r\n nb_explanations+=1\r\n else:\r\n new_combinations_to_expand.append(combination)\r\n scores_new_combinations_to_expand.append(score_new)\r\n \r\n if (len(new_combinations[0]) == number_active_elements): \r\n stop=1\r\n else:\r\n stop=0 \r\n \r\n if (self.BB==True): #branch-and-bound\r\n if (len(explanations)!=0):\r\n lengths=[]\r\n for explanation in explanations:\r\n lengths.append(len(explanation))\r\n lengths=np.array(lengths)\r\n max_length=lengths.min() \r\n else: \r\n max_length=number_active_elements \r\n else: \r\n max_length=number_active_elements\r\n \r\n if (len(scores_new_combinations_to_expand) != 0):\r\n index_combi_max = np.argmax(score_predicted - scores_new_combinations_to_expand) #best-first combination or feature is chosen.\r\n new_score = scores_new_combinations_to_expand[index_combi_max]\r\n difference = score_predicted - new_score\r\n if difference[0] >= threshold:\r\n expand = 1\r\n else:\r\n expand = 0\r\n else:\r\n expand = 0\r\n\r\n if ((len(new_combinations[0]) < max_length) and (expand == 1) and (stop==0) and (nb_explanations < self.max_explained) and (len(new_combinations[0]) < self.max_features)): \r\n \r\n print('length of new_combinations is %d features.' 
%len(new_combinations[0]))\r\n print('new combinations can be expanded')\r\n \r\n comb=new_combinations_to_expand[index_combi_max]\r\n func=fn_1(comb, expanded_combis, feature_set, combinations_to_expand, explanations_sets)\r\n new_combinations=func[0]\r\n combinations_to_expand=func[1]\r\n expanded_combis=func[2]\r\n \r\n #Calculate new threshold\r\n scores_combinations_to_expand=[]\r\n for combination in combinations_to_expand:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] == np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand.append(score_new)\r\n \r\n index_combi_max = np.argmax(score_predicted - scores_combinations_to_expand)\r\n new_score = scores_combinations_to_expand[index_combi_max]\r\n threshold = score_predicted - new_score\r\n \r\n time_extra2=time.time()\r\n time_max+=(time_extra2-time_extra)\r\n print('\\n Elapsed time %d \\n' %time_max)\r\n size_COMBIS=len(combinations_to_expand)\r\n print('\\n size combis to expand %d \\n' %size_COMBIS)\r\n \r\n else:\r\n \r\n print('length of new_combinations is %d features.' %len(new_combinations[0]))\r\n print('new combination cannot be expanded')\r\n \r\n combinations=[]\r\n for combination in combinations_to_expand:\r\n if ((len(combination) < number_active_elements) and (len(combination) < (max_length)) and (len(combination) < self.max_features)):\r\n combinations.append(combination)\r\n \r\n if (len(combinations) == 0) or (nb_explanations >= self.max_explained) or (len(combinations_to_expand) == len(new_combinations)):\r\n new_combinations=[]\r\n \r\n elif (len(combinations) != 0):\r\n \r\n new_combinations=[]\r\n it=0\r\n indices=[]\r\n new_score=0\r\n combinations_to_expand_copy = combinations.copy()\r\n \r\n scores_combinations_to_expand2=[]\r\n for combination in combinations_to_expand_copy:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] != np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand2.append(2 * score_predicted)\r\n else:\r\n scores_combinations_to_expand2.append(score_new)\r\n \r\n while ((len(new_combinations) == 0) and (it<len(scores_combinations_to_expand2)) and ((time_max+(time.time() - time_extra))<self.time_maximum)):\r\n \r\n print('while loop %d' %it)\r\n \r\n if (it!=0):\r\n for index in indices:\r\n scores_combinations_to_expand2[index]= 2 * score_predicted\r\n #to make sure this index is never chosen again\r\n \r\n index_combi_max=np.argmax(score_predicted - scores_combinations_to_expand2) #best-first combi\r\n indices.append(index_combi_max)\r\n \r\n comb=combinations_to_expand_copy[index_combi_max]\r\n func=fn_1(comb, expanded_combis, feature_set, combinations_to_expand_copy, explanations_sets)\r\n new_combinations=func[0]\r\n combinations_to_expand=func[1]\r\n expanded_combis=func[2]\r\n \r\n #Calculate new threshold\r\n scores_combinations_to_expand=[]\r\n for combination in combinations_to_expand:\r\n perturbed_instance=instance.copy()\r\n for feature_in_combination in combination:\r\n perturbed_instance[:,feature_in_combination]=0\r\n score_new = self.classifier_fn_multiclass(perturbed_instance)[class_index]\r\n \r\n if (score_new[0] == 
np.max(self.classifier_fn_multiclass(perturbed_instance))):\r\n scores_combinations_to_expand.append(score_new) \r\n \r\n if (len(scores_combinations_to_expand)!=0): \r\n index_combi_max=np.argmax(score_predicted - scores_combinations_to_expand) #best-first combi\r\n new_score=scores_combinations_to_expand[index_combi_max]\r\n threshold=score_predicted - new_score\r\n it+=1 \r\n print('length of new_combinations is %d features.' %len(new_combinations))\r\n print('score_predicted minus new_score is %f.' %(score_predicted - new_score))\r\n \r\n time_max += (time.time()-time_extra)\r\n print('\\n Elapsed time %d \\n' %time_max)\r\n print('\\n size combis to expand %d \\n' %len(combinations_to_expand))\r\n\r\n print(\"iterations are done\") \r\n explanation_set=[]\r\n explanation_feature_names=[]\r\n for i in range(len(explanations)):\r\n explanation_feature_names=[]\r\n for features in explanations[i]:\r\n explanation_feature_names.append(self.feature_names[features])\r\n explanation_set.append(explanation_feature_names)\r\n \r\n if (len(explanations)!=0):\r\n lengths_explanation=[]\r\n for explanation in explanations:\r\n l=len(explanation)\r\n lengths_explanation.append(l)\r\n minimum_size_explanation=np.min(lengths_explanation)\r\n \r\n number_explanations=len(explanations)\r\n #show explanation in explanation set which is minimum in size and highest score change (delta)\r\n if (np.size(explanations_score_change)>1):\r\n inds=np.argsort(explanations_score_change, axis=0)\r\n inds = np.fliplr([inds])[0]\r\n inds_2=[]\r\n for i in range(np.size(inds)):\r\n inds_2.append(inds[i][0])\r\n explanation_set_adjusted=[]\r\n for i in range(np.size(inds)):\r\n j=inds_2[i]\r\n explanation_set_adjusted.append(explanation_set[j])\r\n explanations_score_change_adjusted=[]\r\n for i in range(np.size(inds)):\r\n j=inds_2[i]\r\n explanations_score_change_adjusted.append(explanations_score_change[j])\r\n explanation_set=explanation_set_adjusted\r\n explanations_score_change=explanations_score_change_adjusted\r\n \r\n toc=time.time()\r\n time_elapsed=toc-tic\r\n print('\\n Elapsed time %d \\n' %time_elapsed)\r\n\r\n return (explanation_set[0:self.max_explained], number_active_elements, number_explanations, minimum_size_explanation, time_elapsed, explanations_score_change[0:self.max_explained], iteration)", "def SolveSCP(self):\n\n t0 = time()\n\n # Some predicates\n Lu_min = 0.\n niters_max = self._maxiters\n maxfracchange = self._maxfracchange\n\n # initialization, resetting ...\n self.reset_all() # including _u_naught(), first application\n scp_min = self.greedy()\n\n # re-initialization iteration; col fixing ignored for the moment\n niters = 0\n f_change = _largenumber\n while (f_change>maxfracchange) and (niters<niters_max):\n # re-initialize u\n if (np.mod(niters, 2)==0): \n self.reset_u(random=True)\n else:\n self.reset_u()\n u_tmp, Lu_tmp = self.subgradient() # find a near-optimal solution \n u, Lu = self.subgradient() # rerun subgradient to get a set of Lagrangian multipliers\n\n scp_all = np.zeros(self._subg_nsteps)\n for i in np.arange(self._subg_nsteps):\n #self.reset_s()\n self.s = np.copy(self.f)\n scp_all[i] = self.greedy(u=u[:,i])\n\n # check if the solution is gettting better\n imin_tmp = (np.where(scp_all==np.amin(scp_all)))[0]\n imin = imin_tmp[np.argmax(Lu[imin_tmp])]\n imax = np.argmax(Lu)\n if (np.mod(niters, 5)==0):\n print(\"This Best solution: UB={0}, LB={1}, UB1={2}, LB1={3}\".format(scp_all[imin], Lu[imin], scp_all[imax], Lu[imax]))\n if (niters==0) or ((scp_all[imin]<=scp_min) and 
((Lu[imin]-Lu_min)>-(np.fabs(Lu_min)*self._LB_maxfracchange))):\n scp_min = scp_all[imin]\n u_min = np.copy(u[:,imin])\n Lu_min = Lu[imin]\n self.stepsize = _stepsize\n\n LB = Lu_min\n\n # final step, needs to get u_min back\n self.u = np.copy(u_min)\n self.s = np.copy(self.f)\n UB = self.greedy()\n\n # Which is better? absolute change or fractional change? \n # Both are fine, but cost should be normalized over the mean/median.\n GAP = (UB-LB)/np.fabs(UB)\n f_change = GAP\n if (np.mod(niters, 5)==0):\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n niters = niters + 1\n if (niters == niters_max): \n #warnings.warn(\"Iteration reaches maximum = {0}\".format(niters))\n print(\"Iteration in re-initialization reaches maximum number = {0}\".format(niters))\n\n # Need to remove redundant columns\n # self.remove_redundant() # this itself is NP-hard ...\n\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n print(\"Final Best solution: {0}\".format(UB))\n time_used = (time()-t0)/60.\n print(\"Took {0:.3f} minutes to reach current solution.\".format(time_used))\n\n return (UB,time_used)", "def evaluate_all_points():\n start_time = timeit.default_timer()\n mua, vra = pgen.get_pdf()\n slack = ptsl.D\n\n all_alloc = list(itertools.product(range(1,ptsl.M+1),repeat=ptsl.NPH))\n riska = []\n f2 = open(\"risk-file-D216-NPH5.csv\",\"w\")\n f2.write(\"alloc1,alloc2,alloc3,alloc4,alloc5,risk,util\\n\")\n count = 0\n for a in all_alloc :\n a1, a2, a3, a4, a5 = a\n r = compute_risk(mua, vra, a, slack)\n \n if r > 0.00001 and r < 1 - 0.00001 :\n riska.append(r)\n util = a1 * mua[a1-1] + a2 * mua[a2-1] + a3 * mua[a3-1] + a4 * mua[a4-1] + a5 * mua[a5-1]\n f2.write(\"%d,%d,%d,%d,%d,%f,%f\\n\"%(a1,a2,a3,a4,a5,r,util))\n count = count + 1\n f2.close()\n np.save(\"stored_risk\",riska)\n elapsed = timeit.default_timer() - start_time\n print(\"Brute Force Evaluation Time for %d points : %fs\"%(count,elapsed))", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def test_rastrigin(self):\n rastrigin = get_problem('rastrigin', dimension=self.dimension)\n self.assertEqual(rastrigin(self.array), 0.0)", "def test_equivalence():\n\t\n\tfrom . 
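# For reference, the Rastrigin benchmark that test_rastrigin above checks (definition
# assumed to be the standard one): f(x) = 10 d + sum_i (x_i^2 - 10 cos(2 pi x_i)),
# with global minimum 0 at x = 0.
import numpy as np

def rastrigin(x):
    x = np.asarray(x, dtype=float)
    return 10.0 * x.size + np.sum(x ** 2 - 10.0 * np.cos(2.0 * np.pi * x))

print(rastrigin(np.zeros(10)))   # 0.0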
import spectra as sp\n\t\n\t#analytic\n\tp_dict = {'Bfield':15000,'rb85frac':1,'Btheta':0*np.pi/180,'Bphi':0*np.pi/180,'lcell':1e-3,'T':84,'Dline':'D2','Elem':'Rb'}\n\tchiL1,chiR1,chiZ1 = sp.calc_chi([-18400],p_dict)\n\tRotMat1, n11, n21 = solve_diel(chiL1,chiR1,chiZ1,0,150,force_numeric=False)\n\t\n\t#numeric\n\tchiL2, chiR2, chiZ2 = chiL1, chiR1, chiZ1\n\t#chiL2,chiR2,chiZ2 = sp.calc_chi([-18400],p_dict)\n\tRotMat2, n12, n22 = solve_diel(chiL2,chiR2,chiZ2,0,150,force_numeric=True)\n\t\n\tprint('RM 1')\n\tprint(RotMat1)\n\n\tprint('RM 2')\n\tprint(RotMat2)\t\n\t\n\tprint('n1_1 (analytic)')\n\tprint(n11)\n\tprint('n1_2')\n\tprint(n12)\n\tprint('n2_1 (analytic)')\n\tprint(n21)\n\tprint('n2_2')\n\tprint(n22)\n\t\n\tprint('chi1')\n\tprint((chiL1, chiR1, chiZ1))\n\n\tprint('chi2')\n\tprint((chiL2, chiR2, chiZ2))", "def evaltest(x_solution,ntest,pred):\n \n large = 10.0**30\n e0 = 0.0\n y=0.0\n for i in range(ntest): # Computation of correct piece\n e0 += cfg.a_unscaled[cfg.ntrain+i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[cfg.ntrain+i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[cfg.ntrain+i][j1]\n y += pred[i]\n \n y = y/ntest \n e0 = e0/ntest\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(ntest):\n rmse += (pred[i]-cfg.a_unscaled[cfg.ntrain+i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[cfg.ntrain+i][-1]) \n e1 += (cfg.a_unscaled[cfg.ntrain+i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/ntest)\n mae = mae/ntest\n\n if ntest > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(ntest):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[cfg.ntrain+i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[cfg.ntrain+i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return pred,rmse,mae,ce,r", "def eight_point(points_lst):\r\n\r\n # get H for normalization and produce normalized points\r\n points_lst = np.array(points_lst)\r\n h_l = get_h(points_lst[:, 0])\r\n h_r = get_h(points_lst[:, 1])\r\n p_l_norm = [h_l @ np.array([p[0], p[1], 1]) for p in points_lst[:, 0]]\r\n p_r_norm = [h_r @ np.array([p[0], p[1], 1]) for p in points_lst[:, 1]]\r\n\r\n # create A using normalized points\r\n a = []\r\n for p_l, p_r in zip(p_l_norm, p_r_norm):\r\n x_l, y_l = p_l[0], p_l[1]\r\n x_r, y_r = p_r[0], p_r[1]\r\n a.append([x_r * x_l, x_r * y_l, x_r, y_r * x_l, y_r * y_l, y_r, x_l, y_l, 1])\r\n a = np.array(a)\r\n\r\n u, s, vh = np.linalg.svd(a)\r\n f_mat = np.reshape(vh[-1, :], (3, 3))\r\n\r\n # enforce singularity constraint\r\n u, s, vh = np.linalg.svd(f_mat)\r\n s[-1] = 0\r\n f_unscaled = (u * s) @ vh\r\n\r\n # rescale F\r\n return np.linalg.inv(h_r) @ f_unscaled @ np.linalg.inv(h_l)", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
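# Both fundamental-matrix estimators above (estimate_fundamental_matrix and eight_point)
# finish by enforcing the rank-2 singularity constraint; this is that step in isolation,
# with a random stand-in for the unconstrained estimate.
import numpy as np

F = np.random.rand(3, 3)                 # stand-in for the unconstrained estimate
U, D, Vt = np.linalg.svd(F)
D[-1] = 0.0                              # zero the smallest singular value
F_rank2 = (U * D) @ Vt                   # equivalent to U @ np.diag(D) @ Vt
print(np.linalg.matrix_rank(F_rank2))    # 2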
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def ransac_plane_estimation (numpy_cloud, threshold, fixed_point=None, w = .9, z = 0.95 ):\r\n\r\n # variables\r\n current_consensus = 0 # keeps track of how many points match the current plane\r\n best_consensus = 0 # shows how many points matched the best plane yet\r\n consensus_points = np.array([]) # np.ndarray of points matching the cloud\r\n best_normal_vector = np.array ([]) # current best normal vector\r\n\r\n # determine probabilities and number of draws\r\n b = np.float_power(w, 3 ) # probability that all three observations belong to the model\r\n k = ceil(np.log(1-z ) / np.log(1-b )) # estimated number of draws\r\n\r\n # copy cloud\r\n numpy_cloud = numpy_cloud[:, 0:3].copy ()\r\n\r\n # estimate k * 3 random planes, defined through one normal vector and one plane parameter d, respectively\r\n normal_vectors, plane_parameters_d = random_plane_estimation (numpy_cloud, k * 3, fixed_point )\r\n\r\n # iterate through all planes found to see 
which one performs best\r\n for (normal_vector, d) in zip (normal_vectors, plane_parameters_d ):\r\n\r\n # count all points that consent with the plane\r\n current_consensus, current_consensus_points = plane_consensus (numpy_cloud, normal_vector, d, threshold )\r\n\r\n # is the current consensus match higher than the previous ones?\r\n if (current_consensus > best_consensus ):\r\n\r\n # keep best consensus set\r\n consensus_points = current_consensus_points\r\n best_normal_vector = normal_vector\r\n best_consensus = current_consensus\r\n\r\n return best_normal_vector, consensus_points", "def lowest_rank_approx(A,e):\n \n \n U,s,Vh=la.svd(A,full_matrices=False)\n t=s.copy()\n t[t>e]=0\n i=t.nonzero()[0][0]\n \n return U[:,:i].dot(np.diag(s[:i])).dot(Vh[:i,:])", "def solve(m):\n\t\n #with the assumption that at least one terminal state is given:\n if(len(m)==2 or len(m)==1): return [1,1]\n \n #Normalizing the in. matrix and identifying the trans./abs. states:\n m = normalizeProbabilityMatrix(m)\n t = getTransientStates(m)\n a = getAbsorbingStates(m)\n\t\n if len(a) >0:\n print( str(len(a)) + \" absorbing state\" + (\"\" if len(a)<=1 else \"s\" ))\n else:\n print(\"No absorbing state detected\")\n return\n \n #Getting the matrices Q and R as in the canonical form:\n Q = getQ(m,t)\n R = getR(m,t,a)\n I = getIdentity(len(Q))\n I_Q = subtractMatrices(I, Q)\n \n #Getting the fundamental matrix\n N = invertMatrix(I_Q)\n F = multiplyMatrices(N,R)\n \n #packing the result with a common denominator:\n gcd = getGCD(F[0]).denominator\n res=[]\n sum = 0\n for r in F[0]:\n val = int(r.numerator*(gcd/r.denominator))\n sum+=val\n res.append(val)\n res.append(sum) \n return res", "def SK(all_black,all_white,all_other):\n real_zone_1=[]\n real_zone_2=[]\n real_zone_3=[]\n global p\n #FIRST defining the zone value since the more center you are, the\n #more value you will have.\n \n #Zone 1: the gratest value zone\n zone_1=[]\n zone_1_val=0.3\n for i in all_other:\n if 125<=int(i[0])<=1100 and 125<=int(i[1])<=825:\n zone_1.append(i)\n\n #zone 2: second greatest value zone\n zone_2=[]\n zone_2_val=0.2\n for i in all_other:\n if 0<=int(i[0])<=125 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 1100<=int(i[0])<=1225 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 0<=int(i[1])<=125:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 825<=int(i[1])<=950:\n zone_2.append(i)\n\n #zone 3: smallest value zone\n zone_3=[]\n zone_3_val=0.1\n for i in all_other:\n if 0<=int(i[0])<=125 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 0<=int(i[0])<=125 and 825<=int(i[1])<=950:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 825<=int(i[1])<=950:\n zone_3.append(i)\n\n if all_black==[] and all_white==[]:\n p=0 #First hand Black\n #all_black.append([25*25,19*25])\n return[25*25,19*25]\n\n\n \n\n #Calculation of the values\n val=0\n value_list=[] #[[coordinate],val]\n if p == 0: #First hand Black\n for i in all_black:\n x=i[0]\n y=i[1]\n #right down↘️\n if [x+25 ,y+25] in all_other:\n val=1\n value_list.append([[x+25,y+25],val])\n #print('右下 if',value_list)\n #print('Right D if',val)\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y+25*a] in all_black:\n val+=1\n elif [x+25*a,y+25*a] in all_other:\n value_list.append([[x+25*a,y+25*a],val])\n #print('Right D',val)\n #print('右下',value_list)\n elif [x+25*a,y+25*a] in all_white:\n break\n \n #left up↖️\n if [x-25,y-25] in all_other:\n val=1\n value_list.append([[x-25,y-25],val])\n 
#print('Left U if')\n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y-25*a] in all_black:\n val+=1\n elif [x-25*a,y-25*a] in all_other:\n value_list.append([[x-25*a,y-25*a],val])\n #print('Left U')\n elif [x-25*a,y-25*a] in all_white:\n break\n \n #right up↗️ \n if [x+25,y-25] in all_other:\n val=1\n value_list.append([[x+25,y-25],val])\n #print('RU if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y-25*a] in all_black:\n val+=1\n elif [x+25*a,y-25*a] in all_other:\n value_list.append([[x+25*a,y-25*a],val])\n #print('右上')\n elif [x+25*a,y-25*a] in all_white:\n break\n\n #left down↙️\n if [x-25,y+25] in all_other:\n val=1\n value_list.append([[x-25,y+25],val])\n #print('左下 if') \n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y+25*a] in all_black:\n val+=1\n elif [x-25*a,y+25*a] in all_other:\n value_list.append([[x-25*a,y+25*a],val])\n #print('左下')\n elif [x-25*a,y+25*a] in all_white:\n break\n\n #right➡️\n if [x+25,y] in all_other:\n val=1\n value_list.append([[x+25,y],val])\n #print('右',value_list)\n #print('右 if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y] in all_black:\n val+=1\n elif [x+25*a,y] in all_other:\n value_list.append([[x+25*a,y],val])\n #print('右')\n elif [x+25*a,y] in all_white:\n break\n\n #left⬅️ \n if [i[0]-25,i[1]] in all_other:\n val=1\n value_list.append([[i[0]-25,i[1]],val])\n #print('左', value_list)\n #print('左 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0]-25*a,i[1]] in all_black:\n val+=1\n elif [i[0]-25*a,i[1]] in all_other:\n value_list.append([[i[0]-25*a,i[1]],val])\n #print('左')\n elif [i[0]-25*a,i[1]] in all_white:\n break\n\n #down⬇️ \n if [i[0],i[1]+25] in all_other:\n val=1\n value_list.append([[i[0],i[1]+25],val])\n #print('下', value_list)\n #print('下 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]+25*a] in all_black:\n val+=1\n elif [i[0],i[1]+25*a] in all_other:\n value_list.append([[i[0],i[1]+25*a],val])\n #print('下')\n elif [i[0],i[1]+25*a] in all_white:\n break\n \n #up⬆️\n if [i[0],i[1]-25] in all_other:\n val=1\n value_list.append([[i[0],i[1]-25],val])\n #print('上',value_list)\n #print('上 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]-25*a] in all_black:\n val+=1\n elif [i[0],i[1]-25*a] in all_other:\n value_list.append([[i[0],i[1]-25*a],val])\n #print('上')\n elif [i[0],i[1]-25*a] in all_white:\n break\n\n\n\n all_val=[]\n #print(value_list,'这是value_list')\n\n \n sum_value=[]\n coord=[]\n for a in value_list:\n if a[0] not in coord:\n coord.append(a[0])\n #print(coord)\n for b in coord:\n he=[]\n for c in value_list:\n if b == c[0]:\n he.append(c[1])\n #print(he,'这是和')\n sum_value.append([b,sum(he)])\n\n\n\n #print(sum_value,'同样坐标下val相加')\n for i in sum_value:\n all_val.append(i[1])\n #print(all_val,'所有的相加之后的val')\n numb=-1\n all_max=[]\n for v in all_val:\n numb+=1\n if v == max(all_val):\n max_val_list = value_list[numb][0] #max (x,y)\n if value_list[numb][0] in all_other:\n all_max.append(value_list[numb])\n \n \n #print(max(all_val),'max val')\n for u in all_max:\n if u[0] in zone_1:\n real_zone_1.append(u[0])\n if u[0] in zone_2:\n real_zone_2.append(u[0])\n if u[0] in zone_3:\n real_zone_3.append(u[0])\n if real_zone_1 != []:\n print('real_1')\n return real_zone_1[0]\n elif real_zone_2 != []:\n print('Its zone 2')\n return real_zone_2[0]\n elif real_zone_3 != []:\n print('Its zone 3')\n return real_zone_3[0]\n else:\n return \"mistake\"", "def search_optimal_capacities(network, step_size, tolerance, filename):\r\n ## Initialization\r\n # Initialize the value of total flow over the 
network\r\n totalflow = max(network.lb_totalflow, step_size)\r\n \r\n # An auxiliary threshold of the total flow computed based on the capacity upper bounds, used in Line 4 of Algorithm 3.\r\n aux_bound = 1 - np.exp(network.beta - network.b + network.phi/network.u)\r\n \r\n \r\n # Initialize the bounds for flow over each route\r\n ub_flow = np.zeros(network.num_routes)\r\n lb_flow = np.zeros(network.num_routes)\r\n \r\n # Initialize the optimal solution over the network\r\n opt_socialwelfare = np.array([])\r\n opt_totalflow = 0\r\n opt_flows = np.array([])\r\n opt_capacity = np.zeros(network.num_routes)\r\n \r\n\r\n# # For debugging only\r\n# lower_bound = np.zeros(network.num_routes)\r\n# upper_bound = np.zeros(network.num_routes)\r\n# count = 0\r\n \r\n # Try to plot out the (totalflow, social_welfare) scatter plot\r\n z = []\r\n hz = []\r\n# # End of debugging\r\n\r\n ## Start the search\r\n while totalflow < 1 - tolerance:\r\n flag_nofeasibleflow = False\r\n \r\n # Compute the bounds for the flow.\r\n for i in range(network.num_routes):\r\n # Line 3-8 of Algorithm 3. Compute the upper bounds for the flow.\r\n if totalflow >= aux_bound[i]: \r\n x3_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 3) \r\n if x3_star > network.u[i]:\r\n flag_nofeasibleflow = True\r\n break \r\n else:\r\n ub_flow[i] = x3_star \r\n else: \r\n ub_flow[i] = 1 \r\n # Line 9-10 of Algorithm 3. Compute the lower bounds of the flow.\r\n x1_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 1)\r\n x2_star = bisection_search(zeta, 0, 1, [tolerance, tolerance], True, network, totalflow, i, 2)\r\n lb_flow[i] = max(x1_star, x2_star)\r\n \r\n \r\n if not flag_nofeasibleflow:\r\n # Check feasibility of the flow based on the current total flow, lower and upper bounds of the flow\r\n if totalflow < np.sum(lb_flow) or totalflow > np.sum(ub_flow): \r\n totalflow += step_size \r\n\r\n# # For debugging only\r\n# print(\"\\nThe current total flow is: \" + str(totalflow))\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # Eng of debugging\r\n# \r\n continue\r\n \r\n # The implementation of line 11 to 18. Find the optimal flow given the current value of z.\r\n [opt_obj, opt_x] = ip.max_sum_xlogx(network.num_routes, totalflow, lb_flow, ub_flow) \r\n \r\n \r\n # Line 18 of Algorithm 3. 
Compute the social welfare given the current z and optimal q(z).\r\n temp = opt_obj - totalflow * np.log(1-totalflow)\r\n\r\n ##### Testing: to plot out the function of h(z)\r\n z.append(totalflow)\r\n hz.append(temp)\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n if opt_socialwelfare.size == 0 or temp > opt_socialwelfare:\r\n opt_socialwelfare = temp\r\n opt_flows = opt_x\r\n opt_totalflow = totalflow \r\n \r\n # For debugging only\r\n# print(\"\\nUpdate optimal flow\")\r\n# print(opt_x)\r\n# print(lb_flow)\r\n# print(ub_flow)\r\n# print(\"Total flow is \" + str(opt_totalflow)) \r\n \r\n # For debugging\r\n# np.copyto(lower_bound, lb_flow) \r\n# np.copyto(upper_bound, ub_flow) \r\n# count += 1\r\n# print(\"The lower and upper bounds are: \")\r\n# print(lb_flow)\r\n# print(lower_bound)\r\n# print(\"\\n\")\r\n# print(ub_flow)\r\n# print(upper_bound)\r\n# print(\"\\n\")\r\n \r\n totalflow += step_size \r\n\r\n \r\n \r\n# # For debugging only\r\n# print(\"\\n----------------\\n Exiting the while loop.\")\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count)) \r\n# # Eng of debugging\r\n \r\n # Line 20 of ALgorithm 3\r\n if opt_flows.size > 0:\r\n network.update_flow(opt_flows) \r\n for i in range(network.num_routes): \r\n network.compute_capacity(opt_totalflow, i)\r\n opt_capacity[i] = network.capacity[i]\r\n print(\"\\n--------------\\nThe optimal flow is: \")\r\n print(opt_flows)\r\n print(\"\\n--------------\\nThe optimal parking capacity is: \")\r\n print(opt_capacity) \r\n print(\"\\n--------------\\nThe optimal total flow is \" + str(opt_totalflow))\r\n print(\"\\n--------------\\nThe maximum social welfare is \" + str(opt_socialwelfare) +\".\")\r\n \r\n \r\n ##### Testing: to plot out the function of h(z)\r\n #plt.scatter(z, hz, c='r', marker='r')\r\n plt.plot(z, hz, '-', linewidth=0.5)\r\n #plt.xlim(0.5, 1)\r\n plt.savefig(filename + '.png', bbox_inches='tight')\r\n ##### End of Testing: to plot out the function of h(z)\r\n \r\n \r\n \r\n# # For debugging\r\n# temp1 = np.zeros(network.num_routes)\r\n# temp2 = np.zeros(network.num_routes)\r\n# temp3 = np.zeros(network.num_routes)\r\n# for i in range(network.num_routes): \r\n# temp1[i] = zeta(network, i, opt_flows[i], opt_totalflow, 1)\r\n# temp2[i] = zeta(network, i, opt_flows[i], opt_totalflow, 2)\r\n# temp3[i] = zeta(network, i, opt_flows[i], opt_totalflow, 3)\r\n# print(\"The function value of zeta at the optimal flow: \")\r\n# print(temp1)\r\n# print(temp2)\r\n# print(temp3)\r\n# \r\n# # For debugging\r\n# print(\"\\nThe capacity upper bound when optimal flow is found: \")\r\n# print(upper_bound)\r\n# print(\"\\nThe capacity lower bound when optimal flow is found: \")\r\n# print(lower_bound)\r\n# print(str(count))\r\n# # End of debugging\r\n \r\n return opt_flows, opt_capacity, opt_socialwelfare \r\n else:\r\n print(\"\\nNo optimal solution is found!\")\r\n return np.array([]), opt_capacity, opt_socialwelfare", "def diagonal_hessian_guess(self, geom, Z, connectivity, guess_type=\"SIMPLE\"):\n\n logger = logging.getLogger(__name__)\n\n if guess_type == \"SIMPLE\":\n return 0.1\n\n elif guess_type == \"SCHLEGEL\":\n R_BC = v3d.dist(geom[self.B], geom[self.C])\n Rcov = qcel.covalentradii.get(Z[self.B], missing=4.0) + qcel.covalentradii.get(Z[self.C], missing=4.0)\n a = 0.0023\n b = 0.07\n if R_BC > (Rcov + a / b):\n b = 0.0\n return a - (b * (R_BC - 
Rcov))\n\n elif guess_type == \"FISCHER\":\n R = v3d.dist(geom[self.B], geom[self.C])\n Rcov = qcel.covalentradii.get(Z[self.B], missing=4.0) + qcel.covalentradii.get(Z[self.C], missing=4.0)\n a = 0.0015\n b = 14.0\n c = 2.85\n d = 0.57\n e = 4.00\n\n # Determine connectivity factor L\n Brow = connectivity[self.B]\n Crow = connectivity[self.C]\n Bbonds = 0\n Cbonds = 0\n for i in range(len(Crow)):\n Bbonds = Bbonds + Brow[i]\n Cbonds = Cbonds + Crow[i]\n L = Bbonds + Cbonds - 2\n logger.info(\"Connectivity of central 2 torsional atoms - 2 = L = %d\\n\" % L)\n return a + b * (np.power(L, d)) / (np.power(R * Rcov, e)) * (np.exp(-c * (R - Rcov)))\n\n elif guess_type == \"LINDH_SIMPLE\":\n\n R_AB = v3d.dist(geom[self.A], geom[self.B])\n R_BC = v3d.dist(geom[self.B], geom[self.C])\n R_CD = v3d.dist(geom[self.C], geom[self.D])\n k_tau = 0.005\n\n Lindh_Rho_AB = hguess_lindh_rho(Z[self.A], Z[self.B], R_AB)\n Lindh_Rho_BC = hguess_lindh_rho(Z[self.B], Z[self.C], R_BC)\n Lindh_Rho_CD = hguess_lindh_rho(Z[self.C], Z[self.D], R_CD)\n return k_tau * Lindh_Rho_AB * Lindh_Rho_BC * Lindh_Rho_CD\n\n else:\n logger.warning(\n \"\"\"Hessian guess encountered unknown coordinate type.\\n \n As default, identity matrix is used\"\"\"\n )\n return 1.0", "def solution(n, s, a, b, fares):\n\n table = [[float(\"inf\")]*n for _ in range(n)]\n for (c, d, f) in fares:\n table[c-1][d-1] = f\n table[d-1][c-1] = f\n\n for idx in range(n):\n table[idx][idx] = 0\n\n # do floyd to find all shortest paths\n for kdx in range(n):\n for idx in range(n):\n for jdx in range(n):\n table[idx][jdx] = min(table[idx][jdx], table[idx][kdx] + table[kdx][jdx])\n \n# for row in table:\n# print(row)\n \n answer = table[s-1][a-1] + table[s-1][b-1]\n # print(\"seperate:\", answer)\n for idx in range(n):\n # print(\"idx 경유:\", idx, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n answer = min(answer, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n\n # print(\"answer:\", answer)\n return answer", "def unit_tests():\r\n import utils\r\n import parse_file\r\n\r\n nb_bidders = 2\r\n max_value = 20\r\n\r\n bid_pdf = {}\r\n bid_pdf[(0, 0)] = 0.5\r\n bid_pdf[(100, 100)] = 0.5\r\n\r\n # When the tolerance is 0, should be infeasible\r\n tol = 0\r\n lower_bound, upper_bound = inverse_bce_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n if not lower_bound and not upper_bound:\r\n print(\"Infeasibility test: passed\")\r\n else:\r\n print(\"Infeasibility test: failed\")\r\n\r\n # When the tolerance is infinite, we should get the whole range\r\n tol = 10**6\r\n lower_bound, upper_bound = inverse_bce_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n if lower_bound == 0 and upper_bound == max_value:\r\n print(\"Tolerance test: passed\")\r\n else:\r\n print(\"Tolerance test: failed\")\r\n\r\n # The sparse and not sparse version should give the same solutions on reasonable examples.\r\n # Using the data with max_bid=5, max_value=10 as an example here.\r\n max_value = 20\r\n max_bid = 10\r\n\r\n wb = openpyxl.load_workbook('Dataset.xlsx')\r\n sheet1 = wb.get_sheet_by_name('Tract79')\r\n sheet2 = wb.get_sheet_by_name('Trbid79')\r\n\r\n bid_pdf = parse_file.bins_from_data(wb, sheet1, sheet2, nb_bidders, max_bid)\r\n\r\n tol = 0.1\r\n lower_bound_sparse, upper_bound_sparse = inverse_bce_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n lower_bound, upper_bound = inverse_bce(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, tolerance=tol)\r\n\r\n 
if abs(upper_bound_sparse - upper_bound) < 0.00001 and abs(lower_bound_sparse - lower_bound) < 0.00001:\r\n print(\"Equality test: passed\")\r\n else:\r\n print(\"Equality test: failed\")\r\n\r\n # The parametrized sparse code for min tolerance should return 0 when the data is generated using compute_bce\r\n mu = 5\r\n sigma = 5\r\n density = [np.exp(-(v - mu)**2 / (2 * sigma**2))\r\n for v in range(max_value + 1)]\r\n bid_pdf = compute_bce(nb_bidders, max_value, density)\r\n min_tol = inverse_bce_parameterized_sparse_min_tolerance(nb_bidders, \r\n max_value, bid_pdf, utils.first_moment, density)\r\n if min_tol <= 10**(-5):\r\n print(\"Minimum tolerance test: passed\")\r\n else:\r\n print(\"Minimum tolerance test: failed\")\r\n print(\"Minimum tolerance: \" + str(min_tol))\r\n\r\n bid_pdf = {}\r\n bid_pdf[(0, 0)] = 0.5\r\n bid_pdf[(100, 100)] = 0.5\r\n\r\n # When the tolerance is 0, the parameterized codes should be infeasible\r\n tol = 0\r\n lower_bound = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n lower_bound_sparse = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n if not lower_bound and not lower_bound_sparse:\r\n print(\"Parameterized infeasibility test: passed\")\r\n else:\r\n print(\"Parameterized infeasibility test: failed\")\r\n\r\n # When the tolerance is infinite, we should get the whole range for the parameterized code\r\n tol = 10**6\r\n lower_bound = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n lower_bound_sparse = inverse_bce_parameterized_sparse(\r\n nb_bidders, max_value, bid_pdf, utils.first_moment, density, tolerance=tol)\r\n if lower_bound == 0 and lower_bound_sparse == 0:\r\n print(\"Parameterized tolerance test: passed\")\r\n else:\r\n print(\"Parameterized tolerance test: failed\")", "def test_inverse( centering='SYMMETRIC'):\n\n\n npupil = 300 #156\n pctr = int(npupil/2)\n npix = 100 #1024\n u = 20 #100 # of lam/D\n\n npix, u = 2000, 200\n s = (npupil,npupil)\n\n\n\n\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n ctr = (float(npupil)/2.0, float(npupil)/2.0 )\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n pupil /= np.sqrt(pupil.sum())\n\n pupil[100:200, 30:50] = 0\n pupil[0:50, 140:160] = 0\n\n plt.subplot(141)\n plt.imshow(pupil)\n\n print \"Pupil 1 total:\", pupil.sum() \n\n a = mft1.perform(pupil, u, npix)\n\n asf = a.real.copy()\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n print \"PSF total\", psf.sum()\n \n plt.subplot(142)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n\n plt.subplot(143)\n\n pupil2 = mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r)\n\n print \"Pupil 2 total:\", pupil2r.sum() \n\n\n\n a2 = mft1.perform(pupil2r, u, npix)\n psf2 = (a2*a2.conjugate()).real.copy()\n print \"PSF total\", psf2.sum()\n plt.subplot(144)\n plt.imshow(psf2, norm=matplotlib.colors.LogNorm(1e-8, 1.0))", "def initial_guess(self):\n\n x0 = [self.material_model.isotropic_matrix.c1, self.material_model.isotropic_matrix.c2]\n\n if not self.material_model.is_isotropic:\n # c5 is scaled in the optimisation function\n x0.append(self.material_model.fibres.c5 / c5_factor)\n\n if self.include_lm:\n x0.append(self.material_model.fibres.lm)\n\n return np.asarray(x0)", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, 
restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def FitFundamentalMatrix(mss):\n\n\tbatch_size = mss.shape[0]\n\tnum_correspondences = mss.shape[1]\t\n\n\tmss_src, T_src = utils.normalise2dpts(mss[:, :, :2])\n\tmss_target, T_target = utils.normalise2dpts(mss[:, :, 2:])\n\n\tones = torch.tensor([1]).double().cuda().view(1, 1, 1).repeat(batch_size, num_correspondences, 1)\n\tmss_src = torch.cat([mss_src, ones], dim = 2)\n\tmss_target = torch.cat([mss_target, ones], dim = 2)\n\n\trow = torch.cat([\n\t\tmss_src * mss_target[:, :, 0].view(batch_size, num_correspondences, 1), \n\t\tmss_src * mss_target[:, :, 1].view(batch_size, num_correspondences, -1),\n\t\tmss_src \n\t], dim = 2)\n\n\n\touter_product = row.unsqueeze(3) * row.unsqueeze(2)\n\tmat = outer_product.sum(dim = 1)\n\n\n\tU = []\n\tS = []\n\tVT = []\n\n\tfor i in range(batch_size):\n\n\t\t_, V = torch.symeig(mat[i])\n\t\tu, s, v = torch.svd(V[:, 0].view(3, 3))\n\t\tvt = v.permute(1, 0)\n\t\ts[2] = 0\n\n\t\tU.append(u.unsqueeze(0))\n\t\tS.append(torch.diag(s).unsqueeze(0))\n\t\tVT.append(vt.unsqueeze(0))\n\n\tU = torch.cat(U, dim = 0)\n\tS = torch.cat(S, dim = 0)\n\tVT = torch.cat(VT, dim = 0)\n\n\tF = torch.bmm(torch.bmm(U, S), VT)\n\tF = torch.bmm(T_src, torch.bmm(F.permute(0, 2, 1), T_target.permute(0, 2, 1)))\n\tF = F / F[:, 2, 2].view(batch_size, 1, 1)\n\n\tutils.nan_check(F)\n\n\treturn F", "def test():\n # test getCl\n ISWoutFile = 'ISWout_scalCls.fits'\n ISWinFile = 'ISWin_scalCls.fits'\n ell,temps = getCl(ISWoutFile)\n\n \"\"\"\n # test showCl\n showCl(ell,temps)\n\n # test makeLegendreTable\n # this works fine for small lmax values, but ell=86 and higher have problems\n # possibly due to exceeding the maximum size of a float64 dtype\n makeLegendreTable(9,'testTable.npy')\n table = symLoad('testTable.npy')\n print table\n\n # test powerArray\n powers = powerArray(2,9)\n print powers\n \"\"\"\n\n # test makeCmatrix\n # measured time: 4.25 hrs for 6110 point mask\n startTime = time.time()\n\n # old files no longer used\n #saveMatrixFile = 'covar6110_R010_lowl.npy'\n #saveMatrixFile = 'covar6110_R010.npy'\n #maskFile = '/shared/Data/PSG/hundred_point/ISWmask2_din1_R160.fits'\n #saveMatrixFile = 'covar9875_R160b.npy'\n\n # huge mask\n #maskFile = 'ISWmask9875_RING.fits' #19917 pixels\n #saveMatrixFile = 'covar19917_ISWout_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n # took 24.83 hours\n\n # use ISWin to model expected signal\n #maskFile = 'ISWmask6110_RING.fits'\n #saveMatrixFile = 
'covar6110_ISWin_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, nested=True)\n maskFile = 'ISWmask9875_RING.fits' #9875 pixels\n saveMatrixFile = 'covar9875_ISWin_bws_hp12_RING.npy'\n covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n\n # no beam nor window smoothing, high lmax\n #saveMatrixFile = 'covar6110_ISWout_nBW_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=False, pixWin=False, lmax=2200, nested=False)\n\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n symSave(covMat,saveMatrixFile)\n \"\"\"\n\n # test subMatrix\n subMask = '/shared/Data/PSG/small_masks/ISWmask_din1_R010_trunc0500.fits'\n subCmat = subMatrix(subMask,maskFile,saveMatrixFile)\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n \"\"\"", "def _rdm12_lowfilling_python(self, bradata: Optional['FqeData'] = None\n ) -> Tuple['Nparray', 'Nparray']:\n norb = self.norb()\n nalpha = self.nalpha()\n nbeta = self.nbeta()\n lena = self.lena()\n lenb = self.lenb()\n nlt = norb * (norb + 1) // 2\n\n outpack = numpy.zeros((nlt, nlt), dtype=self.coeff.dtype)\n outunpack = numpy.zeros((norb, norb, norb, norb),\n dtype=self.coeff.dtype)\n if nalpha - 2 >= 0:\n alpha_map, _ = self._core.find_mapping(-2, 0)\n\n def compute_intermediate0(coeff):\n tmp = numpy.zeros((nlt, int(binom(norb, nalpha - 2)), lenb),\n dtype=self.coeff.dtype)\n for i in range(norb):\n for j in range(i + 1, norb):\n for source, target, parity in alpha_map[(i, j)]:\n tmp[i + j * (j + 1) //\n 2, target, :] += coeff[source, :] * parity\n return tmp\n\n inter = compute_intermediate0(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate0(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n if self.nalpha() - 1 >= 0 and self.nbeta() - 1 >= 0:\n alpha_map, beta_map = self._core.find_mapping(-1, -1)\n\n def compute_intermediate1(coeff):\n tmp = numpy.zeros((norb, norb, int(binom(\n norb, nalpha - 1)), int(binom(norb, nbeta - 1))),\n dtype=self.coeff.dtype)\n for i in range(norb):\n for j in range(norb):\n for sourcea, targeta, paritya in alpha_map[(i,)]:\n paritya *= (-1)**(nalpha - 1)\n for sourceb, targetb, parityb in beta_map[(j,)]:\n work = coeff[sourcea,\n sourceb] * paritya * parityb\n tmp[i, j, targeta, targetb] += work\n return tmp\n\n inter = compute_intermediate1(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate1(\n bradata.coeff)\n outunpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((2, 3), (2, 3)))\n\n if self.nbeta() - 2 >= 0:\n _, beta_map = self._core.find_mapping(0, -2)\n\n def compute_intermediate2(coeff):\n tmp = numpy.zeros((nlt, lena, int(binom(norb, nbeta - 2))),\n dtype=self.coeff.dtype)\n for i in range(norb):\n for j in range(i + 1, norb):\n for source, target, parity in beta_map[(i, j)]:\n tmp[i + j * (j + 1) //\n 2, :, target] += coeff[:, source] * parity\n\n return tmp\n\n inter = compute_intermediate2(self.coeff)\n inter2 = inter if bradata is None else compute_intermediate2(\n bradata.coeff)\n outpack += numpy.tensordot(inter2.conj(),\n inter,\n axes=((1, 2), (1, 2)))\n\n out = numpy.zeros_like(outunpack)\n for i in range(norb):\n for j in range(norb):\n ij = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n parityij = 1.0 if i < j else -1.0\n for k in range(norb):\n for l in range(norb):\n parity = parityij * (1.0 if k < l else -1.0)\n out[i, j, k,\n l] -= outunpack[i, j, k, l] + outunpack[j, i, 
l, k]\n mnkl, mxkl = min(k, l), max(k, l)\n work = outpack[ij, mnkl + mxkl * (mxkl + 1) // 2]\n out[i, j, k, l] -= work * parity\n\n return self.rdm1(bradata)[0], out", "def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr", "def rothesstri(A, b):\n n = shape(A)[0]\n A = hstack([A, b])\n for k in range(n-1):\n r = linalg.norm([ A[k , k] , A[k + 1, k] ])\n if r>0:\n c=A[k, k]/r; s=A[k + 1, k]/r\n A[[k, k + 1],(k + 1):(n + 1)]=[[c, s],[-s, c]]*A[[k, k + 1],(k + 1):(n + 1)]\n A[k, k] = r; A[k+1,k] = 0\n z = A[:, n].copy()\n rbacksolve(A[:, :n], z, n)\n return z", "def part1b_2():\n xs = exampleInput\n z = 5.881\n forward = [\n Counter({'-FEAT-': 0.622, '-SIZE-': 0.377}), \n Counter({'-SIZE-': 0.761, '-FEAT-': 0.238}), \n Counter({'-SIZE-': 0.741, '-FEAT-': 0.258})]\n \n z_, forward_ = submission.computeForward(simpleCRF, xs)\n for vec, vec_ in zip( forward, forward_):\n grader.requireIsTrue( Counters.approximateEquals( vec, vec_ ) )\n grader.requireIsEqual( z, z_, 1e-2)", "def xBounds2cvxoptMatrix(p):\n\n #TODO: is reshape/flatten required in newest numpy versions?\n indLB, indUB, indEQ = \\\n where(isfinite(p.lb) & ~(p.lb == p.ub))[0], \\\n where(isfinite(p.ub) & ~(p.lb == p.ub))[0], \\\n where(p.lb == p.ub)[0] \n \n initLenB = Len(p.b)\n initLenBeq = Len(p.beq)\n nLB, nUB, nEQ = Len(indLB), Len(indUB), Len(indEQ)\n\n if nLB>0 or nUB>0:\n A, b = copy(p.A), copy(p.b)\n p.A = zeros([Len(p.b) + nLB+nUB, p.n])\n p.b = zeros(Len(p.b) + nLB+nUB)\n p.b[:Len(b)] = b.flatten() # sometimes flatten is needed when called before runProbSolver(), from tests\n p.A[:Len(b)] = A\n for i in range(len(indLB)):\n p.A[initLenB+i, indLB[i]] = -1\n p.b[initLenB+i] = -p.lb[indLB[i]]\n for i in range(len(indUB)):\n p.A[initLenB+len(indLB)+i, indUB[i]] = 1\n p.b[initLenB+len(indLB)+i] = p.ub[indUB[i]]\n\n if nEQ>0:\n Aeq, beq = copy(p.Aeq), copy(p.beq)\n p.Aeq = zeros([Len(p.beq) + nEQ, p.n])\n p.beq = zeros(Len(p.beq) + nEQ)\n p.beq[:Len(beq)] = beq\n p.Aeq[:Len(beq)] = Aeq\n for i in range(len(indEQ)):\n p.Aeq[initLenBeq+i, indEQ[i]] = 1\n p.beq[initLenBeq+i] = p.lb[indEQ[i]] # = p.ub[indEQ[i]], because they are the same\n\n p.lb = -inf*ones(p.n)\n p.ub = inf*ones(p.n)", "def cross_junctions(I, bounds, Wpts):\n #--- FILL ME IN ---\n\n Ipts = np.zeros((2, 48))\n\n#parameters\n alpha = 0.15 #typically 0.04 to 0.06\n threshold = 1500 #default 2000\n sigma = 2\n ws = 12 #window size for saddle point\n\n#building Harris Detecter\n I = I/255.0\n gradx, 
grady = np.gradient(I)\n IxIx = gaussian_filter(gradx*gradx,sigma)\n IxIy = gaussian_filter(gradx*grady,sigma)\n IyIy = gaussian_filter(grady*grady,sigma)\n print(I.shape)\n\n #get harris score\n cand_score = []\n cand_index = []\n cand = []\n s_cand = []\n\n for j in range(len(I)):\n for i in range(len(I[0])):\n a11 = IxIx[j][i]\n a12 = IxIy[j][i]\n a21 = a12\n a22 = IyIy[j][i]\n A = np.array([[a11, a12],[a21, a22]])\n ev0, ev1 = np.linalg.eigvals(A)\n h_score = ev0*ev1 - alpha*(ev0+ev1)**2\n cand_score.append(-h_score)\n cand_index.append([i, j])\n\n #get the coordinates of the top 5000 scores\n sorted_ind = np.argsort(cand_score)\n sorted_score = np.sort(cand_score).tolist()\n\n for ind in sorted_ind[:threshold]:\n cand.append(cand_index[ind])\n s_cand = sorted_score[:threshold]\n\n\n#clustering\n #using homography to project candidate points to a up-front view\n new_bbox = np.array([[0, 100, 100, 0],[0, 0, 80, 80]])\n H = dlt_homography(bounds, new_bbox)\n cand = np.array(cand).T\n cand = np.vstack((cand, np.ones(cand.shape[1])))\n Ho_cand = np.matmul(H,cand).T\n for pt in Ho_cand:\n pt[0] = pt[0]/pt[2]\n pt[1] = pt[1]/pt[2]\n Ho_cand = Ho_cand[:,:2]\n Ho_cand = Ho_cand.tolist()\n\n #get rid of points that are not in the boundry\n temp_Ho_cand = []\n temp_s_cand = []\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n if (pt[0]>=100) or (pt[0]<0) or (pt[1]>=80) or (pt[1]<0):\n continue\n else:\n temp_Ho_cand.append(pt)\n temp_s_cand.append(s_cand[i])\n Ho_cand = np.array(temp_Ho_cand)\n s_cand = temp_s_cand\n #divide candidates into clusters\n assignment = []\n assignment_score = []\n\n #first put in the point that has the highest score\n assignment.append([Ho_cand[0]])\n assignment_score.append([s_cand[0]])\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n dist = []\n for c in assignment:\n dist.append(np.linalg.norm(pt - c[0]))\n if min(dist) > 6:\n assignment.append([pt])\n assignment_score.append([s_cand[i]])\n\n assignment = np.array(assignment)\n\n #assign points to clusters\n for i in range(len(Ho_cand)):\n pt = Ho_cand[i]\n if (pt[0] == Ho_cand[0][0]) and (pt[1] == Ho_cand[0][1]):\n continue\n dist = []\n for c in assignment:\n dist.append(np.linalg.norm(pt - c[0]))\n index = np.argsort(dist)[-1]\n np.append(assignment[index], pt)\n assignment_score[index].append(s_cand[i])\n\n #get centroids for each cluster\n Ho_centroids = []\n for i in range(len(assignment)):\n cl = assignment[i]\n cl = np.array(cl)\n Ho_centroids.append([np.mean(cl.T[0]),np.mean(cl.T[1])])\n assignment_score[i] = sum(assignment_score[i])\n\n print(len(assignment_score))\n\n Ho_centroids = np.array(Ho_centroids)\n #get rid of edge points\n\n xmin = np.amin(Ho_centroids.T[0])\n xmax = np.amax(Ho_centroids.T[0]) \n ymin = np.amin(Ho_centroids.T[1])\n ymax = np.amax(Ho_centroids.T[1])\n\n final_cand = []\n final_score = []\n for i in range(len(Ho_centroids)):\n pt = Ho_centroids[i]\n if (abs(pt[0] - xmin) <= 3) or (abs(pt[0] - xmax) <= 3) or (abs(pt[1] - ymin) <= 3) or (abs(pt[1] - ymax) <= 3):\n continue\n else:\n final_cand.append(pt)\n final_score.append(assignment_score[i])\n print(\"Number of corner found: \")\n print(len(final_cand))\n\n #get rid of fake corners\n if (len(final_cand)>48):\n ultimate_cand =[]\n for ind in np.argsort(final_score)[:48]:\n ultimate_cand.append(final_cand[ind])\n final_cand = ultimate_cand\n print(\"real corners count:\", len(ultimate_cand))\n\n\n #sort the points\n final_cand = np.array(final_cand)\n y_sort_ind = np.argsort(final_cand.T[1])\n final_cand = 
final_cand.tolist()\n rows = []\n for i in range(6):\n row = []\n for ind in y_sort_ind[i*8:(i+1)*8]:\n row.append(final_cand[ind])\n rows.append(row)\n\n ordered = []\n for row in rows:\n r = []\n x_sort_ind = np.argsort(np.array(row).T[0])\n for ind in x_sort_ind:\n r.append(row[ind])\n ordered.append(r)\n\n final_cand = []\n for row in ordered:\n for pt in row:\n final_cand.append(pt)\n \n\n\n #get coordinates of the centroids in the original frame\n Ho_centroids = np.array(final_cand)\n\n centroids = np.vstack((Ho_centroids.T, np.ones(Ho_centroids.shape[0])))\n centroids = np.matmul(np.linalg.inv(H), centroids).T\n for pt in centroids:\n pt[0] = int(pt[0]/pt[2])\n pt[1] = int(pt[1]/pt[2])\n centroids = centroids[:,:2]\n\n#finding saddle points around the centroids\n saddle_points = []\n for pt in centroids:\n img = I[int(pt[1]-ws):int(pt[1]+ws), int(pt[0]-ws):int(pt[0]+ws)]\n saddle = saddle_point(img)\n saddle = [saddle[0][0]+pt[0]-ws, saddle[1][0]+pt[1]-ws]\n saddle_points.append(saddle)\n\n saddle_points = np.array(saddle_points)\n #------------------\n print(saddle_points.T)\n return saddle_points.T" ]
[ "0.6203597", "0.5916464", "0.5894118", "0.5867515", "0.5715989", "0.56956524", "0.56905115", "0.5686345", "0.56403846", "0.55984086", "0.5590803", "0.5577823", "0.5559308", "0.5542196", "0.5525735", "0.55202436", "0.55189615", "0.55174667", "0.5481329", "0.5479639", "0.54680353", "0.5448406", "0.54449385", "0.5394492", "0.5389445", "0.53708994", "0.53643274", "0.5359837", "0.53582895", "0.53452563", "0.53446096", "0.5342537", "0.5342229", "0.53416127", "0.5330309", "0.5321422", "0.53083634", "0.52950096", "0.5288007", "0.5286932", "0.5283794", "0.5276441", "0.5273803", "0.5268442", "0.52652687", "0.52608323", "0.5253563", "0.5252607", "0.52508545", "0.5250511", "0.524654", "0.52464396", "0.52296376", "0.52270746", "0.5225984", "0.5213786", "0.52124536", "0.521023", "0.52078915", "0.52047116", "0.5202165", "0.5200002", "0.5199372", "0.519302", "0.5192227", "0.5167348", "0.5164294", "0.5163783", "0.5162571", "0.51602423", "0.51583916", "0.51523244", "0.5149355", "0.51384705", "0.5133454", "0.51320696", "0.5131829", "0.51299536", "0.51282793", "0.5128032", "0.51278335", "0.5126323", "0.5121947", "0.5117848", "0.51168174", "0.51156133", "0.5113494", "0.5107247", "0.5106238", "0.5105374", "0.5095095", "0.5093701", "0.50901765", "0.5087117", "0.5085116", "0.5084038", "0.5070339", "0.5063991", "0.50629747", "0.5062934" ]
0.699575
0
This function computes the essential matrix from the fundamental matrix. The E matrix is defined in normalized image coordinates.
def getEssentialMatrix(K, F):
    E = np.dot(K.T, np.dot(F, K))
    u, s, v = np.linalg.svd(E)
    # We correct the singular values of the E matrix
    s_new = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]]).reshape(3, 3)
    final_E = np.dot(u, np.dot(s_new, v))
    return final_E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def P(self):\n self.eigenmatrix()", "def Euler2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1=np.cos(x)\n c2=np.cos(y)\n c3=np.cos(z)\n m=np.array([[c1*c2*c3-s1*s3,-c3*s1-c1*c2*s3,c1*s2],\n [c1*s3+c2*c3*s1,c1*c3-c2*s1*s3,s1*s2],\n [-c3*s2,s2*s3,c2]])\n return m", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def mattock(e, E1, A, B, C):\r\n se = np.sign(e)\r\n ae = np.abs(e)\r\n s = se * E1 * ae * (A + (1 - A) / (1 + (B * ae)**C)**(1 / C))\r\n d = E1 * (A + (1 - A) / (1 + (B * ae)**C)**(1 / C)) - E1 * ae * (1 - A) * B * (B * ae)**(C - 1) / (1 + (B * ae)**C)**(1 / C + 1)\r\n return s, d", "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0", "def _image_orthogonal_matrix22_eigvals(M00, M01, M11):\n tmp1 = M01 * M01\n tmp1 *= 4\n\n tmp2 = M00 - M11\n tmp2 *= tmp2\n tmp2 += tmp1\n cp.sqrt(tmp2, out=tmp2)\n tmp2 /= 2\n\n tmp1 = M00 + M11\n tmp1 /= 2\n l1 = tmp1 + tmp2\n l2 = tmp1 - tmp2\n return l1, l2", "def _compute_eigenmatrix(self, p, expand=False, factor=False,\n simplify=False):\n B = [Matrix(SR, [M[i] for M in p]) for i in range(self._.d + 1)]\n V = SR**(self._.d + 1)\n R = [[self._.d + 1, V, [Integer(1)]]]\n for i in range(1, self._.d + 1):\n S = sorted(([k, m, V.subspace_with_basis(b)]\n for k, b, m in B[i].eigenvectors_right()),\n key=lambda kvb: CoefficientList(kvb[0], self._.vars),\n reverse=True)\n j = 0\n while j < len(R):\n m, s, r = R[j]\n h = 0\n 
while h < len(S):\n k, v, b = S[h]\n sb = s.intersection(b)\n d = sb.dimension()\n if d == v:\n del S[h]\n else:\n S[h][1] -= d\n h += 1\n if d == m:\n R[j][1] = sb\n r.append(k)\n break\n elif d > 0:\n R.insert(j, [d, sb, r + [k]])\n j += 1\n m -= d\n R[j][0] = m\n j += 1\n assert len(R) == self._.d + 1 and all(len(r) == self._.d + 1\n for _, _, r in R), \\\n \"failed to compute the eigenmatrix\"\n return Matrix(SR, [r for _, _, r in R])", "def _init_eigenmatrix(self, P):\n self._.d = nrows(P) - 1\n assert all(len(r) == self._.d + 1 for r in P), \\\n \"parameter length mismatch\"\n P = Matrix(SR, P)\n for i, x in enumerate(P[0]):\n P[0, i] = integralize(x)\n self._.n = sum(P[0])\n return P", "def compute_e(f_mat, m_mat):\r\n return m_mat.T @ f_mat @ m_mat", "def calculate_eigenvalues(self):\n self.__eigenvalues = []\n dictionary = np.linalg.eig(np.array(self.__A))\n indicator = True\n sum1 = 0\n for i in range(self.__A.shape[0]):\n if all(self.__A[i, j] == 0 for j in range(self.__A.shape[1])):\n indicator = all(self.__B[i,j] for j in range(self.__B.shape[1]))\n if (indicator):\n sum1 += 1\n \n for val in dictionary[0]:\n if (val != 0):\n self.__eigenvalues.append(complex(val))\n elif (indicator) and (sum1 > 0):\n sum1 -= 1\n self.__eigenvalues.append(complex(val))", "def eigenCheat( Ja, Jf, truncNum = scipy.inf ):\n H = glueEmH( Ja, Jf, truncNum )\n \n return scipy.linalg.eigh( H )", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return w, v", "def get_E_matrix(dR, dt):\n\n E = np.matmul(\n np.reshape(np_skew_symmetric(dt), (-1, 3, 3)),\n dR\n ).reshape(3, 3)\n return E", "def calculate_cep_matrix(molecule):\n num_atoms = molecule.GetNumAtoms()\n charges = [atom.GetAtomicNum() for atom in molecule.GetAtoms()]\n cep_matrix = np.zeros((num_atoms, num_atoms))\n for bond in molecule.GetBonds():\n bond_type = str(bond.GetBondType()).lower()\n bond_order = get_bond_order(bond_type)\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n degree_i = len(molecule.GetAtomWithIdx(i).GetNeighbors())\n degree_j = len(molecule.GetAtomWithIdx(j).GetNeighbors())\n z_i = degree_i * charges[i]\n z_j = degree_j * charges[j]\n weighted_electronic_distance = ((z_i + z_j) / (bond_order * degree_i * degree_j))\n cep_matrix[i, j] = weighted_electronic_distance\n cep_matrix[j, i] = weighted_electronic_distance\n return cep_matrix", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def fit_evd(self):\n\n # EVD only work on square matrices as we need to compute the eigenvalues and eigenvectors\n # For this we compute the covariance matrix K\n # K should be n x n matrix (pixels x pixels)\n\n # The covariance matrix is nxn\n self.cov_matrix = np.zeros(shape=[self.n_features, self.n_features], dtype='uint8')\n\n self.cov_matrix = np.cov(self.norm_matrix, rowvar=False)\n # C is a symmetric matrix and so it can be diagonalized:\n eig_val, eig_vec = linalg.eig(self.cov_matrix)\n\n # Sorting the eigenvectors by decreasing eigenvalues\n # [Start : stop : stepcount] stepcount is reversed\n idx = eig_val.argsort()[::-1]\n eig_val, eig_vec = eig_val[idx], eig_vec[:, idx]\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = 
np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = eig_vec[:n_components]\n print(\"The principal components have been calculated using eigendecomposition\", self.components.shape)\n\n return self.components", "def compute_normalized_image_to_image_matrix(p1, p2, compute_essential=False):\n n = p1.shape[1]\n if p2.shape[1] != n:\n raise ValueError('Number of points do not match.')\n\n # preprocess image coordinates\n p1n, T1 = scale_and_translate_points(p1)\n p2n, T2 = scale_and_translate_points(p2)\n\n # compute F or E with the coordinates\n F = compute_image_to_image_matrix(p1n, p2n, compute_essential)\n\n # reverse preprocessing of coordinates\n # We know that P1' E P2 = 0\n F = np.dot(T1.T, np.dot(F, T2))\n\n return F / F[2, 2]", "def _emiss_ee(self,Eph):\n if self.weight_ee == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_ee(gam,Eph),\n self._gam, axis=0)\n return emiss", "def get_su_eig(self, delcc):\n pc = SimpleNamespace()\n h = self.h\n if self.rbsize:\n self._inv_mrb()\n if h:\n pc.G = h\n pc.A = h * h / 3\n pc.Ap = h / 2\n if self.unc:\n pv = self._el\n else:\n pv = np.ix_(self._el, self._el)\n if self.m is not None:\n self.m = self.m[pv]\n self.k = self.k[pv]\n self.b = self.b[pv]\n self.kdof = self.nonrf[self._el]\n self.ksize = self.kdof.size\n\n self._el = np.arange(self.ksize) # testing ...\n self._rb = np.arange(0)\n\n if self.elsize:\n self._inv_m()\n A = self._build_A()\n eig_info = eigss(A, delcc)\n pc.wn = eig_info.wn\n pc.zeta = eig_info.zeta\n pc.eig_success = eig_info.eig_success\n if h:\n self._get_complex_su_coefs(pc, eig_info.lam, h)\n self._add_partition_copies(pc, eig_info.lam, eig_info.ur, eig_info.ur_inv)\n return pc", "def head2eeg(self): \n LOG.info(\"Computing Head2EEGMat...\")\n h2s_mat = om.Head2EEGMat(self.om_head, self.om_sensors)\n LOG.info(\"head2eeg: %d x %d\" % (h2s_mat.nlin(), h2s_mat.ncol()))\n return h2s_mat", "def E_to_M(E, ecc):\n with u.set_enabled_equivalencies(u.dimensionless_angles()):\n M = _kepler_equation(E, 0.0 * u.rad, ecc)\n return M", "def E(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating R\", file=self.logfile)\n\n\n TAE = toeplitz(self.A*self.e2[:self.P+1], np.zeros(self.P+1))\n TA = toeplitz(self.A, np.zeros(self.P+1))\n M = np.dot(TAE.transpose(), TA)\n res = toeplitz(np.concatenate([M[:,0], np.zeros((self.L_h-self.P-1))]),\n np.concatenate([M[0,:], np.zeros((self.L_h-self.P-1))]))\n res[-self.P:, -self.P:] = M[1:,1:]\n res = res*np.array([self.e2]).transpose()\n self.R = self.la*self.sigma2*np.linalg.inv(self.la*np.eye(self.L_h) + self.sigma2*res)\n\n\n\n print(\"\", file=self.logfile)\n print(\"Updating mu\", file=self.logfile)\n self.mu = np.dot(self.R, self.h)/self.sigma2\n\n\n # Propagate\n self._propagate_mu()\n self._propagate_R()", "def ensemble_determinant(self):\n return np.linalg.det(self.ensemble_transition_matrix)", "def get_F_matrix_from_E(E, K1, K2):\n F = np.matmul(np.linalg.inv(K2), np.matmul(E,np.linalg.inv(K1)))\n\n return F", "def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)", "def calc_main_axis(self):\n #Clarify 
why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def compute_image_to_image_matrix(x1, x2, compute_essential=False):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n if compute_essential:\n S = [1, 1, 0] # Force rank 2 and equal eigenvalues\n F = np.dot(U, np.dot(np.diag(S), V))\n\n return F", "def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e", "def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e", "def eigenvalue_decomposition (a_t_a_matrix ):\r\n # get eigenvalues and -vectors from ATA matrix\r\n eigenvalues = np.zeros (a_t_a_matrix.shape[0] )\r\n eigenvectors = np.zeros ((a_t_a_matrix.shape[0], a_t_a_matrix.shape[0] ))\r\n evals, evecs = np.linalg.eig (a_t_a_matrix )\r\n\r\n # sort them\r\n indices = np.argsort (-evals ) # reverse sort: greatest numbers first\r\n for loop_count, index in enumerate(indices ):\r\n eigenvalues[loop_count] = evals[index]\r\n eigenvectors[:, loop_count] = evecs[:, index]\r\n\r\n # get the normal vector, normalize it and if it's turned to the ground, turn it around\r\n normal_vector = normalize_vector (eigenvectors[:, -1] ) # the last (smallest) vector is the normal vector\r\n if (normal_vector[2] < 0):\r\n normal_vector = normal_vector * -1\r\n\r\n return normal_vector, eigenvalues[-1]", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# 
print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def eigensystemtomatrix(ew1,ew2,sint):\n if abs(ew1-ew2)/float(ew1)<0.0001:\n raise Exception(\"Can't deal with equal eigenvalues\")\n cost = np.sqrt(1-sint*sint)\n x = abs((ew1-ew2))*cost*sint\n if (ew1>ew2 and sint<1/np.sqrt(2)) or (ew1<ew2 and sint>1/np.sqrt(2)):\n a = 0.5*(ew1+ew2)+np.sqrt(0.25*(ew1+ew2)**2-ew1*ew2-x*x)\n else:\n a = 0.5*(ew1+ew2)-np.sqrt(0.25*(ew1+ew2)**2-ew1*ew2-x*x)\n b = ew1+ew2-a\n return a,b,x", "def eigs(self):\n return np.concatenate(self.operator.eigenvalues)", "def eclogite_massive():\n\n rho = 3490.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 238.85; C[0,1] = 82.01; C[0,2] = 81.44; C[0,3] = 0.3; C[0,4] = -0.02; C[0,5] = 0.5\n C[1,0] = C[0,1]; C[1,1] = 242.12; C[1,2] = 81.11; C[1,3] = -0.66; C[1,4] = 0.33; C[1,5] = 0.12\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 235.57; C[2,3] = -0.28; C[2,4] = 0.22; C[2,5] = 0.31\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 78.72; C[3,4] = 0.27; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 78.37; C[4,5] = 0.25\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 77.91\n\n return C, rho", "def _compute_eigenmatrix(self, k, tr, expand=False, factor=False,\n simplify=False):\n if not self._has(\"omega\"):\n self.cosineSequences(expand=expand, factor=factor,\n simplify=simplify)\n return Matrix(SR, [[self._.omega[tr(i, j)] * k[j]\n for j in range(self._.d + 1)]\n for i in range(self._.d + 1)])", "def _emiss_ep(self,Eph):\n if self.weight_ep == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n eps = (Eph / mec2).decompose().value\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_1(gam,eps),\n self._gam, axis=0).to(u.cm**2 / Eph.unit)\n return emiss", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E", "def Mtof(e,M):\n #first calculate eccentric anomaly (bigE)\n f = np.zeros(len(e))\n for i in np.arange(0,len(e)):\n n=0.\n delta=1000.\n bigE = M[i] - e[i]*np.sin(M[i]) \n while (n<1.e4 and delta>1.e-6):\n f1 = bigE - e[i]*np.sin(bigE) - M[i]\n fp = 1.0 - e[i]*np.cos(bigE)\n delta = -f1/fp\n bigE = bigE + delta\n n = n + 1\n f[i] = 2.*np.arctan( ((1. + e[i])/(1. - e[i]))**0.5 * np.tan(bigE/2.) )\n return f", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. 
Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def get_R_e(E, M, g_e, T_e):\n # transform energy-vector into matrices\n mat_E_x, mat_E_y = np.meshgrid(E,E) \n mat_diff = mat_E_y - mat_E_x # matrix representing: E_i - E_j\n R_e = np.ones((M,M))*g_e**2 * T_e # matrix for transition rates\n ind = np.abs(mat_diff) > 0 # indices of the non-divergent elements\n # fill in just those elements without divergences 1/0\n # the rest is set to the correct limit\n R_e[ind] = g_e**2 * mat_diff[ind]/(np.exp(mat_diff[ind]/T_e)-1)\n return R_e", "def Q(self):\n self.dualEigenmatrix()", "def eigensystem(mat):\n e, v = numpy.linalg.eig(mat)\n\n # `eig` returns complex results but we know all of the\n # eigenstates have real energy.\n e = numpy.real(e)\n\n items = zip(e, v.T)\n items = sorted(items, key = operator.itemgetter(0))\n e, v = zip(*items)\n\n return (e, v)", "def E2f(E):\n f=E/c['h']/u['eV']\n return f", "def computeFundamentalMatrix(pts1, pts2):\n A = np.empty((8, 9))\n for i in range(len(pts1)-1):\n x1 = pts1[i][0]\n x2 = pts2[i][0]\n y1 = pts1[i][1]\n y2 = pts2[i][1]\n A[i] = np.array([x1 * x2, x2 * y1, x2,\n y2 * x1, y2 * y1, y2,\n x1, y1, 1])\n # Compute F matrix by evaluating SVD\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # Constrain the F matrix to rank 2\n U1, S1, V1 = np.linalg.svd(F)\n # print('Old S', S)\n # S[2] = 0\n S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])\n # print('New S', S)\n F = np.dot(np.dot(U1, S2), V1)\n\n return F", "def _check_eigenmatrices(self):\n if self._has(\"P\") and self._has(\"Q\") and \\\n _simplify(_expand(self._.P * self._.Q)) \\\n != self.order(expand=True, simplify=True) \\\n * identity_matrix(SR, self._.d + 1):\n warn(Warning(\"the eigenmatrices do not multiply \"\n \"into a multiple of the identity matrix\"))", "def calc_eigen(self, left=False, **parameter_overrides):\n A, B = self.form_state_space_matrices(**parameter_overrides)\n\n if len(A.shape) == 3: # array version\n evals = np.zeros(A.shape[:2], dtype='complex128')\n evecs = np.zeros(A.shape, dtype='complex128')\n for i, Ai in enumerate(A):\n if left:\n Ai = np.transpose(Ai)\n evals[i], evecs[i] = np.linalg.eig(Ai)\n return evals, evecs\n else:\n if left:\n A = np.transpose(A)\n return np.linalg.eig(A)", "def main():\n N = 201 # Amount of gridpoints, odd number to include 0\n L = 10 # Size of the system\n k = 50 # Amount of energies and states calculated\n x = y = np.linspace(-L/2,L/2,N) # Gridpoints\n h = x[1]-x[0] # Spacing of gridpoints\n\n # Solve the system with and without perturbation\n E,psi,E_p,psi_p = fdm_2d(N,L,x,y,h,k)\n\n # Print the first two energies and the absolute error of the energies\n print('Energies of the two lowest states:')\n print('E_00 = %.4f' % E[0])\n print('E_01 = %.4f' % E[1], '\\n')\n print('Absolute error for E_00: %.4e' % np.abs(E[0]-1))\n print('Absolute error for E_01: %.4e' % np.abs(E[1]-2))\n\n print('\\nEnergies of the two lowest states after perturbation:')\n print('E_00 = %.4f' % E_p[0])\n print('E_01 = %.4f' % E_p[1])\n\n # Calculate the normalized densities of the states\n densities_norm = np.zeros((k,N,N))\n densities_norm_p = np.zeros((k,N,N))\n for i in range(k):\n # meshgrid form\n state = np.reshape(psi.T[i],(N,N))\n state_p = np.reshape(psi_p.T[i],(N,N))\n densities_norm[i] = normalized_density(state,x)\n densities_norm_p[i] = normalized_density(state_p,x)\n\n # Analytical solution of the ground state\n X,Y = np.meshgrid(x,y)\n 
psi00_exact = phi(X,0)*phi(Y,0)\n psi00_exact_density = normalized_density(psi00_exact,x)\n\n print('\\nMaximum absolute error of the normalized ground state densities of the unperturbated system:')\n print('errmax = {:.4e}'.format(np.max(np.abs(densities_norm[0]-psi00_exact_density))))\n\n # Plotting the ground state density of the unperturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_unperturbated.png'))\n plt.close()\n\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n\n # Plotting the ground state density of the perturbated system\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, densities_norm_p[0], cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Normalized ground state density $|\\psi|^2$ of the perturbated system using FDM')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(densities_norm_p[0],extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'FDM_psi00_perturbated.png'))\n plt.close()\n\n fig2 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig2.add_subplot(1,2,1,projection='3d')\n surf2 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n # Plotting the analytic ground state density\n fig1 = plt.figure(figsize=plt.figaspect(0.5))\n ax = fig1.add_subplot(1,2,1,projection='3d')\n surf1 = ax.plot_surface(X, Y, psi00_exact_density, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n fig1.suptitle(r'Analytical normalized ground state density $|\\psi|^2$')\n ax = fig1.add_subplot(1,2,2)\n ax.imshow(psi00_exact_density,extent=[-L/2,L/2,-L/2,L/2])\n plt.savefig(os.path.join(path,'exact_psi00.png'))\n plt.close()\n\n # Plot some of the other densities and save them as pdf\n for i in range(1,20):\n density = densities_norm[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_unperturbated{}.png'.format(i)))\n plt.close()\n\n density_p = densities_norm_p[i]\n fig = plt.figure(figsize=plt.figaspect(0.5))\n plt.imshow(density_p,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('n={}'.format(i))\n plt.savefig(os.path.join(path,'FDM_perturbated{}.png'.format(i)))\n plt.close() \n\n # Plot analytical states until nx,ny = 5\n for nx in range(6):\n for ny in range(6):\n state = phi(X,nx)*phi(Y,ny)\n density = normalized_density(state,x)\n plt.figure()\n plt.imshow(density,extent=[-L/2,L/2,-L/2,L/2])\n plt.title('$n_x={}, n_y={}$'.format(nx,ny))\n plt.savefig(os.path.join(path,'analytical_state_{}_{}.png'.format(nx,ny)))\n plt.close()\n\n # Get analytical energies from nx,ny = 0 to 10\n n = 10\n energies = analytical_energies(n)\n\n # Plot k analytical and the FDM energies\n index = np.arange(k)\n plt.figure()\n plt.plot(index,energies[0:k],label='Analytical energies')\n plt.plot(index,E,label='Unperturbated 
energies')\n plt.plot(index,E_p,label='Perturbated energies')\n plt.legend()\n plt.xlabel('n')\n plt.ylabel(r'$\\tilde{E} = \\frac{E}{\\hbar\\omega}$')\n plt.title('Energies')\n plt.savefig(os.path.join(path,'energies.png'))\n plt.close()", "def true_anomaly_from_eccentric(e, E):\n\n return 2 * atan2(sqrt(1.0 + e) * sin(E / 2.0), sqrt(1.0 - e) * cos(E / 2.0))", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def compute_e(E0, M, e):\r\n E1 = E0 - (E0 - e * sin(E0) - M) / (1 - e * cos(E0))\r\n if abs(abs(degrees(E1)) - abs(degrees(E0))) > 0.001:\r\n E1 = compute_e(E1, M, e)\r\n return E1", "def matrix_eig(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n hermitian=False,\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n evenTrunc = False,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n # If chis is not specfied, there is no even truncation scheme; else, we\n # keep track of the chi we specfied\n if chis is None:\n evenTrunc = False\n else:\n try:\n chis = list(chis)\n except TypeError:\n chis = [chis]\n chiSpec = max(chis)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n assert self.charge == 0\n assert self.dirs[0] + self.dirs[1] == 0\n assert set(zip(self.qhape[0], self.shape[0])) == set(\n zip(self.qhape[1], self.shape[1])\n )\n\n S_dtype = np.float_ if hermitian else np.complex_\n U_dtype = self.dtype if hermitian else np.complex_\n\n # Eigenvalue decompose each sector at a time.\n # While doing so, also keep track of a list of all eigenvalues, as well\n # as a heap that gives the negative of the absolute value of the\n # largest eigenvalue in each sector. 
These will be needed later when\n # deciding how to truncate the eigenvalues.\n eigdecomps = {}\n dims = {}\n minusabs_next_eigs = []\n all_eigs = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n # This matrix is empty and trivial.\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=U_dtype)\n s = np.empty((m,), dtype=S_dtype)\n eigdecomp = (s, u)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n if hermitian:\n s, u = spsla.eighs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n s, u = spsla.eigs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n if hermitian:\n s, u = np.linalg.eigh(v)\n else:\n s, u = np.linalg.eig(v)\n order = np.argsort(-np.abs(s))\n s = s[order]\n u = u[:, order]\n s = s.astype(S_dtype)\n u = u.astype(U_dtype)\n eigdecomp = (s, u)\n eigdecomps[k] = eigdecomp\n dims[k] = 0\n all_eigs.append(s)\n if 0 not in s.shape:\n heapq.heappush(minusabs_next_eigs, (-np.abs(s[0]), k))\n try:\n all_eigs = np.concatenate(all_eigs)\n except ValueError:\n all_eigs = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_eigs,\n eigdecomps,\n minusabs_next_eigs,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # truncate in both sectors evenly\n if evenTrunc and chiSpec == chi:\n # This piece of codes is only designed\n # with Z2 symmetry tensor in mind\n errmeg = \"The matrix should have two sectors (0,0) and (1,1).\"\n assert len(dims) == 2, errmeg\n if chiSpec % 2 == 0:\n dims[(0, 0)] = int(chiSpec / 2)\n dims[(1, 1)] = int(chiSpec / 2)\n else:\n dims[(0, 0)] = int((chiSpec + 1) / 2)\n dims[(1, 1)] = int((chiSpec - 1) / 2)\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n eigdecomps = {k: v for k, v in eigdecomps.items() if dims[k] > 0}\n for k, v in eigdecomps.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n eigdecomps[k] = (v[0][:d], v[1][:, :d])\n else:\n del eigdecomps[k]\n\n # Initialize S and U.\n d = self.dirs[0]\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=S_dtype,\n invar=False,\n charge=0,\n )\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=U_dtype,\n charge=0,\n )\n\n # Set the blocks of U, S and V.\n for k, v in eigdecomps.items():\n S[(k[0],)] = v[0]\n k_U = (k[0], k[0])\n U[k_U] = v[1]\n\n return S, U, rel_err", "def test_em_nonlinear(self):\n z_matrix = np.array(\n [[0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.16666667],\n [0.03333333, 0.08333333, 0.00000000],\n [0.03333333, 0.08333333, 0.16666667],\n [0.06666667, 0.16666667, 0.00000000],\n [0.06666667, 0.16666667, 0.16666667],\n [0.10000000, 0.16666667, 0.00000000],\n [0.10000000, 0.16666667, 0.16666667],\n [0.13333333, 0.08333333, 0.00000000],\n [0.13333333, 0.08333333, 0.16666667],\n [0.16666667, 0.00000000, 0.00000000],\n [0.16666667, 0.00000000, 0.16666667]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.20724531, 0.31710188, 0.47565280],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n 
self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def dispersion_inverse(self, E):\n if self._ksign is None:\n self._ksign = np.random.choice([-1, 1])\n return np.sqrt(\n 2*self.material.m_star_m * (\n self.material.z + self.ksign*np.sqrt(E**2 - 1)\n )\n ) / self.material.hbar_m", "def eig_faces(u_mat, nmode, dim):\n n = int(nmode)\n nparray = np.zeros(np.size(u_mat[:,0]))\n for i in range(n):\n nparray = nparray + u_mat[:,i]\n \n nparray = np.reshape(nparray,dim)\n return(nparray)", "def affine_matrix(self) -> np.ndarray:\n return self._tf_matrix", "def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def getEichFromEQ(self, ep, verbose=False):\n #assuming plasma is centered in machine here\n zMin = ep.g['ZmAxis'] - 0.25\n zMax = ep.g['ZmAxis'] + 0.25\n zWall = np.linspace(zMin, zMax, 1000)\n zLCFS = ep.g['lcfs'][:,1]\n #this prevents us from getting locations not at midplane\n idx = np.where(np.logical_and(zLCFS>zMin,zLCFS<zMax))\n Rmax = ep.g['lcfs'][:,0][idx].max()\n Rmin = ep.g['lcfs'][:,0][idx].min()\n # geometric quantities\n Rgeo = (Rmax + Rmin) / 2.0\n a = (Rmax - Rmin) / 2.0\n aspect = a/Rgeo\n\n #Regression 15\n C = 1.35\n Cp = -0.02\n Cr = 0.04\n Cb = -0.92\n Ca = 0.42\n # Evaluate Bp at outboard midplane\n Z_omp_sol = 0.0\n Bp = abs(ep.BpFunc.ev(Rmax,Z_omp_sol))\n #Evaluate lq\n self.lqEich = C * self.Psol**Cp * Rgeo**Cr * Bp**Cb * aspect**Ca # in mm\n Bt = abs(ep.BtFunc.ev(ep.g['RmAxis'],ep.g['ZmAxis']))\n if verbose==True:\n print(\"Poloidal Field at midplane: {:f}\".format(Bp))\n print(\"Toroidal Field at axis: {:f}\".format(Bt))\n print(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n log.info(\"Found heat flux width value of: {:f} mm\".format(self.lqEich))\n return", "def decomposition_method(matrix):\n x, y, z = 0, 1, 2 # indices\n K = np.array([\n [R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],\n [R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],\n [R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],\n [R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]\n ])\n K = K / 3.0\n\n e_vals, e_vecs = np.linalg.eig(K)\n print('Eigenvalues:', e_vals)\n print('Eigenvectors:', e_vecs)\n max_index = np.argmax(e_vals)\n principal_component = e_vecs[max_index]\n return principal_component", "def update_E(self):\n self.grid.E[:, 0, :, :] = self.grid.E[:, -1, :, :]", "def eclogite_foliated():\n\n rho = 3300.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 203.45; C[0,1] = 67.76; C[0,2] = 64.47; C[0,3] = 0.08; C[0,4] = 1.9; C[0,5] = -0.4\n C[1,0] = C[0,1]; C[1,1] = 220.58; C[1,2] = 63.65; C[1,3] = 0.46; C[1,4] = 0.59; C[1,5] = 0.06\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 189.75; C[2,3] = 0.13; C[2,4] = 0.95; C[2,5] = -0.2\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 66.32; C[3,4] = -0.27; C[3,5] = 0.73\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.77; C[4,5] = -0.02\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 70.75\n\n return C, rho", 
"def normal_modes(self, finite_step):\n\n # Get the mass weighted hessian matrix in amu\n hessian = self.calculate_hessian(finite_step)\n\n # Now get the eigenvalues and vectors\n e_vals, e_vectors = np.linalg.eig(hessian)\n print(e_vals)\n print(e_vectors)", "def update_E(self):\n self.grid.E[:, :, 0, :] = self.grid.E[:, :, -1, :]", "def get_eigenvalues(self):\n ev, v = self.fa.get_eigenvalues()\n df_eignevalues = pd.DataFrame(ev)\n if self.verbose:\n print(f'Eigenvalues\\n{df_eignevalues}\\n')\n return df_eignevalues", "def epidote():\n\n rho = 3465.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 211.5; C[0,1] = 65.6; C[0,2] = 43.2; C[0,3] = 0.; C[0,4] = -6.5; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 239.; C[1,2] = 43.6; C[1,3] = 0.; C[1,4] = -10.4; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 202.1; C[2,3] = 0.; C[2,4] = -20.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.1; C[3,4] = 0.; C[3,5] = -2.3\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 43.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.5\n\n return C, rho", "def normalize(self, matrix):\n eigvals, eigvecs = np.linalg.eig(matrix)\n Sdiag = np.diagonal(np.linalg.inv(eigvecs)@matrix@eigvecs)\n S12diag = Sdiag**-.5\n S12 = np.zeros((len(S12diag), len(S12diag)))\n np.fill_diagonal(S12, S12diag)\n return S12", "def H(self) -> BaseMatrix:", "def H(self) -> BaseMatrix:", "def diagonalize_asymm(H):\n E,C = np.linalg.eig(H)\n #if np.allclose(E.imag, 0*E.imag):\n # E = np.real(E)\n #else:\n # print \"WARNING: Eigenvalues are complex, will be returned as such.\"\n\n idx = E.real.argsort()\n E = E[idx]\n C = C[:,idx]\n\n return E,C", "def analytical_eigenvalues_2d(Ne, lx, ly):\n ev = [(m * np.pi / lx) ** 2 + (n * np.pi / ly) ** 2 for m in range(1, Ne + 1)\n for n in range(1, Ne + 1)]\n ev = np.array(ev)\n\n return ev[:Ne]", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def solve_elas(self,x,E_p=None):\n \n if x['Crystal_Structure'] == \"Cubic\":\n self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )\n\n elif x['Crystal_Structure'] == \"HCP\":\n self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )\n\n # Update orientation\n for n in range(9):\n cell_num_list = list((9*self.cell_num)+n)\n self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]\n \n self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx\n \n if E_p:\n # Note use of sym(), assuming E_p to be the \\chi field\n L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx\n else:\n L_elas_rhs = self.L_elas \n\n self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas) \n \n # Attach near nullspace to matrix\n as_backend_type(self.A_elas).set_near_nullspace(self.null_space)\n\n # Set matrix operator\n self.elasticity_solver.set_operator(self.A_elas);\n\n # Compute solution\n 
self.elasticity_solver.solve(self.ue.vector(), self.b_elas);\n \n if E_p:\n self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n else:\n self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n \n self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))\n\n for grain_no in range(self.grains.array().max()):\n # Grain numbering is 1 index origin\n cell_subset = self.grains.array()==(grain_no+1)\n if np.any(cell_subset):\n self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],\n axis=0,weights=self.dVol[cell_subset]) \n \n deps = self.exp_strn - self.sim_avg\n resid = np.linalg.norm(deps.ravel())\n print(resid) #,self.its)\n return resid", "def lowest_rank_approx(A,e):\n \n \n U,s,Vh=la.svd(A,full_matrices=False)\n t=s.copy()\n t[t>e]=0\n i=t.nonzero()[0][0]\n \n return U[:,:i].dot(np.diag(s[:i])).dot(Vh[:i,:])", "def normE(self):\n\n # Get the magnitude of E and add it to our data\n E_mag = np.zeros_like(self.data['Ex'], dtype=np.float64)\n for comp in ('Ex', 'Ey', 'Ez'):\n E_mag += np.absolute(self.data[comp])**2\n self.extend_data('normE', np.sqrt(E_mag))\n return np.sqrt(E_mag)", "def update_E(self):\n self.grid.E[self.loc] += (\n self.grid.courant_number\n * self.grid.inverse_permittivity[self.loc]\n * self.phi_E\n )", "def read_eigenvalues(self):\n # Eigenvalue line indexes\n index_eig_begin = None\n for iline, line in enumerate(self.lines):\n fstring = 'eigenvalues '\n if line.find(fstring) >= 0:\n index_eig_begin = iline + 1\n line1 = line.replace(':', ',')\n ncol, nband, nkpt, nspin = map(int, line1.split(',')[-4:])\n break\n else:\n return None\n\n # Take into account that the last row may lack \n # columns if nkpt * nspin * nband % ncol != 0\n nrow = int(np.ceil(nkpt * nspin * nband * 1. 
/ ncol))\n index_eig_end = index_eig_begin + nrow\n ncol_last = len(self.lines[index_eig_end - 1].split())\n self.lines[index_eig_end - 1] += ' 0.0 ' * (ncol - ncol_last)\n\n eig = np.loadtxt(self.lines[index_eig_begin:index_eig_end]).flatten()\n eig *= Hartree\n N = nkpt * nband\n eigenvalues = [eig[i * N:(i + 1) * N].reshape((nkpt, nband))\n for i in range(nspin)]\n\n return eigenvalues", "def test_em_float32(self):\n z_matrix = np.array(\n [[0.000, 0.000, 0.333],\n [0.033, 0.050, 0.267],\n [0.067, 0.100, 0.200],\n [0.100, 0.175, 0.100],\n [0.200, 0.200, 0.067],\n [0.267, 0.225, 0.033],\n [0.333, 0.250, 0.000]],\n dtype=np.float32)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.37406776, 0.25186448, 0.37406776],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def get_eigenvalues(self):\n return self.eigenValues", "def EulerZXZ2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1=np.cos(x)\n c2=np.cos(y)\n c3=np.cos(z)\n m=np.array([[c1*c3-c2*s1*s3,-c1*s3-c3*c2*s1,s1*s2],\n [s1*c3+c2*c1*s3,c1*c2*c3-s1*s3,-c1*s2],\n [s3*s2,s2*c3,c2]])\n return m", "def eigCent(A):\n lam,V = np.linalg.eig(A)\n v = V[:,np.argmax(lam)]\n v = v*(1./v[0])\n return v", "def echelon(aug_matrix: list) -> list:\n new_value = aug_matrix[:]\n\n # Loop through each row\n for i, current in enumerate(new_value):\n\n # Check if it's a potential pivot\n if i < len(new_value) - 1:\n\n # Elimate all in the same column beside the pivot\n for j in range(i + 1, len(new_value)):\n coef = new_value[j][i] / current[i]\n new_value[j] = minus(new_value[j], times(coef, current))\n return new_value", "def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]", "def read_eigenvalues(self):\n # Eigenvalue line indexes\n index_eig_begin = None\n for iline, line in enumerate(self.lines):\n fstring = 'eigenvalues '\n if line.find(fstring) >= 0:\n index_eig_begin = iline + 1\n line1 = line.replace(':', ',')\n ncol, nband, nkpt, nspin = map(int, line1.split(',')[-4:])\n break\n else:\n return None\n\n # Take into account that the last row may lack\n # columns if nkpt * nspin * nband % ncol != 0\n nrow = int(np.ceil(nkpt * nspin * nband * 1. 
/ ncol))\n index_eig_end = index_eig_begin + nrow\n ncol_last = len(self.lines[index_eig_end - 1].split())\n if ncol - ncol_last > 0:\n self.lines[index_eig_end - 1] = self.lines[index_eig_end - 1].replace('\\n', '')\n self.lines[index_eig_end - 1] += ' 0.0 ' * (ncol - ncol_last)\n self.lines[index_eig_end - 1] += '\\n'\n eig = np.loadtxt(self.lines[index_eig_begin:index_eig_end]).flatten()\n eig *= Hartree\n N = nkpt * nband\n eigenvalues = [eig[i * N:(i + 1) * N].reshape((nkpt, nband))\n for i in range(nspin)]\n\n return eigenvalues", "def fce_re(B,E):\n return wce(B)/(2.*np.pi)/ev2gamma(E);", "def E_fermi(n_e):\n return n_e / nu0 # in K", "def test_em_linear(self):\n z_matrix = np.array(\n [[0.000, 0.000, 0.333],\n [0.033, 0.050, 0.267],\n [0.067, 0.100, 0.200],\n [0.100, 0.175, 0.100],\n [0.200, 0.200, 0.067],\n [0.267, 0.225, 0.033],\n [0.333, 0.250, 0.000]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.37406776, 0.25186448, 0.37406776],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def affine_matrix(self) -> np.ndarray:\n return self._forward_link._inverse_tf_matrix", "def test_rescale_eigenvalues(self):\n\t\t\n\t\tevals = self.watcher.get_ESD(layer=self.fc2_layer)\n\t\trescaled_evals, Wscale = RMT_Util.rescale_eigenvalues(evals)\n\t\tun_rescaled_evals = RMT_Util.un_rescale_eigenvalues(rescaled_evals, Wscale)\n\n\t\tactual = np.max(evals)\n\t\texpected = np.max(un_rescaled_evals)\n\t\tself.assertAlmostEqual(actual, expected)", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]", "def posdef_eig(mat):\n return posdef_eig_functions[POSDEF_EIG_METHOD](mat)", "def initial_energy(spin_matrix, n_spins):\n\n E = 0\n M = 0\n\n for i in range(n_spins):\n for j in range(n_spins):\n\n left = spin_matrix[i-1, j] if i>0 else spin_matrix[n_spins - 1, j]\n above = spin_matrix[i, j-1] if j>0 else spin_matrix[i, n_spins - 1]\n\n E -= spin_matrix[i,j]*(left+above)\n M += spin_matrix[i,j]\n\n return E, M", "def compute_mesh_eigenfunctions(self, mesh, star0, star1, bdry=False):\n nb = len(mesh)\n\n inputs = []\n for m, s0, s1 in zip(mesh, star0, star1):\n d = m['int_d01']\n if bdry:\n d = scipy.sparse.vstack([d, m['bdry_d01']])\n inputs.extend([s0, s1, d])\n\n eigenvalues, eigenvectors = [], []\n outputs = self.hodgefunc(nb, self.num_eigenvectors,\n self.num_extra_eigenvectors, *inputs)\n for i in range(nb):\n eigenvalues.append(outputs[2*i])\n eigenvectors.append(outputs[2*i+1])\n\n return eigenvalues, eigenvectors", "def M_to_E(M, ecc):\n with u.set_enabled_equivalencies(u.dimensionless_angles()):\n E = optimize.newton(_kepler_equation, M, _kepler_equation_prime,\n args=(M, ecc))\n return E", "def matrix(self, full=False, keeppads=True):\n\n v = np.fft.hfft(self._u, n=self.N) / self.N\n idx = sum(np.ogrid[0:self.N, -self.N:0])\n C = v[idx] # follow scipy.linalg.{circulant,toeplitz,hankel}\n\n if keeppads:\n a = self._yfac_.copy()\n b = self._xfac_.copy()\n else:\n a = self.yfac.copy()\n b = self.xfac.copy()\n C = self._unpad(C, 0, True)\n C = self._unpad(C, 1, False)\n a = a.reshape(-1, 1)\n\n if not full:\n return a, b, C\n else:\n return a * C * b", "def 
eigen_vector_i_all(self):\n return self._eig_vec", "def test_avg_entanglement_fidelity_ensemble():\n # Test on emsemble.\n probs = [1.]\n states = [np.eye(2) / 2.]\n # Test on pauli choi matrix.\n krauss_ops = initialize_pauli_examples(0.1, 0.2, 0.7)\n choi_matrix = sum([np.outer(np.ravel(x, order=\"F\"),\n np.conj(np.ravel(x, order=\"F\"))) for x in krauss_ops])\n choi_obj = ChoiQutip(choi_matrix, [1, 1], 2, 2)\n actual = choi_obj.average_entanglement_fidelity(probs, states)\n desired = np.ravel(states[0], \"F\").dot(choi_matrix.dot(np.ravel(states[0], \"F\")))\n assert np.abs(actual - desired) < 1e-5\n\n # Test on another ensemble\n probs = [0.25, 0.75]\n states = [np.eye(2), (np.eye(2) + 0.2 * np.array([[0., 1.], [1., 0.]])) / 2.]\n actual = choi_obj.average_entanglement_fidelity(probs, states)\n desired = np.ravel(states[0], \"F\").dot(choi_matrix.dot(np.ravel(states[0], \"F\"))) * probs[0]\n desired += np.ravel(states[1], \"F\").dot(choi_matrix.dot(np.ravel(states[1], \"F\"))) * probs[1]\n assert np.abs(actual - desired) < 1e-5\n\n kraus = [np.array([[0., 1.], [1., 0.]])]", "def eig(C):\r\n\r\n # class eig(object):\r\n # def __call__(self, C):\r\n\r\n # Householder transformation of a symmetric matrix V into tridiagonal form.\r\n # -> n : dimension\r\n # -> V : symmetric nxn-matrix\r\n # <- V : orthogonal transformation matrix:\r\n # tridiag matrix == V * V_in * V^t\r\n # <- d : diagonal\r\n # <- e[0..n-1] : off diagonal (elements 1..n-1)\r\n\r\n # Symmetric tridiagonal QL algorithm, iterative\r\n # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations\r\n # -> n : Dimension.\r\n # -> d : Diagonale of tridiagonal matrix.\r\n # -> e[1..n-1] : off-diagonal, output from Householder\r\n # -> V : matrix output von Householder\r\n # <- d : eigenvalues\r\n # <- e : garbage?\r\n # <- V : basis of eigenvectors, according to d\r\n\r\n\r\n # tred2(N, B, diagD, offdiag); B=C on input\r\n # tql2(N, diagD, offdiag, B);\r\n\r\n # private void tred2 (int n, double V[][], double d[], double e[]) {\r\n def tred2 (n, V, d, e):\r\n # This is derived from the Algol procedures tred2 by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. 
Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # factor 1.5 in 30-D\r\n\r\n for j in range(n):\r\n d[j] = V[n-1][j] # d is output argument\r\n\r\n # Householder reduction to tridiagonal form.\r\n\r\n for i in range(n-1,0,-1):\r\n # Scale to avoid under/overflow.\r\n h = 0.0\r\n if not num_opt:\r\n scale = 0.0\r\n for k in range(i):\r\n scale = scale + abs(d[k])\r\n else:\r\n scale = sum(abs(d[0:i]))\r\n\r\n if scale == 0.0:\r\n e[i] = d[i-1]\r\n for j in range(i):\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n V[j][i] = 0.0\r\n else:\r\n\r\n # Generate Householder vector.\r\n if not num_opt:\r\n for k in range(i):\r\n d[k] /= scale\r\n h += d[k] * d[k]\r\n else:\r\n d[:i] /= scale\r\n h = np.dot(d[:i],d[:i])\r\n\r\n f = d[i-1]\r\n g = h**0.5\r\n\r\n if f > 0:\r\n g = -g\r\n\r\n e[i] = scale * g\r\n h = h - f * g\r\n d[i-1] = f - g\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] = 0.0\r\n else:\r\n e[:i] = 0.0\r\n\r\n # Apply similarity transformation to remaining columns.\r\n\r\n for j in range(i):\r\n f = d[j]\r\n V[j][i] = f\r\n g = e[j] + V[j][j] * f\r\n if not num_opt:\r\n for k in range(j+1, i):\r\n g += V[k][j] * d[k]\r\n e[k] += V[k][j] * f\r\n e[j] = g\r\n else:\r\n e[j+1:i] += V.T[j][j+1:i] * f\r\n e[j] = g + np.dot(V.T[j][j+1:i],d[j+1:i])\r\n\r\n f = 0.0\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] /= h\r\n f += e[j] * d[j]\r\n else:\r\n e[:i] /= h\r\n f += np.dot(e[:i],d[:i])\r\n\r\n hh = f / (h + h)\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] -= hh * d[j]\r\n else:\r\n e[:i] -= hh * d[:i]\r\n\r\n for j in range(i):\r\n f = d[j]\r\n g = e[j]\r\n if not num_opt:\r\n for k in range(j, i):\r\n V[k][j] -= (f * e[k] + g * d[k])\r\n else:\r\n V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])\r\n\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n\r\n d[i] = h\r\n # end for i--\r\n\r\n # Accumulate transformations.\r\n\r\n for i in range(n-1):\r\n V[n-1][i] = V[i][i]\r\n V[i][i] = 1.0\r\n h = d[i+1]\r\n if h != 0.0:\r\n if not num_opt:\r\n for k in range(i+1):\r\n d[k] = V[k][i+1] / h\r\n else:\r\n d[:i+1] = V.T[i+1][:i+1] / h\r\n\r\n for j in range(i+1):\r\n if not num_opt:\r\n g = 0.0\r\n for k in range(i+1):\r\n g += V[k][i+1] * V[k][j]\r\n for k in range(i+1):\r\n V[k][j] -= g * d[k]\r\n else:\r\n g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])\r\n V.T[j][:i+1] -= g * d[:i+1]\r\n\r\n if not num_opt:\r\n for k in range(i+1):\r\n V[k][i+1] = 0.0\r\n else:\r\n V.T[i+1][:i+1] = 0.0\r\n\r\n\r\n if not num_opt:\r\n for j in range(n):\r\n d[j] = V[n-1][j]\r\n V[n-1][j] = 0.0\r\n else:\r\n d[:n] = V[n-1][:n]\r\n V[n-1][:n] = 0.0\r\n\r\n V[n-1][n-1] = 1.0\r\n e[0] = 0.0\r\n\r\n\r\n # Symmetric tridiagonal QL algorithm, taken from JAMA package.\r\n # private void tql2 (int n, double d[], double e[], double V[][]) {\r\n # needs roughly 3N^3 operations\r\n def tql2 (n, d, e, V):\r\n\r\n # This is derived from the Algol procedures tql2, by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. 
Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # using vectors from numpy makes it faster\r\n\r\n if not num_opt:\r\n for i in range(1,n): # (int i = 1; i < n; i++):\r\n e[i-1] = e[i]\r\n else:\r\n e[0:n-1] = e[1:n]\r\n e[n-1] = 0.0\r\n\r\n f = 0.0\r\n tst1 = 0.0\r\n eps = 2.0**-52.0\r\n for l in range(n): # (int l = 0; l < n; l++) {\r\n\r\n # Find small subdiagonal element\r\n\r\n tst1 = max(tst1, abs(d[l]) + abs(e[l]))\r\n m = l\r\n while m < n:\r\n if abs(e[m]) <= eps*tst1:\r\n break\r\n m += 1\r\n\r\n # If m == l, d[l] is an eigenvalue,\r\n # otherwise, iterate.\r\n\r\n if m > l:\r\n iiter = 0\r\n while 1: # do {\r\n iiter += 1 # (Could check iteration count here.)\r\n\r\n # Compute implicit shift\r\n\r\n g = d[l]\r\n p = (d[l+1] - g) / (2.0 * e[l])\r\n r = (p**2 + 1)**0.5 # hypot(p,1.0)\r\n if p < 0:\r\n r = -r\r\n\r\n d[l] = e[l] / (p + r)\r\n d[l+1] = e[l] * (p + r)\r\n dl1 = d[l+1]\r\n h = g - d[l]\r\n if not num_opt:\r\n for i in range(l+2, n):\r\n d[i] -= h\r\n else:\r\n d[l+2:n] -= h\r\n\r\n f = f + h\r\n\r\n # Implicit QL transformation.\r\n\r\n p = d[m]\r\n c = 1.0\r\n c2 = c\r\n c3 = c\r\n el1 = e[l+1]\r\n s = 0.0\r\n s2 = 0.0\r\n\r\n # hh = V.T[0].copy() # only with num_opt\r\n for i in range(m-1, l-1, -1): # (int i = m-1; i >= l; i--) {\r\n c3 = c2\r\n c2 = c\r\n s2 = s\r\n g = c * e[i]\r\n h = c * p\r\n r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])\r\n e[i+1] = s * r\r\n s = e[i] / r\r\n c = p / r\r\n p = c * d[i] - s * g\r\n d[i+1] = h + s * (c * g + s * d[i])\r\n\r\n # Accumulate transformation.\r\n\r\n if not num_opt: # overall factor 3 in 30-D\r\n for k in range(n): # (int k = 0; k < n; k++) {\r\n h = V[k][i+1]\r\n V[k][i+1] = s * V[k][i] + c * h\r\n V[k][i] = c * V[k][i] - s * h\r\n else: # about 20% faster in 10-D\r\n hh = V.T[i+1].copy()\r\n # hh[:] = V.T[i+1][:]\r\n V.T[i+1] = s * V.T[i] + c * hh\r\n V.T[i] = c * V.T[i] - s * hh\r\n # V.T[i] *= c\r\n # V.T[i] -= s * hh\r\n\r\n p = -s * s2 * c3 * el1 * e[l] / dl1\r\n e[l] = s * p\r\n d[l] = c * p\r\n\r\n # Check for convergence.\r\n if abs(e[l]) <= eps*tst1:\r\n break\r\n # } while (Math.abs(e[l]) > eps*tst1);\r\n\r\n d[l] = d[l] + f\r\n e[l] = 0.0\r\n\r\n\r\n # Sort eigenvalues and corresponding vectors.\r\n if 11 < 3:\r\n for i in range(n-1): # (int i = 0; i < n-1; i++) {\r\n k = i\r\n p = d[i]\r\n for j in range(i+1, n): # (int j = i+1; j < n; j++) {\r\n if d[j] < p: # NH find smallest k>i\r\n k = j\r\n p = d[j]\r\n\r\n if k != i:\r\n d[k] = d[i] # swap k and i\r\n d[i] = p\r\n for j in range(n): # (int j = 0; j < n; j++) {\r\n p = V[j][i]\r\n V[j][i] = V[j][k]\r\n V[j][k] = p\r\n # tql2\r\n\r\n N = len(C[0])\r\n if 11 < 3:\r\n V = np.array([x[:] for x in C]) # copy each \"row\"\r\n N = V[0].size\r\n d = np.zeros(N)\r\n e = np.zeros(N)\r\n else:\r\n V = [[x[i] for i in xrange(N)] for x in C] # copy each \"row\"\r\n d = N * [0.]\r\n e = N * [0.]\r\n\r\n tred2(N, V, d, e)\r\n tql2(N, d, e, V)\r\n return (array(d), array(V))" ]
[ "0.60159427", "0.6003989", "0.5936057", "0.5899959", "0.58792543", "0.58560514", "0.5814833", "0.58042705", "0.57690716", "0.5768329", "0.57617337", "0.57117414", "0.5709993", "0.5708076", "0.5683127", "0.56726044", "0.56690747", "0.5665278", "0.5657497", "0.56303555", "0.56117654", "0.5608965", "0.558355", "0.5565742", "0.55606663", "0.5553097", "0.5550065", "0.5544799", "0.5516541", "0.5516541", "0.54994315", "0.5492178", "0.54782546", "0.547431", "0.546752", "0.5464522", "0.5457946", "0.54521096", "0.54154146", "0.54019827", "0.53932846", "0.5390314", "0.5390156", "0.5375898", "0.53751427", "0.5366502", "0.53639597", "0.5340142", "0.5333266", "0.53176767", "0.5314984", "0.53121686", "0.53109324", "0.53079486", "0.52920955", "0.5280938", "0.527378", "0.52496195", "0.5247073", "0.52201015", "0.52181727", "0.5216381", "0.5214505", "0.5200785", "0.5199552", "0.51986605", "0.5195401", "0.5193468", "0.5193233", "0.5186295", "0.5186295", "0.5185278", "0.5184201", "0.5170468", "0.5167499", "0.5159651", "0.51580983", "0.51576203", "0.51560515", "0.5151101", "0.5149705", "0.513777", "0.5130073", "0.5129782", "0.51293945", "0.51280665", "0.51274145", "0.5126884", "0.5119607", "0.51191026", "0.51177245", "0.5116116", "0.51148015", "0.510054", "0.5095938", "0.50925285", "0.50883365", "0.50865227", "0.5081581", "0.5079466" ]
0.6334938
0
Given the essential matrix, we derive the camera position and orientation
def ExtractCameraPose(E): u, s, v = np.linalg.svd(E, full_matrices=True) w = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]).reshape(3, 3) c1 = u[:, 2].reshape(3, 1) r1 = np.dot(np.dot(u, w), v).reshape(3, 3) c2 = -u[:, 2].reshape(3, 1) r2 = np.dot(np.dot(u, w), v).reshape(3, 3) c3 = u[:, 2].reshape(3, 1) r3 = np.dot(np.dot(u, w.T), v).reshape(3, 3) c4 = -u[:, 2].reshape(3, 1) r4 = np.dot(np.dot(u, w.T), v).reshape(3, 3) if np.linalg.det(r1) < 0: c1 = -c1 r1 = -r1 if np.linalg.det(r2) < 0: c2 = -c2 r2 = -r2 if np.linalg.det(r3) < 0: c3 = -c3 r3 = -r3 if np.linalg.det(r4) < 0: c4 = -c4 r4 = -r4 cam_center = np.array([c1, c2, c3, c4]) cam_rotation = np.array([r1, r2, r3, r4]) return cam_center, cam_rotation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose", "def camera_matrix(e, p, t):\n # Translates all points such that the camera is centered at the origin.\n T = np.array([[1, 0, 0, -e[0]],\n [0, 1, 0, -e[1]],\n [0, 0, 1, -e[2]],\n [0, 0, 0, 1]])\n\n # Set up orthonormal basis.\n w = e - p\n w = w / np.linalg.norm(w)\n u = np.cross(t, w)\n u = u / np.linalg.norm(u)\n v = np.cross(w, u)\n\n # Rotate points such that camera is aligned with UVW-axes (g -> -z-axis).\n R = np.array([[u[0], u[1], u[2], 0],\n [v[0], v[1], v[2], 0],\n [w[0], w[1], w[2], 0],\n [ 0, 0, 0, 1]])\n return R.dot(T)", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def camera_transformation_from_pose(azimutal, elevation):\n azimutal, elevation = azimutal * 2. * np.pi / 360., elevation * 2. * np.pi / 360.\n azimutal *= -1.\n elevation *= -1.\n r_y = np.array([[np.cos(elevation), 0, np.sin(elevation)],\n [0, 1, 0],\n [-np.sin(elevation), 0, np.cos(elevation)]])\n r_z = np.array([[np.cos(azimutal), -np.sin(azimutal), 0],\n [np.sin(azimutal), np.cos(azimutal), 0],\n [0, 0, 1]])\n r = r_z.dot(r_y)\n # world_to_camera matrix, camera_to_world matrix\n return r, np.linalg.inv(r)", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = 
float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def pose2mat(pose):\n extrinsic = torch.eye(4)\n extrinsic[:3, :] = pose[:, :4]\n inv_extrinsic = torch.inverse(extrinsic)\n extrinsic = torch.inverse(inv_extrinsic)\n h, w, focal_length = pose[:, 4]\n intrinsic = torch.Tensor([[focal_length, 0, w/2],\n [0, focal_length, h/2],\n [0, 0, 1]])\n\n return extrinsic, intrinsic", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def 
get_camera_transform(self):\r\n if not self.pose:\r\n rospy.loginfo(\"no pose!\")\r\n return None\r\n if self.pose.header.frame_id != self.role_name:\r\n rospy.logwarn(\"Unsupported frame received. Supported {}, received {}\".format(\r\n self.role_name, self.pose.header.frame_id))\r\n return None\r\n sensor_location = carla.Location(x=self.pose.pose.position.x,\r\n y=-self.pose.pose.position.y,\r\n z=self.pose.pose.position.z)\r\n quaternion = (\r\n self.pose.pose.orientation.x,\r\n self.pose.pose.orientation.y,\r\n self.pose.pose.orientation.z,\r\n self.pose.pose.orientation.w\r\n )\r\n roll, pitch, yaw = euler_from_quaternion(quaternion)\r\n # rotate to CARLA\r\n sensor_rotation = carla.Rotation(pitch=math.degrees(roll)-90,\r\n roll=math.degrees(pitch),\r\n yaw=-math.degrees(yaw)-90)\r\n return carla.Transform(sensor_location, sensor_rotation)", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def intrinsic_matrix_from_camera(w, h, fov):\n (cx, cy), f = calc_focal_values(w, h, fov)\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def computeOrientation3D(object, P):\n\n # compute rotational matrix around yaw axis\n R = [[np.cos(object.ry), 0, np.sin(object.ry)],\n [0, 1, 0],\n [-np.sin(object.ry), 0, np.cos(object.ry)]]\n\n # orientation in object coordinate system\n orientation_3D = [[0.0, object.l],\n [0.0, 0.0],\n [0.0, 0.0]]\n\n # rotate and translate in camera coordinate system, project in image\n orientation_3D = R * orientation_3D\n orientation_3D[0, :] += object.t[0]\n orientation_3D[1, :] += object.t[1]\n orientation_3D[2, :] += object.t[2]\n\n # vector behind image plane?\n if any(orientation_3D[2, :] < 0.1):\n orientation_2D = []\n else:\n # project orientation into the image plane\n orientation_2D = projectToImage(orientation_3D, P)\n return orientation_2D", "def matrix(self):\n return self._rotation", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def get_matrix(self, transform):\r\n\r\n rotation = transform.rotation\r\n location = transform.location\r\n c_y = np.cos(np.radians(rotation.yaw))\r\n s_y = np.sin(np.radians(rotation.yaw))\r\n c_r = np.cos(np.radians(rotation.roll))\r\n s_r = np.sin(np.radians(rotation.roll))\r\n c_p = np.cos(np.radians(rotation.pitch))\r\n s_p = np.sin(np.radians(rotation.pitch))\r\n matrix = np.matrix(np.identity(4))\r\n matrix[0, 3] = location.x\r\n matrix[1, 3] = location.y\r\n matrix[2, 3] = location.z\r\n matrix[0, 0] = c_p * c_y\r\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\r\n matrix[1, 0] = s_y * c_p\r\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\r\n matrix[2, 0] = s_p\r\n matrix[2, 1] = -c_p * s_r\r\n matrix[2, 2] = c_p * c_r\r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = 
np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def head_pose_points(image, rotation_vector, translation_vector, camera_matrix):\n rear_size = 1\n rear_depth = 0\n front_size = image.shape[1]\n front_depth = front_size*2\n val = [rear_size, rear_depth, front_size, front_depth]\n point_2d = get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val)\n y = (point_2d[5] + point_2d[8])//2\n x = point_2d[2]\n \n return (x, y)", "def get_orientation_matrix(self, p):\n for i in range(3):\n for j in range(3):\n self.Umat[i, j] = p['U%d%d' % (i, j)].value", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def _calculate_camera_pose(frame, K, d, corners, pattern_shape=(6, 4), grid_size=30): # noqa: E501\n img = frame.copy()\n axis = np.float32([[grid_size, 0, 0], [0, grid_size, 0],\n [0, 0, -grid_size]]).reshape(-1, 3)*2\n\n objp = np.zeros((np.prod(pattern_shape), 3), np.float32)\n objp[:, :2] = np.mgrid[0:pattern_shape[0],\n 0:pattern_shape[1]].T.reshape(-1, 2) * grid_size\n\n _, rvecs, tvecs = cv2.solvePnP(objp, corners, K, d)\n R, _ = cv2.Rodrigues(rvecs)\n # project 3D points onto image plane\n imgpts, _ = cv2.projectPoints(axis,\n rvecs, tvecs,\n K, d)\n\n canvas = computer_vision.draw_axis(img, corners, imgpts)\n return R, tvecs, canvas", "def __init__(self, camera):\n self.__camera = camera\n self.__innerOrientationParameters = None\n self.__isSolved = False\n self.__exteriorOrientationParameters = np.array([0, 0, 0, 0, 0, 0], 'f')\n self.__rotationMatrix = None", 
"def test_active_matrix_from_extrinsic_roll_pitch_yaw():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz([0.5 * np.pi, 0, 0]),\n np.array([\n [0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [-1, 0, 0],\n [0, 0, 1],\n [0, 1, 0]\n ])\n )", "def map_to_matrix(x, y):\n x_pos = round(x * ((MATRIX_SIZE_X - 1)/(FRAME_W - 1)))\n y_pos = round(y * ((MATRIX_SIZE_Y - 1)/(FRAME_H - 1)))\n\n x_pos = (MATRIX_SIZE_X - 1) - x_pos #invert x direction (left and right) to account for camera perspective\n\n return x_pos, y_pos", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def test_estimate_head_pose_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n images = [VLImage.load(filename=ROTATED0), VLImage.load(filename=ROTATED90)]\n detections = detector.detect(images, detect68Landmarks=True)\n angles0 = TestHeadPose.headPoseEstimator.estimate(detections[0][0].landmarks68)\n angles90 = TestHeadPose.headPoseEstimator.estimate(detections[1][0].landmarks68)\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach() * 0\n ones = zeros.detach() + 1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).view(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).view(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).view(B, 3, 3)\n\n # rotMat = xmat.bmm(ymat).bmm(zmat)\n # changed to match opencv and conversion euler->mat/mat->euler\n rotMat = torch.bmm(zmat, torch.bmm(ymat, xmat))\n\n return rotMat", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n #reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = 
np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def GetRobotPos(self):\n # read quaternion as the robot's current pose\n orientation_q = self.odom.pose.pose.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n # transfer to Euler's angles, output angles are in the range (-pi, pi)\n (roll, pitch, yaw) = euler_from_quaternion(orientation_list)\n # # transfer to degrees\n # roll = roll / np.pi * 180\n # pitch = pitch / np.pi * 180\n # yaw = yaw / np.pi * 180\n return roll, pitch, yaw", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def _get_camera_to_car_calibration(self, sensor): \r\n calibration = np.identity(3)\r\n calibration[0, 2] = sensor[\"width\"] / 2.0\r\n calibration[1, 2] = sensor[\"height\"] / 2.0\r\n calibration[0, 0] = calibration[1, 1] = sensor[\"width\"] / (2.0 * np.tan(sensor[\"fov\"] * np.pi / 360.0))\r\n return calibration", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 
orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[2, 2]=c\n self.matrix[2, 0]=s\n self.matrix[0, 2]=-s\n self.matrix[0, 0]=c\n return self.matrix", "def processOdom(self, msg):\n self.x = msg.pose.pose.position.x\n self.y = msg.pose.pose.position.y\n self.z = 180 * (msg.pose.pose.orientation.z % 2)\n print(self.z)", "def give_orientation(pose, orr_array):\n pose.orientation.x = orr_array[0]\n pose.orientation.y = orr_array[1]\n pose.orientation.z = orr_array[2]\n pose.orientation.w = orr_array[3]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_projection_matrix(left, right, bottom, top):\r\n zNear = -25.0\r\n zFar = 25.0\r\n inv_z = 1.0 / (zFar - zNear)\r\n inv_y = 1.0 / (top - bottom)\r\n inv_x = 1.0 / (right - left)\r\n mat = [[(2.0 * inv_x), 0.0, 0.0, (-(right + left) * inv_x)],\r\n [0.0, (2.0 * inv_y), 0.0, (-(top + bottom) * inv_y)],\r\n [0.0, 0.0, (-2.0 * inv_z), (-(zFar + zNear) * inv_z)],\r\n [0.0, 0.0, 0.0, 1.0]]\r\n return mat", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[0, 0]=c\n self.matrix[0, 1]=s\n self.matrix[1, 0]=-s\n self.matrix[1, 1]=c\n return self.matrix", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def rotational_transform(self, X):\n\n # Unpack pose? 
could do something different here.\n X_cam, Y_cam, Z_cam, azimuth_cam_deg, pitch_cam_deg, roll_cam_deg = self.p\n\n # Make X a set of homogeneous coors\n X = np.vstack((X, np.ones(X.shape[1])))\n\n # Convert degrees to radians\n azimuth_cam_rad = np.deg2rad(azimuth_cam_deg)\n pitch_cam_rad = np.deg2rad(pitch_cam_deg)\n roll_cam_rad = np.deg2rad(roll_cam_deg)\n\n translation_vec = [X_cam, Y_cam, Z_cam]\n C = self.make_cam_mtx(azimuth_cam_rad, pitch_cam_rad, roll_cam_rad, translation_vec)\n\n return C @ X", "def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):\n max_value = np.amax(prev_image)\n\n if prev_image.dtype == 'float' and max_value <= 1:\n prev_image = np.uint8(prev_image * 255)\n curr_image = np.uint8(curr_image * 255)\n\n if prev_image.dtype == 'float' and max_value > 1:\n prev_image = np.uint8(prev_image)\n curr_image = np.uint8(curr_image)\n\n prev_image = cv.equalizeHist(prev_image)\n curr_image = cv.equalizeHist(curr_image)\n\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=200)\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(prev_image, None)\n kp2, des2 = orb.detectAndCompute(curr_image, None)\n\n # do feature matching\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # calculate perspective transform matrix\n src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n vector_along_x_axis_from_center = \\\n np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],\n [size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)\n vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)\n\n theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],\n vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi\n # negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system\n return theta", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[1, 1]=c\n self.matrix[1, 2]=s\n self.matrix[2, 1]=-s\n self.matrix[2, 2]=c\n return self.matrix", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n 
k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def get_uv_coord(mode, camera, anno_xyz):\r\n\tif camera == 'SK':\r\n\t\t# SK only have left hand. this is only for color image. Unable to translate kp to depth image.\r\n\t\tif mode == 'color':\r\n\t\t\tuv_coord, _ = cv2.projectPoints(anno_xyz, I_SK.R_color, I_SK.T_color, I_SK.K_color, None)\r\n\t\telif mode == 'depth':\r\n\t\t\tuv_coord, _ = cv2.projectPoints(anno_xyz, I_SK.R_depth, I_SK.T_depth, I_SK.K_depth, None)\r\n\t\telse:\r\n\t\t\traise ValueError\r\n\telif camera == 'BB':\r\n\t\tif mode == 'left':\r\n\t\t\tuv_coord, _ = cv2.projectPoints(anno_xyz, I_BB.R_l, I_BB.T_l, I_BB.K, None)\r\n\t\telif mode == 'right':\r\n\t\t\tuv_coord, _ = cv2.projectPoints(anno_xyz, I_BB.R_r, I_BB.T_r, I_BB.K, None)\r\n\t\telse:\r\n\t\t\traise ValueError\r\n\telse:\r\n\t\traise ValueError\r\n\treturn np.reshape(uv_coord, (21, 2))", "def _calculate_homography(self):\n src = np.array(self._metadata[:4])\n\n h_units = self._config.h_units\n v_units = self._config.v_units\n dst = np.array(\n [(0, v_units), (h_units, v_units), (0, 0), (h_units, 0)])\n\n h_m, _ = cv2.findHomography(src, dst)\n self.logger.info(f\"{self.id}: homography calculated\")\n return h_m", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def camera_location(azimuth, elevation, dist):\n\n phi = float(elevation)\n theta = float(azimuth)\n dist = float(dist)\n\n x = dist * math.cos(phi) * math.cos(theta)\n y = dist * math.cos(phi) * math.sin(theta)\n z = dist * math.sin(phi)\n\n return x, y, z", "def update_odometry(self, msg):\n self.px = msg.pose.pose.position.x\n \tself.py = msg.pose.pose.position.y\n \tquat_orig = msg.pose.pose.orientation\n \tquat_list = [ quat_orig.x, quat_orig.y, quat_orig.z, quat_orig.w]\n \t#calculates roll, pitch, and yaw from quaternion\n self.orient = msg.pose.pose.orientation\n (roll , pitch , yaw) = euler_from_quaternion( quat_list )\n \tself.pth = yaw", "def restore_orientation_matrix(self):\n self.Umat = self.init_p", "def test_pose(self):\n t = self.t\n \n # Cyclic functions for orientation and position values\n delta = math.sin(t) * 1000\n alpha = math.cos(t) * math.pi * 2\n \n # Default values\n x = 0\n y = 0\n z = 0\n\n pitch = 0\n yaw = 0\n roll = 0\n \n # assign values cyclically\n if t % (math.pi * 12) < math.pi * 2:\n x = delta\n elif t % (math.pi * 12) < math.pi * 4:\n y = delta\n elif t % (math.pi * 12) < math.pi * 6:\n z = delta\n elif t % (math.pi * 12) < math.pi * 8:\n pitch = alpha\n elif t % (math.pi * 12) < math.pi * 10:\n yaw = alpha\n elif t % (math.pi * 12) < math.pi * 12:\n roll = alpha\n else:\n # Reset counter\n self.t = 0.0\n \n 
return ((x, y, z), (pitch, yaw, roll))", "def _make(self):\n\t\tself.scene.camera = self.camera\n\t\tself.camera.rotation_euler[0] = np.radians(np.random.randint(40, 100) +\n\t\t np.random.random())\n\t\tself.camera.rotation_euler[2] = np.radians(np.random.randint(0, 360) +\n\t\t np.random.random())\n\t\tprint([np.degrees(x) for x in self.camera.rotation_euler])", "def get_direction_matrix(self) -> int:", "def __init__(self, at=(0, 0, 0), eye=(0, 0, -0.1), lens=None,\r\n is_3d=True, scale=1.0):\r\n super(Camera, self).__init__()\r\n\r\n self.at = at\r\n self.start_eye = eye # for reset with different lens settings\r\n self.eye = [eye[0], eye[1], eye[2]]\r\n if lens == None:\r\n from pi3d.Display import Display\r\n lens = [Display.INSTANCE.near, Display.INSTANCE.far, Display.INSTANCE.fov,\r\n Display.INSTANCE.width / float(Display.INSTANCE.height)]\r\n self.lens = lens\r\n self.view = _LookAtMatrix(at, eye, [0, 1, 0])\r\n if is_3d:\r\n self.projection = _ProjectionMatrix(lens[0], lens[1], lens[2] / scale, lens[3])\r\n else:\r\n self.projection = _OrthographicMatrix(scale=scale)\r\n self.model_view = dot(self.view, self.projection)\r\n # Apply transform/rotation first, then shift into perspective space.\r\n self.mtrx = array(self.model_view, copy=True)\r\n # self.L_reflect = _LookAtMatrix(at,eye,[0,1,0],reflect=True)\r\n self.rtn = [0.0, 0.0, 0.0]\r\n\r\n self.was_moved = True", "def convert_pose_inverse_transform(pose):\n translation = np.zeros((4,1))\n translation[0] = -pose.position.x\n translation[1] = -pose.position.y\n translation[2] = -pose.position.z\n translation[3] = 1.0\n\n rotation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n euler_angle = tr.euler_from_quaternion(rotation)\n rotation = np.transpose(tr.rotation_matrix(euler_angle[2], [0,0,1])) # the angle is a yaw\n transformed_translation = rotation.dot(translation)\n\n translation = (transformed_translation[0], transformed_translation[1], transformed_translation[2])\n rotation = tr.quaternion_from_matrix(rotation)\n return (translation, rotation)", "def getDejaVuMatrix(self):\n mtx = self.getRotMatrix((4, 4), transpose=None) # from Quaternion\n mtx[3] = self.getTranslation()\n mtx[:3, 3] = mtx[3, :3]\n mtx[3, :3] = [0, 0, 0]\n return mtx", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def human_readable_pose2d(self, pose):\n\n\t\t# create a quaternion from the pose\n\t\tquaternion = (\n\t\tpose.orientation.x,\n\t\tpose.orientation.y,\n\t\tpose.orientation.z,\n\t\tpose.orientation.w\n\t\t)\n\n\t\t# convert quaternion rotation to euler rotation\n\t\troll, pitch, yaw = 
euler_from_quaternion(quaternion)\n\n\t\tresult = (\n\t\tpose.position.x, # x position\n\t\tpose.position.y, # y position\n\t\tyaw # theta angle\n\t\t)\n\n\t\treturn result", "def test_estimate_head_pose_hight_level_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = VLFaceDetector(DetectorType.FACE_DET_V3, faceEngine)\n\n angles0 = detector.detectOne(VLImage.load(filename=ROTATED0)).headPose\n angles90 = detector.detectOne(VLImage.load(filename=ROTATED90)).headPose\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def convert_pose_to_xy_and_theta(self, passed_stamped_pose):\n # Convert to map coordinate frame from odom\n pose = self.transform(passed_stamped_pose).pose # Apply current transform to given pose\n\n orientation_tuple = (pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w)\n angles = t.euler_from_quaternion(orientation_tuple)\n\n return (pose.position.x, pose.position.y, angles[2])", "def set_matrix(self):\n theta1 = -90\n theta2 = 105\n theta3 = 180\n\n if self.number > 8:\n theta2 = 75\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glPushMatrix()\n glRotatef(theta2, 0.0, 1.0, 0.0)\n glRotatef(theta1, 1.0, 0.0, 0.0)\n glRotatef(theta3, 0.0, 0.0, 1.0)\n matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n glPopMatrix()\n glPopMatrix()\n return matrix", "def _orientation(location, time='now'):\n obstime = parse_time(time)\n\n # Define the frame where its Z axis is aligned with local zenith\n local_frame = AltAz(obstime=obstime, location=location)\n\n return _sun_north_angle_to_z(local_frame)", "def reformat_pose_to_dict(self, now_pose):\n # now_pose è un dict in particolare { pose : [ {position : [{x : value , y:value , z:value} ] } , {orientation : [] } }\n # devo convertire i quaternioni in amgoli di eulero...estrarre i quaternioni da pose_now e convertirli in angoli RPY\n\n lato_corto_2 = 1.65 #1.45 # offset parcheggio\n \n #correggo gli offset x centrare le macchine nei parcheggi\n\n if abs(round(now_pose.position.x,2)) == 22.45:\n if now_pose.position.x < 0 :\n now_pose.position.x+=lato_corto_2\n now_pose.position.y-=0.4\n else :\n now_pose.position.x-=lato_corto_2\n now_pose.position.y+=0.4\n \n if abs(round(now_pose.position.y,2)) == 22.45:\n if now_pose.position.y < 0 :\n now_pose.position.y+=lato_corto_2\n now_pose.position.x+=0.4\n else :\n now_pose.position.y-=lato_corto_2\n now_pose.position.x-=0.4\n\n # correggo la z per renderla uguale all'asfalto che viene spownata nel mondo\n\n offset_asfalto = 0.3\n\n x = now_pose.position.x\n y = now_pose.position.y\n z = now_pose.position.z + offset_asfalto\n\n q1 = now_pose.orientation.x\n q2 = now_pose.orientation.y\n q3 = now_pose.orientation.z\n q4 = now_pose.orientation.w\n\n\n # converto i quaternioni in angoli di rulero RPY in radianti\n orientation_list = [q1,q2,q3,q4]\n\n euler = euler_from_quaternion( orientation_list )\n roll = euler[0]\n pitch = euler[1]\n yaw = round(euler[2],2) + np.pi\n\n\n # creo la lista dei parametri che mi servono nel campo pose:[] del file .yaml\n\n lista_parametri = [x ,y ,z ,roll ,pitch ,yaw ]\n\n # creo un dict con tutti i campi di cui ho bisogno nel file .yaml\n # settare le chiavi 'name' , ' type ' , 'package' , ' pose ' secondo le proprie necessità\n # i due stili sono equivalenti : usare quello 
preferito\n \"\"\"\n {\"name\" : \"park1\" , \n \"type\" : \"sdf\" , \n \"package\" : \"object_spawner\" , \n \"pose \":self.seq(lista_parametri) \n }\n \n \"\"\"\n lista_veicoli = ['macchina','pickup','ferrari','prius_hybrid','car_lexus','car_polo','car_volvo','car_golf']\n num_veicoli = 1\n\n #modificare qui implementando una funzione randomica se si vogliono piu veicoli casuali spawnati\n elemento_lista = {'name' : lista_veicoli[3],\n 'type': 'sdf',\n 'package': 'object_spawner',\n 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n #\"\"\"\n # elemento_lista = {'name' : 'ferrari',\n # 'type': 'urdf',\n # 'package': 'autopark',\n # 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n\n return elemento_lista", "def convert_pose_to_xy_and_theta(pose):\n orientation_tuple = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n angles = euler_from_quaternion(orientation_tuple)\n return pose.position.x, pose.position.y, angles[2]", "def matrix(self):\n\t\t# apply yaw, then pitch, then roll\n\t\treturn Matrix((\n\t\t\t(\t1,\t0,\t\t\t\t\t\t\t0\t\t\t\t\t\t\t),\n\t\t\t(\t0,\tmath.cos(self.roll.val),\t-math.sin(self.roll.val)\t),\n\t\t\t(\t0,\tmath.sin(self.roll.val),\tmath.cos(self.roll.val)\t\t)\n\t\t))* Matrix((\n\t\t\t(\tmath.cos(self.pitch.val),\t0,\tmath.sin(self.pitch.val)\t),\n\t\t\t(\t0,\t\t\t\t\t\t\t1,\t0 \t\t\t\t\t\t\t),\n\t\t\t(\t-math.sin(self.pitch.val),\t0,\tmath.cos(self.pitch.val)\t)\n\t\t)) * Matrix((\n\t\t\t(\tmath.cos(self.yaw.val),\t-math.sin(self.yaw.val),\t0\t),\n\t\t\t(\tmath.sin(self.yaw.val),\tmath.cos(self.yaw.val),\t\t0\t),\n\t\t\t(\t0,\t\t\t\t\t\t0,\t\t\t\t\t\t\t1\t)\n\t\t))", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def compute_right_camera_pose(left_camera_to_world, left_to_right):\n left_world_to_camera = np.linalg.inv(left_camera_to_world)\n right_world_to_camera = np.matmul(left_to_right, left_world_to_camera)\n right_camera_to_world = np.linalg.inv(right_world_to_camera)\n return right_camera_to_world", "def adjust_camera(self):\n pose = deepcopy(self.data['poses']['marker']) # PoseStamped()\n eye_pose = deepcopy(pose)\n eye_pose.pose.position.x += 0.60\n eye_pose.pose.position.z += 0.20\n focus_pose = PoseStamped()\n base_eye_pose = PoseStamped()\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'], pose.header.frame_id)\n focus_pose = self.tfl.transformPose(self.params['world'], pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. 
\\\n getLatestCommonTime(self.params['world'],\n eye_pose.header.frame_id)\n base_eye_pose = self.tfl.transformPose(self.params['world'],\n eye_pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n cam_place = CameraPlacement()\n cam_place.target_frame = self.params['world']\n cam_place.time_from_start = Duration(1)\n # Position of the camera relative to target_frame\n cam_place.eye.header.frame_id = cam_place.target_frame\n cam_place.eye.point = base_eye_pose.pose.position\n # Target_frame-relative point for the focus\n cam_place.focus.header.frame_id = cam_place.target_frame\n cam_place.focus.point = focus_pose.pose.position\n # Target_frame-relative vector that maps to \"up\" in the view plane.\n cam_place.up.header.frame_id = cam_place.target_frame\n cam_place.up.vector.x = 0\n cam_place.up.vector.y = 0\n cam_place.up.vector.z = 1\n self.pub.publish(cam_place)\n return", "def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix", "def getTransposeMatrix(self) -> CMatrix4:\n ...", "def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def test_active_matrix_from_extrinsic_euler_zyz():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw([0.5 * np.pi, 0, 0]),\n np.array([\n [1, 0, 0],\n [0, 0, -1],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 1, 0],\n [0, 0, -1],\n [-1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, 1, 0],\n [-1, 0, 0]\n ])\n )", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = 
-self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def get_camera_metadata(self):\n return self.camera.getHeight(), self.camera.getWidth(), 4 # channels", "def get_coord(depth, pose, intrinsics_color_inv):\n img_height, img_width = depth.shape[0], depth.shape[1]\n mask = np.ones_like(depth)\n mask[depth==0] = 0\n mask = np.reshape(mask, (img_height, img_width,1))\n x = np.linspace(0, img_width-1, img_width)\n y = np.linspace(0, img_height-1, img_height)\n xx, yy = np.meshgrid(x, y)\n xx = np.reshape(xx, (1, -1))\n yy = np.reshape(yy, (1, -1))\n ones = np.ones_like(xx)\n pcoord = np.concatenate((xx, yy, ones), axis=0)\n depth = np.reshape(depth, (1, img_height*img_width))\n ccoord = np.dot(intrinsics_color_inv, pcoord) * depth\n ccoord = np.concatenate((ccoord, ones), axis=0)\n scoord = np.dot(pose, ccoord)\n scoord = np.swapaxes(scoord,0,1)\n scoord = scoord[:,0:3]\n scoord = np.reshape(scoord, (img_height, img_width,3))\n scoord = scoord * mask\n mask = np.reshape(mask, (img_height, img_width))\n return scoord, mask", "def get_current_position(self) -> np.ndarray:\n pose_world = forward_kinematics(self.body, eef_link=self.DoF - 1)\n pose_rcm = self.pose_world2rcm(pose_world, 'matrix')\n return pose_rcm", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def create_cam2world_matrix(forward_vector,\n origin,\n device=None):\n \"\"\"\"\"\"\n\n forward_vector = normalize_vecs(forward_vector)\n up_vector = torch.tensor([0, 1, 0], dtype=torch.float, device=device) \\\n .expand_as(forward_vector)\n\n left_vector = normalize_vecs(\n torch.cross(up_vector,\n forward_vector,\n dim=-1))\n\n up_vector = normalize_vecs(\n torch.cross(forward_vector,\n left_vector,\n dim=-1))\n\n rotation_matrix = torch.eye(4, device=device) \\\n .unsqueeze(0) \\\n .repeat(forward_vector.shape[0], 1, 1)\n rotation_matrix[:, :3, :3] = torch.stack(\n (-left_vector, up_vector, -forward_vector), axis=-1)\n\n translation_matrix = torch.eye(4, device=device) \\\n .unsqueeze(0) \\\n .repeat(forward_vector.shape[0], 1, 1)\n translation_matrix[:, :3, 3] = origin\n\n cam2world = translation_matrix @ rotation_matrix\n\n return cam2world", "def get_x_y_z(drone, p, q, r):\n num_cameras = 2\n camera_constants = [0,math.pi/2]\n rads = np.zeros(num_cameras)\n phis = np.zeros(num_cameras)\n d = np.zeros(num_cameras)\n theta = np.zeros(num_cameras)\n Hs = np.zeros(num_cameras)\n s = 12\n HFOV = math.pi/4\n VFOV = 5*math.pi/36\n HPIX = 1280\n VPIX = 720\n #loop one, where we increment over camera number, and\n # get new information\n\n cent = calculate_centroid(p,q,r)\n for camera_num in range(num_cameras):\n\n A,B = find_a_and_b(p[camera_num],q[camera_num],r[camera_num],cent[camera_num])\n a = find_a(A,B)\n d_in = find_inner_d(a, s)\n angle_c = find_angle_c(a)\n alpha = find_alpha(HFOV, HPIX, A)\n w = find_w(angle_c, s)\n d_out = find_outer_d(w,alpha,a)\n pointy_front = is_point_front(r[camera_num],q[camera_num],p[camera_num],cent[camera_num])\n d[camera_num] = find_d(d_in,d_out,pointy_front)\n theta[camera_num] = find_theta(angle_c,A,B,camera_constants[camera_num])\n k = find_k(drone[camera_num], cent[camera_num])\n angle_k = find_angle_k(k, HFOV, HPIX)\n phi = find_phi(theta[camera_num], 
angle_k)\n rad = find_r(d[camera_num], angle_k)\n phis[camera_num] = phi\n rads[camera_num] = rad\n\n # end of first loop\n\n cosphis = np.cos(phis)\n sinphis = np.sin(phis)\n big_matrix = np.column_stack((cosphis,sinphis))\n points = np.zeros((int(num_cameras*(num_cameras-1)/2),2))\n i = 0\n for pair in itertools.combinations(range(num_cameras), 2):\n matrix_a = np.vstack((big_matrix[pair[0]],big_matrix[pair[1]]))\n vec_b = np.hstack((rads[pair[0]],rads[pair[1]]))\n point = np.linalg.solve(matrix_a, vec_b)\n points[i] = point\n i += 1\n drone_pos = np.mean(points,axis=0)\n\n # start of third loop\n for camera_num in range(num_cameras):\n d_prime = find_d_prime(d[camera_num], theta[camera_num], drone_pos)\n P,Q,M,N = find_P_Q_M_N(p[camera_num],q[camera_num],r[camera_num])\n h = find_h(d[camera_num],P,Q,M,N)\n angle_4 = find_angle_4(h,d[camera_num])\n Y = find_Y(drone[camera_num], cent[camera_num])\n angle_5 = find_angle_5(Y, VFOV, VPIX)\n angle_6 = angle_5 - angle_4\n h_prime = find_h_prime(d_prime, angle_6)\n Hs[camera_num] = h + h_prime\n drone_h = np.mean(H)\n return np.append(drone_pos,drone_h)" ]
[ "0.6524759", "0.64992476", "0.6400119", "0.62868214", "0.6233258", "0.62323457", "0.6178379", "0.6016908", "0.6012133", "0.60015947", "0.5997646", "0.59892434", "0.5961536", "0.5942139", "0.59395564", "0.5925271", "0.5909638", "0.58591086", "0.58126867", "0.581134", "0.5802404", "0.5793011", "0.5793011", "0.5782746", "0.5780559", "0.577595", "0.5741405", "0.57348615", "0.5727813", "0.5678761", "0.56739557", "0.56732994", "0.56709313", "0.5644285", "0.56363326", "0.5630813", "0.562723", "0.5618931", "0.5614829", "0.56064165", "0.56044763", "0.56002766", "0.5599191", "0.5593354", "0.5588049", "0.55827403", "0.55637366", "0.5556181", "0.5550753", "0.5546361", "0.5531557", "0.5531557", "0.5531557", "0.55178195", "0.55095774", "0.5504455", "0.55017334", "0.5501311", "0.55008453", "0.5496078", "0.5488816", "0.5483579", "0.5478765", "0.54749095", "0.54741", "0.546547", "0.54635227", "0.5456508", "0.54564357", "0.54535115", "0.5436113", "0.5427847", "0.5427393", "0.54232115", "0.54132503", "0.5410755", "0.5409979", "0.5408566", "0.539943", "0.5393533", "0.5390372", "0.53890365", "0.53822285", "0.53795594", "0.53770196", "0.5374839", "0.5370888", "0.5360099", "0.53508556", "0.5345568", "0.5336427", "0.5333004", "0.53304064", "0.53297186", "0.53235096", "0.532338", "0.5321936", "0.5319187", "0.5318664", "0.53148115" ]
0.7063537
0
This function returns the extrinsic parameter matrix
def getExtrinsicParameter(K, R, C):
    t = np.dot(-R, C)
    homogeneous_matrix = np.hstack((R.reshape(3, 3), t))
    extrinsic_parameter = np.dot(K, homogeneous_matrix)
    return extrinsic_parameter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_intrinsic_mat(params):\n return np.asarray(\n [\n [params[0], 0.0, params[1]],\n [0.0, params[2], params[3]],\n [0.0, 0.0, 1.0],\n ]\n )", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose", "def extrinsic(self):\n return self._extrinsic", "def matrix_param(self):\n return self.__matrix_param", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def get_params(self) -> np.array:\n pass", "def getMatrix(self) -> CMatrix4:\n ...", "def _get_params(self):\r\n x = np.hstack((self.X.flatten(), self.X_variance.flatten(), SparseGP._get_params(self)))\r\n return x", "def get_parameters(self):\n return self.sess.run(self.A_symm)", "def params(self):\n return {'out_dim': self.out_dim,\n 'act_fn': self.act_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def get_params_array(self):\n return np.array(self.W), np.array(self.b)", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def matrix(self):\n return self._matrix(*self.parameters)", "def M(self):\n return _hypre.HypreParMatrix_M(self)", "def get_design_matrix(x):\n\tF = np.ones((10, 1))\n\tF = np.hstack((F, x))\n\n\treturn F", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def pack_params(K, k, extrinsic_matrices):\n packed_params = []\n\n # Flatten intrinsics\n alpha, beta, gamma, u_c, v_c = K[0,0], K[1,1], K[0,1], K[0,2], K[1,2]\n k1, k2, k3 ,p1, p2 = k\n\n a = [alpha, beta, gamma, u_c, v_c,k1, k2, k3, p1, p2 ]\n\n packed_params.extend(a)\n\n # Flattened extrinsics\n for E in extrinsic_matrices:\n # Convert extrinsics to flattened Rodrigues representation\n R = E[:3, :3]\n t = E[:, 3]\n\n rodrigues = cv2.Rodrigues(R)[0]\n\n rho_x, rho_y, rho_z = rodrigues\n t_x, t_y, t_z = t\n\n e = [rho_x, rho_y, rho_z, t_x, t_y, t_z]\n\n packed_params.extend(e)\n\n packed_params = np.array(packed_params,dtype=object)\n return packed_params", "def get_params(self):\n return self.arr", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)", "def _get_proj_mat(self):\n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vec_handles)\n else:\n IP_mat 
= self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, self.basis_vec_handles)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def _xyz_matrix():\n fx = 583.0\n fy = 583.0\n cx = 321\n cy = 249\n a = -0.0028300396\n b = 3.1006268\n mat = np.array([[1/fx, 0, 0, -cx/fx],\n [0, -1/fy, 0, cy/fy],\n [0, 0, 0, -1],\n [0, 0, a, b]])\n return mat", "def current_parameters(self):\n current = []\n for core_param in range(len(self.q)):\n for approx_param in range(self.q[core_param].param_no):\n current.append(self.q[core_param].vi_return_param(approx_param))\n return np.array(current)", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def get_params(self):\n return deepcopy(np.hstack([to_numpy(v).flatten() for v in\n self.parameters()]))", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def parameters(self):\n # encoded in θ\n return self.theta.columns", "def parameters(self, t):\n if self.fixed_params:\n return (self.As[:, :, 0], self.Bs[:, :, 0], self.Cs[:, :, 0], self.Ds[:, :, 0],\n self.Qs[:, :, 0], self.Rs[:, :, 0])\n\n return (self.As[:, :, t + 1], self.Bs[:, :, t + 1], self.Cs[:, :, t + 1], self.Ds[:, :, t + 1],\n self.Qs[:, :, t + 1], self.Rs[:, :, t + 1])", "def get_projection_matrix(self, aspect):\n return self.ptr.get_projection_matrix(aspect)", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def information_matrix(self):\n return self._cov.inv()", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def P(self):\n self.eigenmatrix()", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def __call__(self): \n m = np.zeros((len(self.observation),))\n k = np.zeros((len(self.observation), len(self.prior)))\n \n sv = self.stateVector\n m = sv[0] * np.exp(-(self.independentVariable/sv[1])) + sv[2]\n \n k[:, 0] = np.exp(-(self.independentVariable/sv[1]))\n k[:, 1] = (sv[0] * self.independentVariable * \n np.exp(-(self.independentVariable/sv[1]))/(sv[1])**2)\n k[:, 2] = np.ones((len(self.observation),))\n \n self.modelCalculation, self.Jacobian = m, k\n \n return m, k", "def test_active_matrix_from_extrinsic_euler_zxz():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zxz([0.5 * np.pi, 0, 0]),\n np.array([\n [0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zxz(\n [0.5 * np.pi, 0, 0.5 * 
np.pi]),\n np.array([\n [-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zxz(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, -1, 0],\n [0, 0, -1],\n [1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zxz(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, -1, 0],\n [1, 0, 0]\n ])\n )", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def get_affine_reg_params(self):\n affine_params = [\n self.affine_reg_pyramid_steps,\n self.affine_reg_used_pyramid_steps,\n ]\n return affine_params", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def get_matrix(self):\n return self._matrix[:3, :]", "def getMatrix(self, frame):\n self.matrix[3, 0]=self.getValue(frame)\n return self.matrix", "def getMatrix(self, frame):\n self.matrix[3, 1]=self.getValue(frame)\n return self.matrix", "def get_em_parameters(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def affine_matrix(self) -> np.ndarray:\n return self._tf_matrix", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def get_parameters(self):\n params = np.concatenate([p.contiguous().view(-1).data.numpy()\n for p in self.__network.parameters()])\n return params.copy()", "def parameters(self):\n return self.pars", "def get_variables(self) -> np.array:\n return np.array([self.m, self.c])", "def design_matrix(nonlinear_p, data, prior):\n P, ecc, omega, M0 = nonlinear_p[:4] # we don't need the jitter here\n\n t = data._t_bmjd\n t0 = data._t_ref_bmjd\n zdot = cy_rv_from_elements(t, P, 1., ecc, omega, M0, t0, 1e-8, 128)\n\n M1 = np.vander(t - t0, N=prior.poly_trend, increasing=True)\n M = np.hstack((zdot[:, None], M1))\n\n return M", "def test_active_matrix_from_extrinsic_euler_zyz():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw([0.5 * np.pi, 0, 0]),\n np.array([\n [1, 0, 0],\n [0, 0, -1],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 1, 0],\n [0, 0, -1],\n [-1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, 1, 0],\n [-1, 0, 0]\n ])\n )", "def getMatrix(self, frame):\n self.matrix[3, 2]=self.getValue(frame)\n return self.matrix", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def test_get_param_matrix_coords(self):\n f81 = F81()\n self.assertEqual(f81.get_param_matrix_coords(), {})\n self.assertTrue(\n len(f81.get_param_matrix_coords(include_ref_cell=True)[\"ref_cell\"]) == 12\n )\n hky85 = HKY85()\n coords = hky85.get_param_matrix_coords()\n self.assertEqual(set(coords), set([\"kappa\"]))\n coords = hky85.get_param_matrix_coords(include_ref_cell=True)\n self.assertEqual(set(coords), set([\"kappa\", \"ref_cell\"]))\n gn = GN()\n coords = 
gn.get_param_matrix_coords(include_ref_cell=True)\n self.assertTrue(len(coords) == 12)\n self.assertTrue(len(coords[\"ref_cell\"]) == 1)", "def predict_mat(self):\n mat = self.covs_mat.dot(self.alpha)\n return mat.reshape(self.shape)", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy / tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def get_cov_matrix_parameters(self):\n cov = numpy.diag(numpy.zeros(self.get_num_parameters()))\n i = 0\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def getTranslationMatrix(tx, ty, tz):\n return MatrixExtended([\n [1, 0, 0, tx],\n [0, 1, 0, ty],\n [0, 0, 1, tz],\n [0, 0, 0, 1]])", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def CreateDesignMatrix_X(z, x, y, n ):\n if len(x.shape) > 1:\n x = np.ravel(x)\n y = np.ravel(y)\n\n N = len(x)\n l = int((n+1)*(n+2)/2) \n X = np.ones((N,l))\n\n for i in range(1,n+1):\n q = int((i)*(i+1)/2)\n for k in range(i+1):\n X[:,q+k] = x**(i-k) * y**k\n \n X, z_, indicies = shuffle(X, z)\n X_train, X_test, z_train, z_test = train_test_split(X, z_, test_size=split_train_test, random_state=seed, shuffle=False)\n X_test, X_val, z_test, z_val = train_test_split(X_test, z_test, test_size=split_test_val, random_state=seed, shuffle=False)\n\n return X, X_train, X_test, X_val, z_train, z_test, z_val, indicies", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def unpack_refinement_params(params):\n intrinsics = params[:10]\n\n # Unpack intrinsics\n alpha, beta, gamma, u_c, v_c, k1, k2, k3 ,p1, p2 = intrinsics\n K = np.array([[alpha, gamma, u_c],\n [ 0., beta, v_c],\n [ 0., 0., 1.]])\n k = np.array([k1, k2, k3, p1, p2])\n\n # Unpack extrinsics\n extrinsic_matrices = []\n for i in range(10, len(params), 6):\n E_rodrigues = params[i:i+6]\n rho_x, rho_y, rho_z, t_x, t_y, t_z = E_rodrigues\n R = cv2.Rodrigues(np.array([rho_x, rho_y, rho_z]))[0]\n t = np.array([t_x, t_y, t_z])\n\n E = np.zeros((3, 4))\n E[:3, :3] = R\n E[:, 3] = t\n\n extrinsic_matrices.append(E)\n\n return K, k, extrinsic_matrices", "def get_remat(self) -> ndarray:\n return self.data.weight[:, None]*self.revar.mapping.mat", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def get_parameter_dict(self):\n prm = ModelParameters()\n prm.define(\"a\", self.a)\n return prm", "def getProjectionMatrix(sorted_eigvecs):\n matrix_w = np.vstack(sorted_eigvecs).transpose()\n return matrix_w", "def get_unitary(self, params: Sequence[float] = []) -> UnitaryMatrix:\n self.check_parameters(params)\n\n H = dot_product(params, self.sigmav)\n eiH = sp.linalg.expm(H)\n 
return UnitaryMatrix(eiH, check_arguments=False)", "def Omat(self):\n if self.standard:\n return np.matrix(((0, -1, 0), (0, 0, 1), (-1, 0, 0)))\n else:\n return np.matrix(((0, 0, 1), (0, 1, 0), (-1, 0, 0)))", "def mat(self) -> np.ndarray:\n Tp = ToeplitzificationOperator(P=self.P, M=self.M, dtype=self.x.dtype)\n return Tp.matvec(self.x)", "def get_vm_parameters(self):\n return (self.__mu, self.__kappa)", "def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def parameters(self):\n parameters = np.concatenate( (np.ravel(self.noise_var),\\\n self.kern.parameters), axis=0)\n\n # check if the parameters have changed\n if not np.array_equal(parameters, self._previous_parameters):\n # remove the internal variables that rely on the parameters\n for attr in self.dependent_attributes:\n setattr(self, attr, None)\n # update the previous parameter array\n self._previous_parameters = parameters.copy()\n return parameters.copy()", "def get_view_matrix(self):\n return self.ptr.get_view_matrix()", "def getTransportMatrix(self):\n\t\treturn self.trMtrx", "def model(self):\n return numpy.array([\n [self.x1, self.y1],\n [self.x2, self.y2],\n [self.x3, self.x3]\n ])", "def _mn_par_ ( self , i ) :\n if not i in self : raise IndexError\n #\n val = ctypes.c_double ( 0 ) \n err = ctypes.c_double ( 0 ) \n #\n res = self.GetParameter ( i , val , err )\n #\n val = float ( val.value )\n err = float ( err.value )\n #\n return VE ( val , err*err )", "def obtain_parameterized_exchanges(self):\n\n lca = self.lca\n\n exchanges = self.ParametersModel.run(self.parameters)\n\n indices_tech = np.array([], dtype=int)\n indices_bio = np.array([], dtype=int)\n\n get_input = lambda exc: (exc['input_db'], exc['input_code'])\n get_output = lambda exc: (exc['output_db'], exc['output_code'])\n\n exc_tech = np.array([exc for exc in exchanges if get_input(exc) in lca.activity_dict])\n if exc_tech.shape[0] != 0:\n mask_tech = lambda i,j : np.where( np.all([lca.tech_params['row']==i, lca.tech_params['col']==j], axis=0) )\n indices_tech = np.hstack([ mask_tech( lca.activity_dict[get_input(exc)],lca.activity_dict[get_output(exc)] ) \\\n for exc in exc_tech]) [0]\n\n exc_bio = np.array([exc for exc in exchanges if get_input(exc) in lca.biosphere_dict])\n if exc_bio.shape[0] != 0:\n mask_bio = lambda i,j : np.where( np.all([lca.bio_params['row']==i, lca.bio_params['col']==j], axis=0) )\n indices_bio = np.hstack([ mask_bio( lca.biosphere_dict[get_input(exc)],lca.activity_dict[get_output(exc)] ) \\\n for exc in exc_bio]) [0]\n parameters_dict = {}\n\n parameters_dict['tech_params_where'] = indices_tech\n parameters_dict['tech_params_amounts'] = np.array([ exc['amount'] for exc in exc_tech ])\n parameters_dict['tech_n_params'] = len(indices_tech)\n\n parameters_dict['bio_params_where'] = indices_bio\n parameters_dict['bio_params_amounts'] = np.array([ exc['amount'] for exc in exc_bio ])\n parameters_dict['bio_n_params'] = len(indices_bio)\n\n # TODO remove this check later on maybe\n assert indices_tech.shape[0] == parameters_dict['tech_n_params']\n assert indices_bio.shape[0] == parameters_dict['bio_n_params']\n\n self.parameters_dict = parameters_dict", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = 
False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def matrix(self) -> sympy.Matrix:\n return self.matrix_factory(*self.params)", "def get_parameter_values(self):\n obsPars = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n obsPars[i] = p.read_value_in_fmu(self.fmu)\n i += 1\n return obsPars", "def parameters(self):\n return {\n 'base':self.base.parameters(),\n 'material':[m.parameters() for m in self.material],\n 'fraction':self.fraction,\n }", "def pose2mat(pose):\n extrinsic = torch.eye(4)\n extrinsic[:3, :] = pose[:, :4]\n inv_extrinsic = torch.inverse(extrinsic)\n extrinsic = torch.inverse(inv_extrinsic)\n h, w, focal_length = pose[:, 4]\n intrinsic = torch.Tensor([[focal_length, 0, w/2],\n [0, focal_length, h/2],\n [0, 0, 1]])\n\n return extrinsic, intrinsic", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def eff_param_string():\n return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'", "def _get_estimate_matrices(self):\n params_mapping = {\n \"state_transition\": \"transition_matrices\",\n \"process_noise\": \"transition_covariance\",\n \"measurement_offsets\": \"observation_offsets\",\n \"transition_offsets\": \"transition_offsets\",\n \"measurement_noise\": \"observation_covariance\",\n \"measurement_function\": \"observation_matrices\",\n \"initial_state\": \"initial_state_mean\",\n \"initial_state_covariance\": \"initial_state_covariance\",\n }\n valid_ems = _validate_estimate_matrices(\n input_ems=self.estimate_matrices, all_ems=list(params_mapping.keys())\n )\n\n em_vars = [params_mapping[em_var] for em_var in valid_ems]\n return em_vars", "def getTransposeMatrix(self) -> CMatrix4:\n ..." ]
[ "0.7331173", "0.63358027", "0.63124555", "0.6310796", "0.61443675", "0.61443675", "0.61398846", "0.6122645", "0.6090896", "0.59263265", "0.58786243", "0.5862724", "0.5847503", "0.58337194", "0.57778543", "0.5750955", "0.57484686", "0.57484686", "0.5668891", "0.5660093", "0.56556934", "0.5648544", "0.5635101", "0.5631414", "0.56252795", "0.56166553", "0.56088704", "0.5601754", "0.5601537", "0.5598446", "0.55844545", "0.5576991", "0.5570305", "0.55607903", "0.5557442", "0.55561656", "0.55435544", "0.5534627", "0.5534272", "0.552924", "0.552924", "0.5527447", "0.5521204", "0.5519148", "0.5518834", "0.5517484", "0.5509614", "0.55082035", "0.5500788", "0.54888505", "0.54841316", "0.54816264", "0.5481105", "0.5480313", "0.54674155", "0.54606974", "0.54536694", "0.5447419", "0.54417795", "0.54380196", "0.5417599", "0.5416744", "0.54127085", "0.54127085", "0.54127085", "0.5406963", "0.5401217", "0.54004127", "0.5391224", "0.53899634", "0.53885734", "0.5388177", "0.5388177", "0.5387547", "0.53874177", "0.5387057", "0.5386207", "0.5386146", "0.53778535", "0.5377387", "0.5372034", "0.5371653", "0.53660333", "0.53660214", "0.5359419", "0.5359244", "0.5342121", "0.534021", "0.5340096", "0.5336724", "0.5330662", "0.53274035", "0.5326683", "0.5313333", "0.53093886", "0.530823", "0.5307088", "0.53051525", "0.5304977", "0.52992827" ]
0.6725677
1
Gets the translation vector and rotation matrix of the camera w.r.t the world frame and removes camera frame ambiguity
def getDisambiguousPose(K, C, R, left_features, right_features):
    check = 0
    for i in range(0, len(R)):
        count = 0
        extrinsic_params = getExtrinsicParameter(K, R[i], C[i])
        for j in range(0, len(left_features)):
            X = getTriangulationPoint(K, extrinsic_params, left_features[j], right_features[j])
            r3 = R[i][2, :].reshape((1, 3))
            cheiralityCondition = np.dot(r3, X[:3] - C[i])
            if cheiralityCondition > 0 and X[2] >= 0:
                count += 1
        if count > check:
            check = count
            Translation = C[i]
            Rotation = R[i]
    if Translation[2] < 0:
        Translation = -Translation
    return Translation, Rotation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_camera_transform(self):\r\n if not self.pose:\r\n rospy.loginfo(\"no pose!\")\r\n return None\r\n if self.pose.header.frame_id != self.role_name:\r\n rospy.logwarn(\"Unsupported frame received. Supported {}, received {}\".format(\r\n self.role_name, self.pose.header.frame_id))\r\n return None\r\n sensor_location = carla.Location(x=self.pose.pose.position.x,\r\n y=-self.pose.pose.position.y,\r\n z=self.pose.pose.position.z)\r\n quaternion = (\r\n self.pose.pose.orientation.x,\r\n self.pose.pose.orientation.y,\r\n self.pose.pose.orientation.z,\r\n self.pose.pose.orientation.w\r\n )\r\n roll, pitch, yaw = euler_from_quaternion(quaternion)\r\n # rotate to CARLA\r\n sensor_rotation = carla.Rotation(pitch=math.degrees(roll)-90,\r\n roll=math.degrees(pitch),\r\n yaw=-math.degrees(yaw)-90)\r\n return carla.Transform(sensor_location, sensor_rotation)", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def camera_transformation_from_pose(azimutal, elevation):\n azimutal, elevation = azimutal * 2. * np.pi / 360., elevation * 2. 
* np.pi / 360.\n azimutal *= -1.\n elevation *= -1.\n r_y = np.array([[np.cos(elevation), 0, np.sin(elevation)],\n [0, 1, 0],\n [-np.sin(elevation), 0, np.cos(elevation)]])\n r_z = np.array([[np.cos(azimutal), -np.sin(azimutal), 0],\n [np.sin(azimutal), np.cos(azimutal), 0],\n [0, 0, 1]])\n r = r_z.dot(r_y)\n # world_to_camera matrix, camera_to_world matrix\n return r, np.linalg.inv(r)", "def telescope_to_camera(telescope_coord, camera_frame):\n x_pos = telescope_coord.cartesian.x\n y_pos = telescope_coord.cartesian.y\n rot = telescope_coord.rotation * -1 # reverse the rotation applied to get to this system\n\n if rot ==0: #if no rotation applied save a few cycles\n x=x_pos\n y=y_pos\n else: # or else rotate all positions around the camera centre\n x = x_pos*cos(rot) - y_pos*sin(rot)\n y = y_pos*sin(rot) + y_pos*cos(rot)\n\n f = telescope_coord.focal_length\n x = x*(f/u.m) # Remove distance units here as we are using small angle approx\n y = y*(f/u.m)\n\n representation = CartesianRepresentation(x.value*u.m ,y.value*u.m,0*u.m)\n\n return camera_frame.realize_frame(representation)", "def get_frame(cap):\n\n #camera matrix for camera calibration\n mtx = np.array(np.mat(\"588.4525598886621, 0, 301.8008794717551; 0, 588.9763096391521, 242.617026416902; 0, 0, 1\"))\n\n #distrotion coefficients for camera calibration\n dist = np.array(np.mat(\"-0.4351555722591889, 0.2082765081608728, -0.006072767012672472, 0.008139871640987759, 0\"))\n\n #get image frame from the camera\n ret, frame = cap.read()\n\n return frame\n\n h, w = frame.shape[:2]\n\n #get the new optimal camera matrix and the roi which can be used to crop the result\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h))\n\n #get the undistroted image\n dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)\n\n x,y,w,h = roi\n\n #get the cropped image\n dst = dst[y:y+h, x:x+w]\n h, w = dst.shape[:2]\n\n #furthur crop the image to reduce the size of arena\n dst = dst[int(h/7):int(h*6/7), int(w/7):int(w*6/7)]\n\n #resize the arena to ARENA_SIZE\n dst = cv2.resize(dst, ARENA_SIZE, interpolation= cv2.INTER_CUBIC)\n\n return dst", "def camera_to_world(self, X):\n raise NotImplementedError", "def world_to_camera(self, X):\n raise NotImplementedError", "def ExtractCameraPose(E):\n u, s, v = np.linalg.svd(E, full_matrices=True)\n w = np.array([[0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]]).reshape(3, 3)\n c1 = u[:, 2].reshape(3, 1)\n r1 = np.dot(np.dot(u, w), v).reshape(3, 3)\n c2 = -u[:, 2].reshape(3, 1)\n r2 = np.dot(np.dot(u, w), v).reshape(3, 3)\n c3 = u[:, 2].reshape(3, 1)\n r3 = np.dot(np.dot(u, w.T), v).reshape(3, 3)\n c4 = -u[:, 2].reshape(3, 1)\n r4 = np.dot(np.dot(u, w.T), v).reshape(3, 3)\n if np.linalg.det(r1) < 0:\n c1 = -c1\n r1 = -r1\n if np.linalg.det(r2) < 0:\n c2 = -c2\n r2 = -r2\n if np.linalg.det(r3) < 0:\n c3 = -c3\n r3 = -r3\n if np.linalg.det(r4) < 0:\n c4 = -c4\n r4 = -r4\n cam_center = np.array([c1, c2, c3, c4])\n cam_rotation = np.array([r1, r2, r3, r4])\n return cam_center, cam_rotation", "def map_to_matrix(x, y):\n x_pos = round(x * ((MATRIX_SIZE_X - 1)/(FRAME_W - 1)))\n y_pos = round(y * ((MATRIX_SIZE_Y - 1)/(FRAME_H - 1)))\n\n x_pos = (MATRIX_SIZE_X - 1) - x_pos #invert x direction (left and right) to account for camera perspective\n\n return x_pos, y_pos", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[2, 2]=c\n self.matrix[2, 0]=s\n self.matrix[0, 2]=-s\n self.matrix[0, 0]=c\n return self.matrix", "def cameraToWorld(self, 
p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def camera_to_telescope(camera_coord, telescope_frame):\n x_pos = camera_coord.cartesian.x\n y_pos = camera_coord.cartesian.y\n\n rot = telescope_frame.rotation\n if rot ==0:\n x=x_pos\n y=y_pos\n else:\n x = x_pos*cos(rot) - y_pos*sin(rot)\n y = y_pos*sin(rot) + y_pos*cos(rot)\n\n f = telescope_frame.focal_length\n\n x = (x/f) * u.deg\n y = (y/f) * u.deg\n representation = CartesianRepresentation(x,y,0*u.deg)\n\n return telescope_frame.realize_frame(representation)", "def camera_to_object_transform(self):\n # form the full object to camera transform\n T_stp_camera = self.stp_to_camera_transform()\n T_obj_stp = self.object_to_stp_transform()\n T_obj_camera = T_stp_camera.dot(T_obj_stp)\n return T_obj_camera", "def create_cam2world_matrix(forward_vector,\n origin,\n device=None):\n \"\"\"\"\"\"\n\n forward_vector = normalize_vecs(forward_vector)\n up_vector = torch.tensor([0, 1, 0], dtype=torch.float, device=device) \\\n .expand_as(forward_vector)\n\n left_vector = normalize_vecs(\n torch.cross(up_vector,\n forward_vector,\n dim=-1))\n\n up_vector = normalize_vecs(\n torch.cross(forward_vector,\n left_vector,\n dim=-1))\n\n rotation_matrix = torch.eye(4, device=device) \\\n .unsqueeze(0) \\\n .repeat(forward_vector.shape[0], 1, 1)\n rotation_matrix[:, :3, :3] = torch.stack(\n (-left_vector, up_vector, -forward_vector), axis=-1)\n\n translation_matrix = torch.eye(4, device=device) \\\n .unsqueeze(0) \\\n .repeat(forward_vector.shape[0], 1, 1)\n translation_matrix[:, :3, 3] = origin\n\n cam2world = translation_matrix @ rotation_matrix\n\n return cam2world", "def transform_camera_pose_to_world_pose(self):\n for pose in self.close_positions_camera:\n self.close_positions_world.append(self.get_world_pose_for_camera_pose(pose))\n\n for pose in self.medium_positions_camera:\n self.medium_positions_world.append(self.get_world_pose_for_camera_pose(pose))\n\n for pose in self.far_positions_camera:\n self.far_positions_world.append(self.get_world_pose_for_camera_pose(pose))", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def get_transformation_matrix(self, fromFrame, toFrame):\n fromIndex = self.frameNames.index(fromFrame)\n toIndex = self.frameNames.index(toFrame)\n #return get_transformation_matrix(self.frameStack, fromIndex, toIndex)\n return self._get_transformation_matrix_with_indices(fromIndex, toIndex)", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[1, 1]=c\n self.matrix[1, 2]=s\n self.matrix[2, 1]=-s\n self.matrix[2, 2]=c\n return self.matrix", "def transformation_matrix(self) -> TransformationMatrixType:\n # camera translation\n if self._transformation_matrix is None:\n self._transformation_matrix = numpy.matmul(\n self.projection_matrix,\n self.camera_matrix,\n )\n\n return self._transformation_matrix", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n 
#reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[0, 0]=c\n self.matrix[0, 1]=s\n self.matrix[1, 0]=-s\n self.matrix[1, 1]=c\n return self.matrix", "def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)", "def stp_to_camera_transform(self):\n # setup variables\n camera_xyz_w = self.cam_pos\n camera_rot_w = self.cam_rot\n camera_int_pt_w = self.cam_interest_pt\n camera_xyz_obj_p = camera_xyz_w - camera_int_pt_w\n \n # get the distance from the camera to the world\n camera_dist_xy = np.linalg.norm(camera_xyz_w[:2])\n z = [0,0,np.linalg.norm(camera_xyz_w[:3])]\n\n # form the rotations about the x and z axis for the object on the tabletop\n theta = camera_rot_w[0] * np.pi / 180.0\n phi = -camera_rot_w[2] * np.pi / 180.0 + np.pi / 2.0\n camera_rot_obj_p_z = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n\n camera_rot_obj_p_x = np.array([[1, 0, 0],\n [0, np.cos(theta), -np.sin(theta)],\n [0, np.sin(theta), np.cos(theta)]])\n \n # form the full rotation matrix, swapping axes to match maya\n camera_md = np.array([[0, 1, 0],\n [1, 0, 0],\n [0, 0, -1]])\n camera_rot_obj_p = camera_md.dot(camera_rot_obj_p_z.dot(camera_rot_obj_p_x))\n camera_rot_obj_p = camera_rot_obj_p.T\n \n # form the full object to camera transform\n R_stp_camera = camera_rot_obj_p\n t_stp_camera = np.array(z)\n return RigidTransform(rotation=R_stp_camera,\n translation=t_stp_camera,\n from_frame='stp', to_frame='camera')", "def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j 
in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is incorrect.'.format(i, j))", "def get_computed_camera_poses(self):\n pairs = sorted((timestamp, pose) for timestamp, pose in self.frame_deltas.items())\n current_pose = tf.Transform()\n computed_poses = {}\n for timestamp, delta in pairs:\n delta = delta.find_relative(tf.Transform()) # Flip the direction from previous pose relative to new\n current_pose = current_pose.find_independent(delta)\n computed_poses[timestamp] = current_pose\n return computed_poses", "def CoordTrans(frame1, frame2, original_vec, oe=np.zeros(6), \n theta_gst=float('NaN'), lla_gs=np.zeros(3), mu=c.mu_earth, \n r_body=c.r_earth):\n\n # Orbital Elements\n a, e, inc, raan, w, nu = oe\n\n # Warnings\n oe_frames = ['ric', 'ntw', 'pqw']\n if any(frame in oe_frames for frame in (frame1, frame2)):\n if oe.dot(oe) == 0:\n print('ERROR: You forgot to define the orbital elements!')\n\n topocentric_frames = ['sez']\n if any(frame in topocentric_frames for frame in (frame1, frame2)):\n if lla_gs.dot(lla_gs) == 0:\n print('ERROR: You forgot lla for the ground stations!')\n\n # Coordinate System Logic\n if frame1.lower() == 'bci':\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(original_vec, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(original_vec, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(original_vec, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'bcbf':\n if frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(original_vec, r_body=r_body)\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(original_vec, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ric':\n rotated_vec1 = ric2bci(original_vec, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n 
print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ntw':\n rotated_vec1 = ntw2bci(original_vec, e, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'pqw':\n rotated_vec1 = pqw2bci(original_vec, raan, inc, w)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'lla':\n rotated_vec1 = lla2bcbf(original_vec, r_body=r_body)\n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n if 
np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'sez':\n rotated_vec1 = sez2bcbf(original_vec, lla_gs, r_body=r_body)\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec2\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n else:\n print('ERROR: Frame1 is not included in this function!')\n\n return rotated_vec", "def compute_right_camera_pose(left_camera_to_world, left_to_right):\n left_world_to_camera = np.linalg.inv(left_camera_to_world)\n right_world_to_camera = np.matmul(left_to_right, left_world_to_camera)\n right_camera_to_world = np.linalg.inv(right_world_to_camera)\n return right_camera_to_world", "def getMatrix(self, frame):\n self.matrix[3, 0]=self.getValue(frame)\n return self.matrix", "def filter_rotation(frame):\n return frame[frame['direction'] != 'none'].copy()", "def computeWorldTransformationFromFoot(self):\n self.corba.signal(self.perceivedBody).recompute(\n self.corba.signal(self.perceivedBody).time + 1)\n self.robot.dynamic.signal(\n self.trackedBody).recompute(self.robot.dynamic.signal(\n self.trackedBody).time + 1)\n\n mocapMfoot = XYThetaToHomogeneousMatrix(\n self.corba.signal(self.perceivedBody).value)\n sotMfoot = np.matrix(self.robot.dynamic.signal(\n self.trackedBody).value)\n\n # mocap position w.r.t sot frame\n sotMmocap = sotMfoot * np.linalg.inv(mocapMfoot)\n return matrixToTuple(sotMmocap)", "def relative_pose_cam_to_body(\n relative_scene_pose, Rt_cam2_gt\n ):\n relative_scene_pose = (\n np.linalg.inv(Rt_cam2_gt)\n @ relative_scene_pose\n @ Rt_cam2_gt\n )\n return relative_scene_pose", "def get_world_pose_for_camera_pose(self, pose):\n\n # Create a point stamped from the given position\n camera_point = geometry_msgs.msg.PointStamped()\n camera_point.header.stamp = rospy.Time.now()\n camera_point.header.frame_id = 'camera'\n camera_point.point.x = pose[0]\n camera_point.point.y = pose[1]\n camera_point.point.z = pose[2]\n\n # Wait for the transformation to be available\n time = rospy.Time().now()\n self.listener.waitForTransform('camera', 'world', time, rospy.Duration(5))\n world_point = self.listener.transformPoint('world', camera_point)\n\n # Return the new coordinates\n return [world_point.point.x, world_point.point.y, world_point.point.z]", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def get_world_trans(m_obj):\n plug = get_world_matrix_plug(m_obj, 0)\n 
matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n trans_matrix = oMa.MTransformationMatrix(matrix)\n trans = trans_matrix.translation(oMa.MSpace.kWorld)\n\n return trans", "def camera_matrix(e, p, t):\n # Translates all points such that the camera is centered at the origin.\n T = np.array([[1, 0, 0, -e[0]],\n [0, 1, 0, -e[1]],\n [0, 0, 1, -e[2]],\n [0, 0, 0, 1]])\n\n # Set up orthonormal basis.\n w = e - p\n w = w / np.linalg.norm(w)\n u = np.cross(t, w)\n u = u / np.linalg.norm(u)\n v = np.cross(w, u)\n\n # Rotate points such that camera is aligned with UVW-axes (g -> -z-axis).\n R = np.array([[u[0], u[1], u[2], 0],\n [v[0], v[1], v[2], 0],\n [w[0], w[1], w[2], 0],\n [ 0, 0, 0, 1]])\n return R.dot(T)", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n # print (x, y, z)\n cameraEyePosition = list([x, y-0.75, 1.0])\n cameraTargetPosition = [x, y, 1.0]\n cameraUpVector = [0, 0, 1]\n\n fov = 120\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n # TODO: fix me to be along moving axis\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n # viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n # w=img_arr[0] #width of the image, in pixels\n # h=img_arr[1] #height of the image, in pixels\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n return gray", "def getMatrix(self, frame):\n self.matrix[3, 2]=self.getValue(frame)\n return self.matrix", "def getMatrix(self, frame):\n self.matrix[3, 1]=self.getValue(frame)\n return self.matrix", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def intrinsic_matrix_from_camera(w, h, fov):\n (cx, cy), f = calc_focal_values(w, h, fov)\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])", "def set_modelview_from_camera(Rt):\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n \n # rotate teapot 90 deg around x-axis so that z-axis is up\n Rx = np.array([[1,0,0],[0,0,-1],[0,1,0]])\n \n # set rotation to best approximation\n R = Rt[:,:3]\n U,S,V = np.linalg.svd(R)\n R = np.dot(U,V)\n R[0,:] = -R[0,:] # change sign of x-axis\n \n # set translation\n t = Rt[:,3]\n \n # setup 4*4 model view matrix\n M = np.eye(4)\n M[:3,:3] = np.dot(R,Rx)\n M[:3,3] = t\n\n # transpose and flatten to get column order\n M = M.T\n \n m = M.flatten()\n \n # replace model view with the new matrix\n glLoadMatrixf(m)", "def get_2d_poses(self, frame: int, camera: int) -> np.ndarray:\n poses = self._get_2d_data_by_type(frame, camera, 'poses')\n if frame in poses:\n return np.asarray(poses[frame])\n else:\n return 
np.array([])", "def camera_coords_to_world_coords(point, cam_height, cam_angle):\n\n # adjust the axis order\n point = np.array([point[2], point[0], point[1]])\n\n # calculate the vectors of the camera axis in the desired coordinate system\n cam_direction = np.array([np.cos(cam_angle), 0, -np.sin(cam_angle)])\n z = cam_direction\n x = np.cross(np.array([0, 0, 1]), cam_direction)\n y = np.cross(z, x)\n\n # transposed rotation matrix\n rotation = np.vstack([x, y, z])\n\n # translation vector\n translation = np.array([0, 0, cam_height])\n\n return rotation @ (point - translation)", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n # print (x, y, z)\n\n if self.camera_type == 'follow':\n cameraEyePosition = [x, y-1.25, 1.0]\n cameraTargetPosition = [x, y, 1.0]\n elif self.camera_type == 'fixed':\n cameraEyePosition = [2.0, y-2.5, 1.0]\n cameraTargetPosition = [2.0, y, 1.0]\n\n cameraUpVector = [0, 0, 1]\n\n fov = 90\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n\n # assign patch at bottom to show distance, this is to differentiate frames\n # bar_width_pix = int(y/5.0*self.render_dims[1])\n # bar_height_pix = 10\n # gray[0][self.render_dims[0]-bar_height_pix:, 0:bar_width_pix] = 255\n return gray", "def get_camera(self, video: int, frame: int) -> Union[Camera, None]:\n params = self._get_calibration_params(video, frame)\n # Check if we found the camera parameters\n if not (params[0]):\n print(params[1])\n return\n else:\n params = params[1]\n \n # Construct the Camera object\n camera = Camera(params['K'], params['rvec'], params['tvec'], params['distCoef'], params['w'], params['h'])\n\n # Return the camera\n return camera", "def get_projection_mapping(self, cam_pos, cam_rot, local_frame=False, range1=True):\n\n cam_pos = cam_pos.copy()\n cam_pos[2] += self.h_offset\n\n K = self.make_camera_matrix()\n R_opt = self.make_optical_rotation_matrix()\n T_opt = affines.compose([0, 0, 0], R_opt, [1.0, 1.0, 1.0])\n T_opt_inv = np.linalg.inv(T_opt)\n T = self.make_world_to_camera_mat(cam_pos, cam_rot)\n Tinv = np.linalg.inv(T)\n\n # Get the map position encodings (MxMx3)\n pts_w = self.get_world_coord_grid()[..., np.newaxis]\n\n # Get the coordinates in camera frame:\n if not local_frame:\n # If we're using a global map frame, transform the map coordinates into the camera frame\n pts_cam = np.matmul(Tinv[np.newaxis, ...], pts_w)\n else:\n # If we're using local frame, camera is centered in the map, but pitch must still be taken into account!\n # TODO: Fix this and add pitch\n pts_cam = pts_w\n pts_cam[:, 0:2] = pts_cam[:, 0:2] - self.map_world_size_px / 2\n\n # Get the coordinates in optical frame\n pts_opt = np.matmul(T_opt_inv[np.newaxis, ...], pts_cam)\n\n # Get the 3D coordinates of the map pixels in the image frame:\n pts_img = np.matmul(K[np.newaxis, ...], pts_opt[:, 0:3, :])\n\n # Convert to homogeneous (image-plane) coordinates\n valid_z = pts_img[:, 2:3, :] > 0\n\n 
pts_img = pts_img / (pts_img[:, 2:3] + 1e-9)\n #pts_img[:, 0] = pts_img[:, 0] / (pts_img[:, 2] + 1e-9)\n #pts_img[:, 1] = pts_img[:, 1] / (pts_img[:, 2] + 1e-9)\n\n # Mask out all the map elements that don't project on the image\n valid_y1 = pts_img[:, 0:1, :] > 0\n valid_y2 = pts_img[:, 0:1, :] < self.res_x\n valid_x1 = pts_img[:, 1:2, :] > 0\n valid_x2 = pts_img[:, 1:2, :] < self.res_y\n\n # Throw away the homogeneous Z coordinate\n pts_img = pts_img[:, 0:2]\n\n valid = valid_y1 * valid_y2 * valid_x1 * valid_x2 * valid_z\n\n # PyTorch takes projection mappings in -1 to 1 range:\n if range1:\n pts_img[:, 0] = (-pts_img[:, 0] + self.res_x / 2) / (self.res_x / 2)\n pts_img[:, 1] = (-pts_img[:, 1] + self.res_y / 2) / (self.res_y / 2)\n\n # Make sure the invalid points are out of range\n pts_img = pts_img * valid + 2 * np.ones_like(pts_img) * (1 - valid)\n else:\n pts_img = pts_img * valid\n\n # Remove the extra 1-length dimension\n pts_img = pts_img.squeeze()\n\n # Reshape into the 2D map representation\n pts_img = np.reshape(pts_img, [self.map_size_px, self.map_size_px, 2])\n\n return pts_img", "def closest_approach_to_camera(scene, speaker_object) -> (float, int):\n max_dist = sys.float_info.max\n at_time = scene.frame_start\n for frame in range(scene.frame_start, scene.frame_end + 1):\n scene.frame_set(frame)\n rel = speaker_object.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()\n dist = norm(rel)\n\n if dist < max_dist:\n max_dist = dist\n at_time = frame\n\n return max_dist, at_time", "def adjust_camera(self):\n pose = deepcopy(self.data['poses']['marker']) # PoseStamped()\n eye_pose = deepcopy(pose)\n eye_pose.pose.position.x += 0.60\n eye_pose.pose.position.z += 0.20\n focus_pose = PoseStamped()\n base_eye_pose = PoseStamped()\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'], pose.header.frame_id)\n focus_pose = self.tfl.transformPose(self.params['world'], pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. 
\\\n getLatestCommonTime(self.params['world'],\n eye_pose.header.frame_id)\n base_eye_pose = self.tfl.transformPose(self.params['world'],\n eye_pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n cam_place = CameraPlacement()\n cam_place.target_frame = self.params['world']\n cam_place.time_from_start = Duration(1)\n # Position of the camera relative to target_frame\n cam_place.eye.header.frame_id = cam_place.target_frame\n cam_place.eye.point = base_eye_pose.pose.position\n # Target_frame-relative point for the focus\n cam_place.focus.header.frame_id = cam_place.target_frame\n cam_place.focus.point = focus_pose.pose.position\n # Target_frame-relative vector that maps to \"up\" in the view plane.\n cam_place.up.header.frame_id = cam_place.target_frame\n cam_place.up.vector.x = 0\n cam_place.up.vector.y = 0\n cam_place.up.vector.z = 1\n self.pub.publish(cam_place)\n return", "def camera_frame_directions(self) -> _BFRAME_TYPE:\n pass", "def __init__(self, camera):\n self.__camera = camera\n self.__innerOrientationParameters = None\n self.__isSolved = False\n self.__exteriorOrientationParameters = np.array([0, 0, 0, 0, 0, 0], 'f')\n self.__rotationMatrix = None", "def read(self):\r\n\t\t# get data from camera\r\n\t\tarray = self.ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\r\n\t\t# get frame as numpy array\r\n\t\tframe = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tcamera_matrix = np.array([\r\n\t\t\t[4.5330796457901283e+02, 0., 6.1902229288626302e+02],\r\n\t\t\t[0., 4.5369175559310276e+02, 5.1298362120979994e+02],\r\n\t\t\t[0., 0., 1.]])\r\n\t\t\r\n\t\tdist_coeffs = np.array([\r\n\t\t\t-3.1812973406286371e-01, 9.6396352148682182e-02,\r\n\t\t\t2.9601124432187590e-03, 9.7700591472463412e-04,\r\n\t\t\t-1.1929681608809075e-02\r\n\t\t])\r\n\r\n\t\tframe = cv2.undistort(frame, camera_matrix, dist_coeffs, camera_matrix)\r\n\t\t\"\"\"\r\n\r\n\t\treturn frame", "def snapFrame(camera):\n return camera.read()[1]", "def get_frame(self, camera: int = 0) -> Tuple[float, np.ndarray]:\n result = self.video.read()\n if result[0]:\n return result\n else: # If we reach the end of the video, go back to the beginning.\n self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)\n return self.video.read()", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n\n if self.camera_type == 'follow':\n cameraEyePosition = [x, y-1.25, 1.0]\n cameraTargetPosition = [x, y, 1.0]\n elif self.camera_type == 'fixed':\n # y-2.7 not 2.5 since cheetah is longer\n cameraEyePosition = [2.0, y-2.7, 1.0]\n cameraTargetPosition = [2.0, y, 1.0]\n\n cameraUpVector = [0, 0, 1]\n\n fov = 90\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n\n # assign patch at bottom to show distance, this is to differentiate frames\n # 
bar_width_pix = int(y/5.0*self.render_dims[1])\n # bar_height_pix = 10\n # gray[0][self.render_dims[0]-bar_height_pix:, 0:bar_width_pix] = 255\n return gray", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def r2n2_cam2world(self):\n if not hasattr(self, '_r2n2_cam2world'):\n ms = []\n for i in range(24):\n cam2v1 = assert_is_4x4(self.r2n2_cam2v1[i, ...])\n v12occnet = assert_is_4x4(self.v12occnet)\n occnet2gaps = assert_is_4x4(self.occnet2gaps)\n cam2occnet = np.matmul(v12occnet, cam2v1)\n cam2gaps = np.matmul(occnet2gaps, cam2occnet)\n ms.append(assert_is_4x4(cam2gaps))\n self._r2n2_cam2world = np.stack(ms).astype(np.float32)\n return self._r2n2_cam2world", "def worldToCameraCentricXform(self):\n return self.rotateAlignXform().dot(self.translateToOriginXform())", "def pixel2cam(self, depth, intrinsics_inv):\n b, _, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]\n ones = torch.ones(1,h,w).type_as(depth)\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n ###pixel_coords is an array of camera pixel coordinates (x,y,1) where x,y origin is the upper left corner of the image.\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).view(b,3,-1) #.contiguous().view(b, 3, -1) # [B, 3, H*W]\n #cam_coords = intrinsic_inv.expand(b,3,3).bmm(current_pixel_coords).view(b,3,h,w)\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b,3,h,w)\n return cam_coords * depth", "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def get_frame_root_rot(self, frame):\n root_rot = frame[self.POS_SIZE:(self.POS_SIZE + self.ROT_SIZE)].copy()\n return root_rot", "def getDejaVuMatrix(self):\n mtx = 
self.getRotMatrix((4, 4), transpose=None) # from Quaternion\n mtx[3] = self.getTranslation()\n mtx[:3, 3] = mtx[3, :3]\n mtx[3, :3] = [0, 0, 0]\n return mtx", "def translate_frame(frame, x, y):\n trans_mat = np.float32([[1, 0, x], [0, 1, y]])\n dimensions = (frame.shape[1], frame.shape[0])\n return cv.warpAffine(frame, trans_mat, dimensions)", "def complex_camera(camera, target_rect):\n l, t, _, _ = target_rect\n _, _, w, h = camera\n l, t, _, _ = -l+300, -t+300, w, h\n\n l = min(0, l)\n l = max(-(camera.width-600), l)\n t = max(-(camera.height-600), t)\n t = min(0, t)\n\n return pygame.Rect(l, t, w, h)", "def rotate_to_local(self,vxyz):\n return sp.mxv(self.mtxtofov,vxyz)", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def get_transform(self, from_frame, to_frame):\n if not self._pipeline:\n return None\n try:\n from_ind = self._get_frame_index(from_frame)\n except ValueError:\n raise CoordinateFrameError(\"Frame {0} is not in the available \"\n \"frames\".format(from_frame))\n try:\n to_ind = self._get_frame_index(to_frame)\n except ValueError:\n raise CoordinateFrameError(\"Frame {0} is not in the available frames\".format(to_frame))\n if to_ind < from_ind:\n #transforms = np.array(self._pipeline[to_ind: from_ind], dtype=\"object\")[:, 1].tolist()\n transforms = [step.transform for step in self._pipeline[to_ind: from_ind]]\n transforms = [tr.inverse for tr in transforms[::-1]]\n elif to_ind == from_ind:\n return None\n else:\n #transforms = np.array(self._pipeline[from_ind: to_ind], dtype=\"object\")[:, 1].copy()\n transforms = [step.transform for step in self._pipeline[from_ind: to_ind]]\n return functools.reduce(lambda x, y: x | y, transforms)", "def extract_camera_sync(sync, chmap=None):\n assert(chmap)\n sr = _get_sync_fronts(sync, chmap['right_camera'])\n sl = _get_sync_fronts(sync, chmap['left_camera'])\n sb = _get_sync_fronts(sync, chmap['body_camera'])\n return {'right': sr.times[::2],\n 'left': sl.times[::2],\n 'body': sb.times[::2]}", "def get_projection_matrix(self, aspect):\n return self.ptr.get_projection_matrix(aspect)", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix", "def get_frame(self, camera: int = 0) -> Tuple[float, np.ndarray]:\n return self.video.read()", "def simple_camera(camera, target_rect):\n l, t, _, _ = target_rect\n _, _, w, h = camera\n return pygame.Rect(-l+300, -t+300, w, h)", "def get_frame_joints_vel(self, frame):\n vel = frame[(self.VEL_SIZE + self.ANG_VEL_SIZE):].copy()\n return vel", "def _omni_frame_to_omni_frame_projection(self, agent_rel_pose, agent_rel_mat, uniform_sphere_pixel_coords,\n sphere_pix_coords_f1, sphere_depth_f1, sphere_feat_f1, agent_rel_pose_cov,\n image_var_f1, holes_prior, holes_prior_var, batch_size):\n\n # Frame 1 #\n # --------#\n\n # combined\n\n # B x OH x OW x 3\n angular_pixel_coords_f1 = ivy.concatenate((sphere_pix_coords_f1, sphere_depth_f1), -1)\n\n # sphere coords\n\n # B x OH x OW x 3\n sphere_coords_f1 = \\\n ivy_vision.angular_pixel_to_sphere_coords(angular_pixel_coords_f1, self._pixels_per_degree)\n\n # Frame 2 #\n # --------#\n\n # sphere to sphere pixel projection\n\n sphere_coords_f2 = ivy_vision.sphere_to_sphere_coords(\n sphere_coords_f1, agent_rel_mat, [batch_size], self._sphere_img_dims)\n image_var_f2 = image_var_f1\n\n # to 
angular pixel coords\n\n # B x OH x OW x 3\n angular_pixel_coords_f2 = \\\n ivy_vision.sphere_to_angular_pixel_coords(sphere_coords_f2, self._pixels_per_degree)\n\n # constant feature projection\n\n # B x OH x OW x (3+F)\n projected_coords_f2 = ivy.concatenate([angular_pixel_coords_f2] + [sphere_feat_f1], -1)\n\n # reshaping to fit quantization dimension requirements\n\n # B x (OHxOW) x (3+F)\n projected_coords_f2_flat = ivy.reshape(projected_coords_f2,\n [batch_size] + [self._sphere_img_dims[0] * self._sphere_img_dims[1]]\n + [3 + self._feat_dim])\n\n # B x (OHxOW) x (3+F)\n image_var_f2_flat = ivy.reshape(image_var_f2,\n [batch_size] + [self._sphere_img_dims[0] * self._sphere_img_dims[1]]\n + [3 + self._feat_dim])\n\n # quantize the projection\n\n # B x N x OH x OW x (3+F) # B x N x OH x OW x (3+F)\n return ivy_vision.quantize_to_image(\n pixel_coords=projected_coords_f2_flat[..., 0:2],\n final_image_dims=self._sphere_img_dims,\n feat=projected_coords_f2_flat[..., 2:],\n feat_prior=holes_prior,\n with_db=self._with_depth_buffer,\n pixel_coords_var=image_var_f2_flat[..., 0:2],\n feat_var=image_var_f2_flat[..., 2:],\n pixel_coords_prior_var=holes_prior_var[..., 0:2],\n feat_prior_var=holes_prior_var[..., 2:],\n var_threshold=self._var_threshold[:, 0],\n uniform_pixel_coords=uniform_sphere_pixel_coords,\n batch_shape=(batch_size,),\n dev_str=self._dev_str)[0:2]", "def gen_locate_camera_frame(self, exp_id, left_cam_id, right_cam_id, frame_id):\n exp = experiment.experiment(new_experiment=False, ts=str(exp_id))\n room_name = exp.metadata[\"room\"]\n if room_name.lower() == \"cears\":\n room_id = 1\n elif room_name.lower() == \"computer_lab\":\n room_id = 0\n\n devices = self.rooms[room_id][\"devices\"]\n devices.sort()\n\n fname = str(frame_id) + \".png\"\n\n l_camera_name = os.path.basename(devices[int(left_cam_id)])\n r_camera_name = os.path.basename(devices[int(right_cam_id)])\n\n path_l = os.path.join(self.um.experiment_path(exp_id), \"raw\", l_camera_name, fname) # noqa: E501\n\n path_r = os.path.join(self.um.experiment_path(exp_id), \"raw\", r_camera_name, fname) # noqa: E501\n\n frame_l = cv2.imread(path_l, )\n frame_r = cv2.imread(path_r, )\n\n retval = True\n\n if frame_l is None:\n frame_l = cv2.imread(\"flask_app/static/task.jpg\")\n ret_val = False\n\n if frame_r is None:\n frame_r = cv2.imread(\"flask_app/static/task.jpg\")\n ret_val = False\n\n pattern_shape = (9, 6) # TODO: CHECK THIS\n grid_size = 30 # TODO: CHECK THIS\n text_l, corners_l, canvas_l, R_l, t_l = computer_vision.calculate_camera_pose(frame_l, self.K, self.d, pattern_shape=pattern_shape, grid_size=grid_size) # noqa: E501\n text_r, corners_r, canvas_r, R_r, t_r = computer_vision.calculate_camera_pose(frame_r, self.K, self.d, pattern_shape=pattern_shape, grid_size=grid_size) # noqa: E501\n \n T_l = np.eye(4)\n T_r = np.eye(4)\n if R_l is not None and R_r is not None:\n T_l[0:3, 0:3] = R_l\n T_r[0:3, 0:3] = R_r\n T_l[0:3, 3] = t_l.ravel()\n T_r[0:3, 3] = t_r.ravel()\n\n T_l = np.eye(4)\n T_r = np.linalg.inv(T_l).dot(T_r)\n\n print T_l\n print T_r\n canvas = np.hstack([canvas_l, canvas_r])\n\n text = \"Left: \" + text_l + \" Right: \" + text_r\n cv2.putText(canvas, text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX,\n 1.0, (0, 0, 255), lineType=cv2.LINE_AA)\n\n ret, jpeg = cv2.imencode('.jpg', canvas)\n\n if ret:\n img = jpeg.tobytes()\n else:\n pass\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png;base64,\\r\\n\\r\\n' + img + b'\\r\\n')", "def get_bounding_boxes(vehicles, camera):\n '''\n bounding_boxes = 
np.array([ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles])\n # filter objects behind camera\n inscene_indices = np.array([1 if all(bb[:, 2]) > 0 else 0 for bb in bounding_boxes])\n inscene_indices = np.argwhere(inscene_indices == 1)\n bounding_boxes_visible = [bb for ]\n rotations = [vehicle.get_transform().rotation for vehicle in vehicles]\n rotation_decomposed = np.array([[rotation.yaw, rotation.roll, rotation.pitch] for rotation in rotations])\n rotations_visible = rotation_decomposed[inscene_indices]\n return bounding_boxes_visible, rotations_visible\n '''\n bounding_boxes = [ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles]\n # filter objects behind camera\n rotations = []\n for vehicle in vehicles:\n transform = vehicle.get_transform()\n rotation = transform.rotation\n rotations.append([rotation.yaw, rotation.roll, rotation.pitch])\n inscene_indices = []\n for i in range(len(bounding_boxes)):\n bb = bounding_boxes[i]\n if all(bb[:, 2] > 0):\n inscene_indices.append(1)\n else:\n inscene_indices.append(0) \n inscene_boxes = [bounding_boxes[i] for i in range(len(bounding_boxes)) if inscene_indices[i] == 1]\n return inscene_boxes, np.array(rotations)[inscene_indices]", "def getLocalTransform(animation, jointlock, frame, pointpos, handthick = 3.5):\n jointTransform = jointlock.getGlobalTransform(frame)\n if handthick:\n jointPosition = np.dot(jointTransform,[0,0,0,1])[:-1]\n #Find parametric equation to remove hands surface (hand thickness)\n vec = jointPosition - pointpos\n distance = np.linalg.norm(vec)\n t = handthick/distance\n position = pointpos + t*vec\n else:\n position = pointpos\n jointInverse = mathutils.inverseMatrix(jointTransform)\n globalTransform = mathutils.matrixTranslation(position[0], position[1], position[2])\n localTransform = np.dot(jointInverse, globalTransform)\n return localTransform", "def get_camera_stationary_frame_difference_skeleton(threshold, sampling_rate):\n return {\n 'result': [],\n 'hyperparameters': {\n 'threshold': threshold,\n 'sampling_rate': sampling_rate\n }\n }", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n inv_rect = np.linalg.inv(rect)\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def video(perspective_matrix_path, source=\"cam\", save=False, save_path=None, file_name=\"out\", cam_cal=None):\n if not os.path.isfile(perspective_matrix_path):\n raise FileNotFoundError(\"Path to perspective matrix file not exist!\")\n\n with open(perspective_matrix_path, \"rb\") as p:\n 
perspective_matrix = pickle.load(p)\n M = perspective_matrix[\"M\"]\n Minv = perspective_matrix[\"Minv\"]\n\n if source == \"cam\":\n cap = cv2.VideoCapture(0)\n else:\n if not os.path.isfile(source):\n raise FileNotFoundError(source, \" not Exist!\")\n cap = cv2.VideoCapture(source)\n\n # camera calibration parameters [ mtx , dist]\n mtx = None\n dist = None\n\n out = None\n if save:\n if not os.path.isdir(save_path):\n raise FileNotFoundError(save_path, \" Not Exist!\")\n file_name += \".mp4\"\n out = cv2.VideoWriter(save_path + file_name, -1, 20, (int(cap.get(3)), int(cap.get(4))))\n\n if cam_cal:\n if not os.path.isfile(cam_cal):\n raise FileNotFoundError(cam_cal, \" Not Exist!\")\n\n with open(cam_cal, \"rb\") as p:\n calibration = pickle.load(p)\n mtx = calibration[\"mtx\"]\n dist = calibration[\"dist\"]\n\n left_line = Line(5)\n right_line = Line(5)\n\n while True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n if not ret:\n print(\"Finished..\")\n sys.exit(0)\n\n # cv2 read frame as BGR, convert it to RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # camera calibration\n if not (mtx is None or dist is None):\n frame = cv2.undistort(frame, mtx, dist, None, mtx)\n\n # get edges in image\n edges = apply_edge_detection(frame)\n\n # transform image to bird view\n warped = warped_img(edges, M)\n\n # init out image which will draw lane line on it then weight it with original frame\n out_img = np.zeros_like(warped)\n if len(warped.shape) == 3 and warped.shape[2] == 3:\n pass\n else:\n out_img = np.dstack((out_img, out_img, out_img))\n\n # if line not detected, apply sliding window\n if not left_line.detected or not right_line.detected:\n leftx, lefty, rightx, righty = sliding_window(warped, 9, 200)\n\n # if already detected apply search around detected line\n else:\n leftx, lefty = search_around_poly(left_line, warped)\n rightx, righty = search_around_poly(right_line, warped)\n\n # will used for plotting line, find x fitted\n ploty = np.linspace(warped.shape[0] // 4, warped.shape[0] - 1, warped.shape[0])\n\n # check if at least 100 pixels detected as line\n if len(leftx) > 100 and len(rightx) > 100:\n\n # make detected flag true\n left_line.detected = True\n right_line.detected = True\n\n left_line.current_x = leftx\n left_line.current_y = lefty\n\n right_line.current_x = rightx\n right_line.current_y = righty\n\n left_line.fit_polynomial(ploty)\n right_line.fit_polynomial(ploty)\n\n else:\n print(\"Line not detected in this frame \")\n # we just draw line form previous frame\n\n # make detected flag true\n left_line.detected = False\n right_line.detected = False\n\n # update Lane line radius\n left_line.radius()\n right_line.radius()\n\n # avg radius of to lines, and plot it\n radius = (left_line.radius_of_curvature + right_line.radius_of_curvature) // 2\n frame = write_text(frame, \"Radius of Curvature = \" + str(radius) + \" M\", pos=(20, 50))\n\n # calculate Alignment ( how much car away from center between Lane lines\n dir = \"Left\" # car far from left or right\n\n left_line.car_offset(frame.shape) # distance from left line\n right_line.car_offset(frame.shape) # distance from right line\n\n distance = round(right_line.line_base_pos - left_line.line_base_pos, 2)\n\n if distance < 0: # car far away from left line not right line\n distance = -distance\n dir = \"Right\"\n frame = write_text(frame, \"Vehicle is {}m {} of center\".format(distance, dir), pos=(20, 80))\n\n # ** plot lane lines on image **\n # left_line.draw_line(out_img, ploty)\n # 
right_line.draw_line(out_img, ploty)\n\n # color pixel which belong to lane lines\n left_line.color_pixel(out_img, (255, 0, 0))\n right_line.color_pixel(out_img, (255, 100, 0))\n\n # fit green triangle in area between lane lines\n pts_left = np.array([np.transpose(np.vstack([left_line.bestx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_line.bestx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(out_img, np.int_([pts]), (0, 255, 0))\n\n # return image to normal view from bird view\n out_img_undit = warped_img(out_img, Minv)\n\n # weight out_image_undit with original frame\n frame = cv2.addWeighted(out_img_undit, 0.5, frame, 1, 0)\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n cv2.imshow(\"frame\", frame)\n\n # write video\n if save:\n out.write(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()", "def base_projection_matrix(self, fiber):\n return matrix(ZZ, fiber.vertices()).right_kernel_matrix()", "def get_world_rot(m_obj):\n plug = get_world_matrix_plug(m_obj, 0)\n matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n trans_matrix = oMa.MTransformationMatrix(matrix)\n rot = trans_matrix.rotation()\n\n return rot", "def transform(orbit, frame_orig, frame_dest):\n\n orbit_orig = frame_orig(\n x=orbit.r[0],\n y=orbit.r[1],\n z=orbit.r[2],\n v_x=orbit.v[0],\n v_y=orbit.v[1],\n v_z=orbit.v[2],\n representation=CartesianRepresentation,\n differential_type=CartesianDifferential,\n )\n\n orbit_dest = orbit_orig.transform_to(frame_dest(obstime=orbit.epoch))\n orbit_dest.representation = CartesianRepresentation\n\n return Orbit.from_vectors(\n orbit.attractor,\n orbit_dest.data.xyz,\n orbit_dest.data.differentials[\"s\"].d_xyz,\n epoch=orbit.epoch,\n )", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)", "def __parseFrameSpherical(self):\n print(\"pointcloud: starting the frame parsing\")\n pointArray = []\n for (y, row) in enumerate(self.frame):\n #trim hfov\n pixelWidth = len(row)\n deltaAngle = self.rawhfov - self.desiredhfov\n pixelsToRemove = pixelWidth / deltaAngle \n row = row[int(ceil(pixelsToRemove/2)) : pixelWidth-int(ceil(pixelsToRemove/2)) ]\n \n for x, depth in enumerate(row):\n if(depth == 0):\n continue\n width = float(len(row))\n height = float(len(self.frame))\n thetax = (pi/2) - radians((self.rawhfov / width) * x)\n thetay = (pi/2) - radians((self.vfov / height) * y)\n vx = float(cos(thetax) * sin(thetay) * depth)\n vy = float(sin(thetax) * sin(thetay) * depth)\n vz = float(cos(thetay) * depth)\n vector3 = [vx, vy, vz]\n pointArray.append(vector3)\n self.points = pointArray\n print(\"pointcloud: ended the frame parsing\")", "def cam_to_body(self, vector):\n cam2hand = generate_frame_transform(self._cam2hand_t[0:3,:],self._cam2hand_R[0:3,0:3],False)\n # Possibly GRASP specific function?\n hand_pose = baxter.get_right_arm_pose()\n (t,R) = get_t_R(hand_pose)\n hand2body = generate_frame_transform(t[0:3,:],R[0:3,0:3],True)\n return np.dot(hand2body,np.dot(cam2hand,vector))", "def _frame_to_omni_frame_projection(self, cam_rel_poses, cam_rel_mats, uniform_sphere_pixel_coords, cam_coords_f1,\n cam_feat_f1, rel_pose_covs, image_var_f1, holes_prior, 
holes_prior_var,\n batch_size, num_timesteps, num_cams, image_dims):\n\n # cam 1 to cam 2 coords\n\n cam_coords_f2 = ivy_vision.cam_to_cam_coords(\n ivy_mech.make_coordinates_homogeneous(\n cam_coords_f1, [batch_size, num_timesteps, num_cams] + image_dims), cam_rel_mats,\n [batch_size, num_timesteps, num_cams], image_dims)\n\n # cam 2 to sphere 2 coords\n\n sphere_coords_f2 = ivy_vision.cam_to_sphere_coords(cam_coords_f2)\n image_var_f2 = image_var_f1\n\n # angular pixel coords\n\n # B x N x C x H x W x 3\n angular_pixel_coords_f2 = \\\n ivy_vision.sphere_to_angular_pixel_coords(sphere_coords_f2, self._pixels_per_degree)\n\n # constant feature projection\n\n # B x N x C x H x W x (3+F)\n projected_coords_f2 = ivy.concatenate([angular_pixel_coords_f2] + [cam_feat_f1], -1)\n\n # reshaping to fit quantization dimension requirements\n\n # B x N x (CxHxW) x (3+F)\n projected_coords_f2_flat = \\\n ivy.reshape(projected_coords_f2,\n [batch_size, num_timesteps, num_cams * image_dims[0] * image_dims[1], -1])\n\n # B x N x (CxHxW) x (3+F)\n image_var_f2_flat = ivy.reshape(image_var_f2,\n [batch_size, num_timesteps, num_cams * image_dims[0] * image_dims[1], -1])\n\n # quantized result from all scene cameras\n\n # B x N x OH x OW x (3+F) # B x N x OH x OW x (3+F)\n return ivy_vision.quantize_to_image(\n pixel_coords=projected_coords_f2_flat[..., 0:2],\n final_image_dims=self._sphere_img_dims,\n feat=projected_coords_f2_flat[..., 2:],\n feat_prior=holes_prior,\n with_db=self._with_depth_buffer,\n pixel_coords_var=image_var_f2_flat[..., 0:2],\n feat_var=image_var_f2_flat[..., 2:],\n pixel_coords_prior_var=holes_prior_var[..., 0:2],\n feat_prior_var=holes_prior_var[..., 2:],\n var_threshold=self._var_threshold,\n uniform_pixel_coords=uniform_sphere_pixel_coords,\n batch_shape=(batch_size, num_timesteps),\n dev_str=self._dev_str)[0:2]", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def export_camera(file, scene, global_matrix, render, tab_write):\n camera = scene.camera\n\n # DH disabled for now, this isn't the correct context\n active_object = None # bpy.context.active_object # does not always work MR\n matrix = global_matrix @ camera.matrix_world\n focal_point = camera.data.dof.focus_distance\n\n # compute resolution\n q_size = render.resolution_x / render.resolution_y\n tab_write(file, 
\"#declare camLocation = <%.6f, %.6f, %.6f>;\\n\" % matrix.translation[:])\n tab_write(\n file,\n (\n \"#declare camLookAt = <%.6f, %.6f, %.6f>;\\n\"\n % tuple(degrees(e) for e in matrix.to_3x3().to_euler())\n ),\n )\n\n tab_write(file, \"camera {\\n\")\n if scene.pov.baking_enable and active_object and active_object.type == \"MESH\":\n tab_write(file, \"mesh_camera{ 1 3\\n\") # distribution 3 is what we want here\n tab_write(file, \"mesh{%s}\\n\" % active_object.name)\n tab_write(file, \"}\\n\")\n tab_write(file, \"location <0,0,.01>\")\n tab_write(file, \"direction <0,0,-1>\")\n\n else:\n if camera.data.type == \"ORTHO\":\n # XXX todo: track when SensorHeightRatio was added to see if needed (not used)\n sensor_height_ratio = (\n render.resolution_x * camera.data.ortho_scale / render.resolution_y\n )\n tab_write(file, \"orthographic\\n\")\n # Blender angle is radian so should be converted to degrees:\n # % (camera.data.angle * (180.0 / pi) )\n # but actually argument is not compulsory after angle in pov ortho mode\n tab_write(file, \"angle\\n\")\n tab_write(file, \"right <%6f, 0, 0>\\n\" % -camera.data.ortho_scale)\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"up <0, %6f, 0>\\n\" % (camera.data.ortho_scale / q_size))\n\n elif camera.data.type == \"PANO\":\n tab_write(file, \"panoramic\\n\")\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"right <%s, 0, 0>\\n\" % -q_size)\n tab_write(file, \"up <0, 1, 0>\\n\")\n tab_write(file, \"angle %f\\n\" % (360.0 * atan(16.0 / camera.data.lens) / pi))\n elif camera.data.type == \"PERSP\":\n # Standard camera otherwise would be default in pov\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"right <%s, 0, 0>\\n\" % -q_size)\n tab_write(file, \"up <0, 1, 0>\\n\")\n tab_write(\n file,\n \"angle %f\\n\"\n % (2 * atan(camera.data.sensor_width / 2 / camera.data.lens) * 180.0 / pi),\n )\n\n tab_write(\n file,\n \"rotate <%.6f, %.6f, %.6f>\\n\" % tuple(degrees(e) for e in matrix.to_3x3().to_euler()),\n )\n\n tab_write(file, \"translate <%.6f, %.6f, %.6f>\\n\" % matrix.translation[:])\n if camera.data.dof.use_dof and (focal_point != 0 or camera.data.dof.focus_object):\n tab_write(\n file, \"aperture %.3g\\n\" % (1 / (camera.data.dof.aperture_fstop * 10000) * 1000)\n )\n tab_write(\n file,\n \"blur_samples %d %d\\n\"\n % (camera.data.pov.dof_samples_min, camera.data.pov.dof_samples_max),\n )\n tab_write(file, \"variance 1/%d\\n\" % camera.data.pov.dof_variance)\n tab_write(file, \"confidence %.3g\\n\" % camera.data.pov.dof_confidence)\n if camera.data.dof.focus_object:\n focal_ob = scene.objects[camera.data.dof.focus_object.name]\n matrix_blur = global_matrix @ focal_ob.matrix_world\n tab_write(file, \"focal_point <%.4f,%.4f,%.4f>\\n\" % matrix_blur.translation[:])\n else:\n tab_write(file, \"focal_point <0, 0, %f>\\n\" % focal_point)\n if camera.data.pov.normal_enable:\n tab_write(\n file,\n \"normal {%s %.4f turbulence %.4f scale %.4f}\\n\"\n % (\n camera.data.pov.normal_patterns,\n camera.data.pov.cam_normal,\n camera.data.pov.turbulence,\n camera.data.pov.scale,\n ),\n )\n tab_write(file, \"}\\n\")", "def _calculate_camera_array(self):\n look_list = []\n\n row_step_vec = normalize(self.look_up) * self.interspatial_distance\n col_step_vec = self._get_look_right() * self.interspatial_distance\n\n # Start at the top left camera position\n for i in range(self.spatial_rows):\n 
row_movement = row_step_vec * (-i)\n row_look_from = self.look_from + row_movement\n row_look_to = self.look_to + row_movement\n\n for j in range(self.spatial_cols):\n col_movement = col_step_vec * j\n cam_look_from = row_look_from + col_movement\n cam_look_to = row_look_to + col_movement\n\n look_list.append((cam_look_from, cam_look_to))\n\n return look_list", "def publish_camera_frame(self):\n executive.get_camera_orientation()\n self.t.start()\n # Wait for transformation to be published\n rospy.sleep(2)", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def _calculate_camera_pose(frame, K, d, corners, pattern_shape=(6, 4), grid_size=30): # noqa: E501\n img = frame.copy()\n axis = np.float32([[grid_size, 0, 0], [0, grid_size, 0],\n [0, 0, -grid_size]]).reshape(-1, 3)*2\n\n objp = np.zeros((np.prod(pattern_shape), 3), np.float32)\n objp[:, :2] = np.mgrid[0:pattern_shape[0],\n 0:pattern_shape[1]].T.reshape(-1, 2) * grid_size\n\n _, rvecs, tvecs = cv2.solvePnP(objp, corners, K, d)\n R, _ = cv2.Rodrigues(rvecs)\n # project 3D points onto image plane\n imgpts, _ = cv2.projectPoints(axis,\n rvecs, tvecs,\n K, d)\n\n canvas = computer_vision.draw_axis(img, corners, imgpts)\n return R, tvecs, canvas", "def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None", "def transMatrix( source=None ):\n if source is None:\n return None,None\n else:\n (x,y,z) = source[:3]\n if x == y == z == 0.0:\n return None, None \n return tmatrixaccel.transMatrix( x,y,z ),tmatrixaccel.transMatrix( -x, -y, -z )" ]
[ "0.653753", "0.6497488", "0.6299928", "0.62356544", "0.61046827", "0.60589045", "0.60228235", "0.5942271", "0.5935986", "0.5927937", "0.5889109", "0.58773947", "0.58653027", "0.5864766", "0.5847944", "0.58465064", "0.5835871", "0.5828304", "0.57876843", "0.5779836", "0.5772962", "0.5770148", "0.5765084", "0.57462746", "0.57462454", "0.5741935", "0.5724905", "0.5662365", "0.5619435", "0.5584499", "0.5577126", "0.55650276", "0.55534947", "0.55207044", "0.5518501", "0.55038434", "0.5487898", "0.5474158", "0.5439808", "0.5418603", "0.5416651", "0.5413313", "0.5397355", "0.53836036", "0.5351097", "0.53421444", "0.533866", "0.5331934", "0.53237665", "0.5323134", "0.53219324", "0.5312476", "0.5303562", "0.5299455", "0.52988386", "0.5289084", "0.52846175", "0.5273258", "0.5264596", "0.5256064", "0.5253539", "0.52508336", "0.5244755", "0.5243597", "0.5228789", "0.5227275", "0.52129406", "0.51838034", "0.51815265", "0.5168", "0.5166467", "0.5160764", "0.5146032", "0.5142636", "0.5139099", "0.51204526", "0.51004064", "0.50978196", "0.50930643", "0.5092066", "0.509138", "0.5083791", "0.5077928", "0.50715184", "0.5070569", "0.50667727", "0.5063481", "0.50499946", "0.5047451", "0.50457287", "0.50439996", "0.50398993", "0.50326836", "0.5026103", "0.5025114", "0.5020177", "0.5016095", "0.49941266", "0.49790922", "0.49757993", "0.49747086" ]
0.0
-1
return enantiomer of self, either D or L
def D_or_L(self) -> str:
    CO = np.array([self['C'].xyz.x, self['C'].xyz.y, self['C'].xyz.z])
    CA = np.array([self['CA'].xyz.x, self['CA'].xyz.y, self['CA'].xyz.z])
    CB = np.array([self['CB'].xyz.x, self['CB'].xyz.y, self['CB'].xyz.z])
    N = np.array([self['N'].xyz.x, self['N'].xyz.y, self['N'].xyz.z])
    v1 = N - CO
    v2 = CA - CO
    cp = np.cross(v1, v2)
    CB_infront = cp.dot(CB - CA) > 0
    print(CB_infront)
    return 'D' if CB_infront else 'L'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def L(self):\n if not self.isVaild():\n pass\n return self.Lq() + self.r()", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def L(self):\n return self._L", "def L(self):\n return self.__L", "def L(self):\n return self.__L", "def L(self):\n return self.__L", "def L(self):\n return self.__L", "def getL(self):\r\n return self.L", "def LDLT(self):\n\t\tpass", "def lform(self):\n a, c, d, b = self.to_ccw()\n if b < c:\n a += b\n b -= b\n c -= b\n d += b\n else:\n a += c\n b -= c\n c -= c\n d += c\n return self.__class__.from_ccw(a, c, d, b)", "def dilemma(self, disjunction):\n\t\tinterp1 = _match(((A, impl, B), conj, (C, impl, D)), self)\n\t\tinterp2 = _match((A, disj, C), disjunction)\n\t\tif interp1[A] == interp2[A] and interp1[C] == interp2[C]:\n\t\t\treturn self.__class__((interp1[B], disj, interp1[D]))\n\t\telse:\n\t\t\traise LogicException()", "def Dang_l(self):\n return self._Dang_l", "def either(self):\n return self._v", "def dual(self):\n letter = self.letter()\n # the self-dual cases\n if letter != 'BC' and letter[0] in ['B','C']:\n if letter == 'BB': letter = 'CC'\n elif letter == 'CC': letter = 'BB'\n elif letter[0] == 'B': letter = 'C' + letter[1:]\n elif letter[0] == 'C': letter = 'B' + letter[1:]\n rank = self._rank\n if self.is_affine():\n rank -= 1\n twist = self._twist\n return QuiverMutationType(letter,rank,twist)\n # the cases F and G have non-trivial duality in some cases\n elif letter in ['F','G']:\n if self.is_finite(): return self\n elif self.is_affine():\n rank = self._rank - 1\n twist = - self._twist\n elif self.is_elliptic():\n twist = self._twist\n rank = self._rank - 2\n if letter == 'F':\n if self._twist == [2,2]:\n twist == [1,1]\n if self._twist == [1,1]:\n twist == [2,2]\n if letter == 'G':\n if self._twist == [3,3]:\n twist = [1,1]\n elif self._twist == [1,1]:\n twist = [3,3]\n else: rank = self._rank\n return QuiverMutationType(letter,rank,twist)\n else:\n return self", "def ewald(self):\n return self._ewald", "def getD(self):\r\n return self.D", "def _repr_(self):\n return \"Lie algebra %s over %s\" % (self._classification,\n self.base_ring())", "def right(self):\n return self.__r", "def __le__(self, other):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._le_1d(other=other)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._le_2d(other=other)", "def get_E(self):\r\n return self.E", "def right(self):\n return self.r", "def l_un_degenerate(self):\n self.right = self.tmp", "def __arb__(self):\n if self.tree.total < 1:\n return None\n if self.tree.total % 2 == 0:\n return self.first()\n else:\n return self.last()", "def d(self):\n pass", "def d(self):\n pass", "def __aiter__(self):\n return self", "def one_sided(self):\n return self._one_sided", "def disagreement(self):\n return 0.5*(np.dot(np.dot(np.transpose(self.x),self.L),self.x)).item(0)", "def __invert__(self):\n return type(self)(self.parent(),\n self._simplify(SR.one() / self._express))\n # NB: 
self._express.__invert__() would return 1/self._express\n # (cf. the code of __invert__ in src/sage/symbolic/expression.pyx)\n # Here we prefer SR(1)/self._express", "def get_right(self):\r\n if getattr(self, '_right') is None:\r\n warnings.warn('Hmmm... I will use \"call\" right, since you did not provide any', UserWarning)\r\n self._right = 'call'\r\n\r\n return self._right", "def _der(self, x):\n return self._evalOrDer(x, False, True)[0]", "def get_E(self):\n return self.E", "def leverancier(self):\n return self._leverancier.get_waarde()", "def l(self):\n return self._l", "def lro(self) -> global___Snippet.Lro:", "def solveL(self,level=-1) :\n for g in self.L() :\n if level >= 0 :\n printIndent('%s' % (g,),level=level)\n print \n if g.solveR(level=(level+1) if level>=0 else level) is None : \n return g\n return None", "def __invert__(self):\n return self.obj", "def fodder(self):\n return self._fodder", "def exterior_der(self):\n from sage.calculus.functional import diff\n from utilities import format_unop_txt, format_unop_latex\n from sage.tensor.modules.comp import CompFullyAntiSym\n from vectorframe import CoordFrame\n if self._exterior_derivative is None:\n # A new computation is necessary:\n fmodule = self._fmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n self._exterior_derivative = DiffFormParal(fmodule, \n self._tensor_rank+1, \n name=rname, \n latex_name=rlname)\n # 1/ List of all coordinate frames in which the components of self\n # are known\n coord_frames = []\n for frame in self._components:\n if isinstance(frame, CoordFrame):\n coord_frames.append(frame)\n if coord_frames == []:\n # A coordinate frame is searched, at the price of a change of\n # frame, priveleging the frame of the domain's default chart\n dom = self._domain\n def_coordf = dom._def_chart._frame\n for frame in self._components:\n if (frame, def_coordf) in dom._frame_changes:\n self.comp(def_coordf, from_basis=frame)\n coord_frames = [def_coordf]\n break\n if coord_frames == []:\n for chart in dom._atlas:\n if chart != dom._def_chart: # the case def_chart is treated above\n coordf = chart._frame\n for frame in self._components:\n if (frame, coordf) in dom._frame_changes:\n self.comp(coordf, from_basis=frame)\n coord_frames[coordf]\n break\n if coord_frames != []:\n break \n # 2/ The computation:\n for frame in coord_frames:\n chart = frame._chart\n sc = self._components[frame]\n dc = CompFullyAntiSym(fmodule._ring, frame, \n self._tensor_rank+1, \n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n for ind, val in sc._comp.iteritems():\n for i in fmodule.irange():\n ind_d = (i,) + ind\n if len(ind_d) == len(set(ind_d)): \n # all indices are different\n dc[[ind_d]] += \\\n val.function_chart(chart).diff(i).scalar_field()\n self._exterior_derivative._components[frame] = dc\n return self._exterior_derivative", "def get_right(self):\n return self.__right", "def wedge(self, other):\n from sage.tensor.modules.free_module_alt_form import FreeModuleAltForm\n from sage.tensor.modules.format_utilities import is_atomic\n if self._domain.is_subdomain(other._domain):\n if not self._ambient_domain.is_subdomain(other._ambient_domain):\n raise TypeError(\"Incompatible ambient domains for exterior \" + \n \"product.\")\n elif other._domain.is_subdomain(self._domain):\n if not other._ambient_domain.is_subdomain(self._ambient_domain):\n raise TypeError(\"Incompatible ambient domains for exterior \" + \n \"product.\")\n dom_resu 
= self._domain.intersection(other._domain)\n ambient_dom_resu = self._ambient_domain.intersection(\n other._ambient_domain)\n self_r = self.restrict(dom_resu)\n other_r = other.restrict(dom_resu)\n if ambient_dom_resu.is_manifestly_parallelizable():\n # call of the FreeModuleAltForm version:\n return FreeModuleAltForm.wedge(self_r, other_r)\n # otherwise, the result is created here:\n if self._name is not None and other._name is not None:\n sname = self._name\n oname = other._name\n if not is_atomic(sname):\n sname = '(' + sname + ')'\n if not is_atomic(oname):\n oname = '(' + oname + ')'\n resu_name = sname + '/\\\\' + oname\n if self._latex_name is not None and other._latex_name is not None:\n slname = self._latex_name\n olname = other._latex_name\n if not is_atomic(slname):\n slname = '(' + slname + ')'\n if not is_atomic(olname):\n olname = '(' + olname + ')'\n resu_latex_name = slname + r'\\wedge ' + olname\n dest_map = self._vmodule._dest_map\n dest_map_resu = dest_map.restrict(dom_resu, \n subcodomain=ambient_dom_resu)\n vmodule = dom_resu.vector_field_module(dest_map=dest_map_resu)\n resu_degree = self._tensor_rank + other._tensor_rank\n resu = vmodule.alternating_form(resu_degree, name=resu_name, \n latex_name=resu_latex_name)\n for dom in self_r._restrictions:\n if dom in other_r._restrictions:\n resu._restrictions[dom] = self_r._restrictions[dom].wedge(\n other_r._restrictions[dom])\n return resu", "def __or__(self, other):\n raise NotImplementedError(\"failed monkey-patch: material stacker needs\"\n \" to replace __or__ in Scatterer\")", "def getLEq(self):\r\n return self.lEq;", "def left(self):\n return self.l", "def simplify(self):\n return self", "def inner_endomorphism(self, multiplier):\n return InnerEndomorphism(self.domain, multiplier)", "def __repr__(self):\n\n return f\"Legislator: {self.full_name} party: {self.party}\"", "def __call__(self):\r\n return self[-1]", "def DOR(self):\n a, c, d, b = self.to_ccw()\n ad, bc = a * d, b * c\n return _div(ad, bc)", "def __call__(self):\n return self._left() + self._right()", "def get_right(self):\n return self.right", "def __invert__(self):\r\n if self.field.characteristic == 2:\r\n return runtime.invert(self)\r\n\r\n return super().__invert__()", "def pillar_e_room(self):\r\n return self.__pillar_e", "def related(self):\n return self.__class__ != Rol and self or getattr(self, self.get_tipo_display())", "def __le__(self, other):\n return _generate_relational_expression(_le, self, other)", "def E(self):\n return self._E", "def E(self):\n return self._E", "def __repr__(self):\n return self.__dado", "def a_realization(self):\n if self.t==1:\n return self.kmonomial()\n else:\n return self.kHallLittlewoodP()", "def one(self):\n return self.a_realization().one()", "def __invert__(self):\r\n return 1 - self", "def E_local(Walker):\n return Local_Kinetic(Walker)+potential(Walker)", "def _getLilyAccidental(self):\n return \"\"", "def joined(self):\n joined = self._joined\n if self._joined is None and self._joined_function is not None:\n joined = self._joined = self._joined_function()\n\n if joined is None:\n if self.path_is_string:\n joined = self._joined = self.path\n else:\n joined = self._joined = dot_joiner(self.path, self.path_type)\n return joined", "def lsd(self):\n return self._lsd", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = als.tag_join(tag, 's=%s' 
% als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def _merge_type(self):\n if self._allow_solar_only and self._allow_wind_only:\n return 'outer'\n elif self._allow_solar_only and not self._allow_wind_only:\n return 'left'\n elif not self._allow_solar_only and self._allow_wind_only:\n return 'right'\n return 'inner'", "def slp(self):\n return self.elevcomp", "def get_left(self):\n return self.__left", "def modus_ponens(self, left_side):\n\t\tchecked_proposition(left_side)\n\t\tinterp = _match((A, impl, B), self)\n\t\tif interp[A] == left_side:\n\t\t\treturn self.__class__(interp[B])\n\t\traise LogicException()", "def selfie(f):\n return f.__get__(f, type(f))", "def _as_rhs(self):\n raise NotImplementedError", "def equidistant(self):\n return self._equidistant", "def playL(self,level=-1) :\n g = None\n if level >= 0 :\n level += 1\n for g in self.L() :\n if level>=0 :\n printIndent('%s' % (g,),level=level)\n print \n if g.playR(level) == None and not g.Tie() :\n return g\n for g in self.L() :\n if g.playR(level) == None and g.Tie() :\n return g\n return g", "def __add__(self, right):\n # TODO: move over to any coercion model!\n if not isinstance(right, MatrixMorphism):\n R = self.base_ring()\n return self.parent()(self.matrix() + R(right))\n if not right.parent() == self.parent():\n right = self.parent()(right)\n M = self.matrix() + right.matrix()\n return self.domain().Hom(right.codomain())(M)", "def to_adjective(self):\n return None", "def right(self):\n\t\treturn self._right", "def __le__(self, other):\n return self.master.abs2phy(pos=other)", "def __le__(self, other):\n if self is other:\n return True\n if not isinstance(other, UniSet):\n other = self.fam.c_uniset(other)\n return self.fam.c_le(self, other)", "def __or__(self, other):\n if is_FiniteStateMachine(other):\n return self.disjoint_union(other)\n else:\n raise TypeError(\"Can only add finite state machine\")", "def L(self) -> float:\n return self._L", "def express(self):\n raise NotImplementedError", "def get_shortening_and_split_point(self):\n\n def get_shortening_type():\n \"\"\"Vrati Type of Lexical Shortening alebo None.\"\"\"\n if self.len_fn(self.sw) == self.get_length(): # self.len_fn je obycajne len\n return 'FSW'\n elif self.alignment.sw_idx == 0:\n return 'RS'\n elif self.alignment.sw_idx + len(self.alignment.generate()) == len(self.sw):\n return 'LS'\n\n shortening = get_shortening_type()\n\n if shortening not in ('RS', 'LS'):\n return shortening, None\n\n split_interface = {\n 'RS': (self.alignment.score - 1, self.alignment.score),\n 'LS': (self.alignment.sw_idx - 1, self.alignment.sw_idx)\n }[shortening]\n\n # ohodnotit rozhranie\n # ci je slabikove\n if split_interface in get_interfaces(self.sw_str):\n return shortening, 'syllable'\n\n types = [Phones.get_type(self.sw[idx]) for idx in split_interface]\n\n if types == ['c', 'v']:\n return shortening, 'onset-nucleus' # consonant - vowel\n elif types == ['v', 'c']:\n return shortening, 'nucleus-coda' # vowel - consonant\n else:\n return shortening, None", "def __pout__(self):\n return self.__str__()", "def get_right(self):\n return BinaryNode.or_none(self.right)", "def downlinker(self):\n return self.__downlinker", "def narration_self(self):\n pass", "def __rmul__(self, other):\r\n if isinstance(other, tuple):\r\n return self.transform_point(other)\r\n if isinstance(other, LinearTransformation):\r\n return self.right_composition(other)\r\n else:\r\n raise NotImplementedError", "def right(self, p):\n raise 
NotImplementedError('must be implemented by subclass')", "def __str__(self):\n pos = self._pos + 1\n if len(self._refr) > len(self._alt):\n dellength = len(self._refr) - len(self._alt)\n return '{:s}:{:d}:{:d}D'.format(self._seqid, pos, dellength)\n else:\n insertion = self._alt[1:]\n return '{:s}:{:d}:I->{:s}'.format(self._seqid, pos, insertion)", "def either(self):\n return self.v.index", "def get_difficulte(self):\n return self.difficulte", "def _successor(self):\n if self.right is None:\n # get first rightward ancestor\n m = self\n n = m.parent\n while n is not None and m is n.right:\n m = n\n n = n.parent\n else:\n # get leftmost of right child\n n = self.right\n while n.left is not None:\n n = n.left\n return n", "def get_val_outer(cls, *args, **kwargs):\n return NotImplemented", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def get_E(self):\r\n return self.Real.E, self.Ideal.E", "def __le__(self, *args):\n return _ida_hexrays.ccase_t___le__(self, *args)", "def get_aligner_edge(self):\n\n return self._aligner_edge", "def right(self) -> Optional[\"ExpressionNode\"]:\n return self.__right", "def normalize(self):\r\n return self" ]
[ "0.58051234", "0.5680773", "0.5520372", "0.5517061", "0.5517061", "0.5517061", "0.5517061", "0.54927427", "0.53145987", "0.53131396", "0.53023165", "0.52391076", "0.51286054", "0.5116414", "0.5094388", "0.5080737", "0.50450814", "0.50200135", "0.4995237", "0.4990676", "0.49585143", "0.495757", "0.49496916", "0.4949266", "0.4949266", "0.49331263", "0.49173918", "0.4904543", "0.48959923", "0.48696694", "0.4866807", "0.484898", "0.48487023", "0.484504", "0.48406687", "0.48325294", "0.48183498", "0.48130405", "0.4809159", "0.47994956", "0.4788736", "0.47742686", "0.47682858", "0.47649083", "0.47558814", "0.47510013", "0.4746676", "0.4745134", "0.47420457", "0.47355372", "0.4733411", "0.4730426", "0.4729896", "0.4726305", "0.47185355", "0.4717856", "0.4717856", "0.47001714", "0.4697558", "0.46903816", "0.46821204", "0.46715882", "0.46693975", "0.46690753", "0.4657426", "0.46558982", "0.46468335", "0.46397272", "0.46358076", "0.4629119", "0.46290645", "0.4627924", "0.46272817", "0.46217936", "0.4617234", "0.4617076", "0.4615747", "0.46139702", "0.46105495", "0.46066588", "0.46031442", "0.46029815", "0.45997778", "0.45931652", "0.45929706", "0.45904127", "0.45847973", "0.45778957", "0.4569975", "0.4567407", "0.456434", "0.456424", "0.4563033", "0.45582944", "0.45568603", "0.45568603", "0.45567557", "0.4554619", "0.4548449", "0.45436767" ]
0.5174654
12
removes all Hydrogen atoms from instance
def remove_hydrogens(self) -> None:
    for cid, c in self:
        for rid, r in c:
            for aid, a in r:
                if a.element == 'H':
                    print('removing H at %s' % aid)
                    r.remove_atom(a)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n del self.shx.atoms[self.index]", "def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in types:\n if any(i in attr for i in self.keep) or attr[0:2] == '__':\n continue\n else:\n x = getattr(self, attr)\n del x\n for molecule in self.values():\n molecule.strip_molecule(self.keep)\n exit()", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def _removeOcean(self):\r\n\t\tnodesToClean = [CONST.OCEANDISPSHADER, CONST.OCEANANIMSHADER, CONST.OCEAN_ANIM_PREVIEWPLANENAME]\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass", "def remove_dummy(self) -> None:\n\n for i, atom in enumerate(self):\n if isinstance(atom, DummyAtom):\n del self[i]\n return", "def remove(self, atom):\n try:\n self.hutch.remove_atom(atom)\n except:# AttributeError or ValueError:\n pass\n self.atoms.remove(atom)\n self.natoms -= 1\n self.atomtypes[atom.z] -= 1", "def destroy(self):\n self.remove()\n for inst in reversed(self.insts[:]):\n uses = inst.uses()\n for tmp_inst in uses:\n if tmp_inst.op_name == 'OpPhi':\n IRError('Not implemented: remove from phi node') # XXX\n inst.destroy()\n self.module = None", "def reset():\n for hist in (\"Epair_Etagm\", \"Etagm_Epair\", \"ttagm_pair\", \n \"Epair_Etagm_fit\", \"dEpair_Etagm_fit\"):\n h = ROOT.gROOT.FindObject(hist)\n if h:\n h.Delete()", "def delX(self):\n del self.components[0]", "def 
delX(self):\n del self.components[0]", "def remove():", "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def remove(self):", "def destroyGlobalNuclides():\n global instances\n global byName\n global byDBName\n global byLabel\n global byMcc2Id\n global byMcc3Id\n global byMcnpId\n global byAAAZZZSId\n\n instances = []\n byName.clear()\n byDBName.clear()\n byLabel.clear()\n byMcc2Id.clear()\n byMcc3Id.clear()\n byMcnpId.clear()\n byAAAZZZSId.clear()", "def __call__(self, mol):\n return self.remove(mol)", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def remove_atom(atom_list, atom):\n del atom_list[atom.atom_number - 1]\n del atom\n return atom_list", "def clear(self):\n\n\t\tself.atomid = []\n\t\tself.resi = []\n\t\tself.resn = []\n\t\tself.atom = []\n\t\tself.element = []\n\t\tself.chain = []\n\t\tself.type = []\n\t\tself.inverted = False\n\t\tself.atomlist = []\n\t\tself.keeplist = []\n\t\tself.macros = []\n\n\t\tself.invresi = False\n\t\tself.invresn = False\n\t\tself.invatom = False\n\t\tself.invelement = False\n\t\tself.invchain = False\n\t\tself.invtype = False\n\t\tself.invatomid = False", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def purgeHis(atoms):\n for a in atoms:\n if getAtype(a) == \"N\" or getAtype(a) == \"NA\":\n found = 0\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = 1\n break\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def removeMySims(self):\n for sim in self.sims:\n try:\n sim.destroy()\n except:\n sim.removeNode()", "def clean_copy(self):\n # this is a stub implementation\n #return Molecule(\"H2O\")\n m = self._gettokens()\n for t in self._gettokens():\n #if there is value errors or key errors, remove the invalid tokens\n if (t.isalpha() and t not in _atomic_mass) or (t not in \"()\" and not t.isalnum()):\n m.remove(t)\n str2 = \"\".join(m) \n return Molecule(str2)", "def clean(self):\n for i in self.winfo_children():\n i.destroy()", "def clear_quantities(shared):\n\n del_list = [i for i, fm in enumerate(shared.field_mappings)\n if fm.extra is not None]\n for index in reversed(del_list):\n del shared.field_mappings[index]\n\n shared.config.remove_section('extra')\n shared.config.add_section('extra')", "def __del__(self):\n\n # Base class destructor is called ?? 
needed\n sim.Simulation.__del__(self)\n\n if self.verbose:\n print \"Cleaning derived simulation object LAMMPS1\"\n\n del self.pairCoeffDct\n del self.bondCoeffDct", "def remove_atom(self, atom):\n assert atom.altloc == self\n del self[atom.alt_loc]\n atom.altloc = None", "def delete():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # The interatomic data.\n for interatom in interatomic_loop():\n # The data.\n if hasattr(interatom, 'j_coupling'):\n del interatom.j_coupling\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n del interatom.j_coupling_err", "def remove_atom(self, atom):\n assert isinstance(atom, Atom)\n assert atom.model_id == self.model_id \n self.chain_dict[atom.chain_id].remove_atom(atom)", "def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)", "def destroy(self):\n del self.nodes\n self.nodes = {}", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def noh(ls, dsets):\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set", "def clear_protein(self):\n list.clear(self.occupied)", "def remove_from_hand(self):\n pass", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def __del__(self):\r\n self.clearList()", "def remove_atom(self, atom):\n assert atom.fragment == self\n\n if self.alt_loc_dict.has_key(atom.name):\n altloc = self.alt_loc_dict[atom.name]\n if altloc.has_key(atom.alt_loc):\n altloc.remove_atom(atom)\n if len(altloc) == 0:\n del self.alt_loc_dict[atom.name]\n self.atom_order_list.remove(altloc)\n if atom in self.atom_list:\n self.atom_list.remove(atom)\n del self.atom_dict[atom.name]\n else:\n self.atom_order_list.remove(atom)\n self.atom_list.remove(atom)\n del self.atom_dict[atom.name]\n\n atom.fragment = None", "def del_cells(self):\t\r\n del self._cells", "def removeAll(self):\n self.pDict.clear()", "def clean(self):\n for nodeId in list(self.nodes.keys()):\n if not self.nodes[nodeId].safe:\n del self.nodes[nodeId]", "def remove_from_model(self, destructive=False):\n self._model.remove_metabolites(self, destructive)", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def del_lx(self):\r\n del self._lx", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\n pass", "def purge(self):\n keys = [k for (k, v) in self.get_range()]\n\n [self.remove(k) for k in keys]", "def clear(self):\r\n\t\tself.free_objects[:] = []", "def clear(self):\n for tag in self.meta.findall(CN('meta:user-defined')):\n self.meta.remove(tag)", "def reset(self):\n self.mol.RHF(doPrint=False)\n self.dipole = []\n self.angmom = []\n self.Energy = []\n self.shape = []", "def delDimension(self):\n self.components = []", "def clear(self):\n self._nodes = { }\n self._arcs = set()", "def clear(self):\n for vertex in self.vertices():\n del self[vertex]", "def clear(self) -> None:\n self._moments = []", "def clear_binstar(cli, owner):\n for channel in cli.list_channels(owner):\n cli.remove_channel(owner, channel)\n\n for package in cli.user_packages(owner):\n cli.remove_package(owner, package['name'])", "def remove_eliminated_homes(self):\n filter_homes = []\n for home in self.homes:\n if not home.eliminated:\n 
filter_homes.append(home)\n self.homes = filter_homes", "def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()", "def delete_all_wells(self):\n self.wells = []", "def _remove(self):\n pass", "def unload(self):\n if self.material_background:\n self.parent.removeItem(self.material_background)\n self.material_background = None\n if self.mod_background:\n self.parent.removeItem(self.mod_background)\n self.mod_background = None\n if self.material_foreground:\n self.parent.removeItem(self.material_foreground)\n self.material_foreground = None\n if self.mod_foreground:\n self.parent.removeItem(self.mod_foreground)\n self.mod_foreground = None\n if self.liquid:\n self.parent.removeItem(self.liquid)\n self.liquid = None", "def remove_all_spaces(self):\n self._register.clear()", "def clear_extensions(self):\n self.extensions = set()", "def delY(self):\n del self.components[1]", "def delY(self):\n del self.components[1]", "def convertUnbondedAtomToMetaAtom (self,atom):\r\n \r\n \r\n # Create a molecule consisting of just the atom\r\n mol = []\r\n mol.append(atom)\r\n # metaAtomNumber will be the length of the current metaMolecule\r\n metaAtomNumber = len(self.metaMolecule.metaAtoms)\r\n newMetaAtom = MetaAtom(metaAtomNumber,atom)\r\n # Next need to generate metaspikes we can do this by generating a metaspike object then finding the dangling nodes\r\n # and tails already created for this spike\r\n type1MetaSpike = metSpk.MetaSpike(1,0)\r\n type2MetaSpike = metSpk.MetaSpike(2,0)\r\n numDanglingNodes = 0\r\n numDanglingTails = 0\r\n danglingNodesToRemove = []\r\n danglingTailsToRemove = []\r\n indexType1Spike = 0\r\n indexType2Spike = 0\r\n for i in range(len(self.metaSpikes)):\r\n if self.metaSpikes[i].typeSpike == 1:\r\n indexType1Spike = i\r\n for j in range(len(self.metaSpikes[i].danglingNodeList)):\r\n print (\"The length of the dangling node list is: \" + str(len(self.metaSpikes[i].danglingNodeList)) + \"\\n\")\r\n print (\"The value of j is: \" + str(j) + \"\\n\")\r\n # See if dangling node belongs to atom being removed\r\n if self.metaSpikes[i].danglingNodeList[j].spike.RBN == atom:\r\n # If so then add to list of nodes being removed\r\n type1MetaSpike.addDanglingNode(self.metaSpikes[i].danglingNodeList[j])# Add to metaspike\r\n danglingNodesToRemove.append(j)\r\n numDanglingNodes += 1\r\n else:\r\n indexType2Spike = i\r\n for j in range(len(self.metaSpikes[i].danglingTailList)):\r\n print (\"The length of the dangling tail list is: \" + str(len(self.metaSpikes[i].danglingTailList)) + \"\\n\")\r\n print (\"The value of j is: \" + str(j) + \"\\n\")\r\n # See if dangling tail belongs to atom being removed\r\n if self.metaSpikes[i].danglingTailList[j].spike.RBN == atom:\r\n # If so then add to list of rails being removed\r\n type2MetaSpike.addTailDanglingBonds(self.metaSpikes[i].danglingTailList[j]) # Add to metaspike\r\n danglingTailsToRemove.append(j)\r\n numDanglingTails += 1 \r\n \r\n # If any dangling nodes have been found then the spike can be added to the metaAtom\r\n if numDanglingNodes > 0: \r\n newMetaAtom.addMetaSpike(type1MetaSpike)\r\n # Next need to remove danglingNodes no longer located in this metaAtom\r\n danglingNodesToRemove.sort(reverse = True) # Sort so popping values doesnt affect other indexes\r\n for i in range (len(danglingNodesToRemove)):\r\n self.metaSpikes[indexType1Spike].danglingNodeList.pop(danglingNodesToRemove[i])\r\n \r\n # If any dangling tails have been found then the spike can be added to the metaAtom\r\n 
if numDanglingTails > 0: \r\n newMetaAtom.addMetaSpike(type2MetaSpike)\r\n # Next need to remove danglingtails no longer located in this metaAtom\r\n danglingTailsToRemove.sort(reverse = True) # Sort so popping values doesnt affect other indexes\r\n for i in range (len(danglingTailsToRemove)):\r\n self.metaSpikes[indexType2Spike].danglingNodeList.pop(danglingTailsToRemove[i])\r\n\r\n \r\n \r\n \r\n # We repeated this process for dangling tails\r\n \r\n type2MetaSpike = metSpk.MetaSpike(2,1)\r\n \r\n numDanglingTails = 0\r\n for i in range(len(self.metaSpikes)):\r\n if self.metaSpikes[i].typeSpike == 2:\r\n for j in range(len(self.metaSpikes[i].danglingTailList)):\r\n if self.metaSpikes[i].danglingTailList[j].spike.RBN == atom:\r\n type2MetaSpike.addTailDanglingBonds(self.metaSpikes[i].danglingTailList[j])\r\n danglingTailsToRemove.append(j)\r\n numDanglingTails += 1\r\n # If any dangling nodes have been found then the spike can be added to the metaAtom\r\n if numDanglingTails > 0: \r\n newMetaAtom.addMetaSpike(type2MetaSpike)\r\n # Next need to remove danglingNodes no longer located in this metaAtom\r\n danglingTailsToRemove.sort(reverse = True) # Sort so popping values doesnt affect other indexes\r\n for i in range (len(danglingTailsToRemove)):\r\n self.metaSpikes[i].danglingTailList.pop(danglingTailsToRemove[i])\r\n \r\n return newMetaAtom", "def clear(self):\r\n ElementSet.clear(self)\r\n self.update()", "def full_clear(self):\n self.clear()\n self.class_hooks.clear()", "def remove_abs_vars(self):\n self.m.remove(self.bp_abs)\n self.m.remove(self.bn_abs)\n self.m.remove(self.gp_abs)\n self.m.remove(self.gn_abs)\n self.m.remove(self.beta_p)\n self.m.remove(self.beta_n)\n self.m.remove(self.gamma_p)\n self.m.remove(self.gamma_n)", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def CleanUp(self):\n for Ind in self.IndList():\n if amax(abs(self[Ind]))<1e-10:\n del self[Ind]", "def clearAllElementMetaIdList(self):\n return _libsbml.Model_clearAllElementMetaIdList(self)", "def removeAtomStrainDuplicates(self, tol_mag = 7, verbose = 1):\n\n keep = self.getAtomStrainDuplicates(tol_mag = tol_mag, verbose = verbose - 1)\n\n self.deleteInterfaces(keep = keep, verbose = verbose)", "def remove_stems(graph = None):\n\tfor x,y in basepairs(graph = graph):\n\t\tgraph.remove_node(x)\n\t\tgraph.remove_node(y)", "def delete_all(self):\n for tag in self._segments['APP1'].get_tag_list():\n try:\n self.__delattr__(tag)\n except AttributeError:\n warnings.warn(\"could not delete tag \" + tag, RuntimeWarning)", "def remove(self, egg):", "def remove(self) -> None:\n self.map.cordons.remove(self)", "def clear(self) -> None:\n # Delete these so the .by_class/name values are cleared.\n self['classname'] = 'info_null'\n del self['targetname']\n self._keys.clear()\n # Clear $fixup as well.\n self._fixup = None", "def clear(self) -> None:\n logger.info(\"destroying all registered mutants\")\n try:\n uuids = list(self)\n for uuid in uuids:\n del self[uuid]\n except Exception:\n logger.exception(\"failed to destroy all registered mutants\")\n raise\n logger.info(\"destroyed all registered mutants\")", "def _RemoveFromCloneList(self, clone, attrNamesToClone):\n attrNamesToClone = super(EquationUnit, self)._RemoveFromCloneList(clone, attrNamesToClone)\n \n dontClone = [\"_Funcs\", \"_FuncsDefs\"]\n \n for name in dontClone:\n if name in attrNamesToClone:\n attrNamesToClone.remove(name)\n \n return attrNamesToClone", "def reset(self):\n 
self.entities = set()\n self.frozen = False", "def removeDegenerate(self):\n return self[~self.testDegenerate()]", "def clear_dummy_obj(self):\n for d in self.dummies:\n self.map.remove_node(d)\n\n self.dummies = []", "def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist", "def clear(self):\n self.chromosome_list = []", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def unload(self) -> None:\n for attr in self._attrs:\n setattr(self, attr, None)", "def del_lz(self):\r\n del self._lz", "def delete_lattice(): \n # Delete the existing lattice\n for o in bpy.data.objects:\n if o.type == 'LATTICE':\n # Delete the existing lattice\n object_to_delete = bpy.data.objects[\"Lattice\"]\n bpy.data.objects.remove(object_to_delete, do_unlink=True)", "def remove_tactic(self):\n tactic_removed = input(\"Enter a tactic to be removed: \")\n self.proof.tactics.remove(tactic_removed)\n for gene in self.population:\n gene.chromosome = [e for e in gene.chromosome if e != tactic_removed]", "def clear_structure(self):\n self.structure = []\n return", "def cleanup(self):\n for attribute in self._all_db_field_names:\n delattr(self, attribute)", "def parse_hydrogen(self, res, topo):\n name = self.map[res].name\n opttype = self.map[res].opttype\n optangle = self.map[res].optangle\n map_ = self.map[res].map\n mydef = HydrogenDefinition(name, opttype, optangle, map_)\n patch_map = []\n atoms = []\n refatoms = []\n conformernames = []\n refmap = {}\n titrationstatemap = {}\n tautomermap = {}\n conformermap = {}\n atommap = {}\n ntrmap = {}\n hmap = {}\n nonhmap = {}\n # reference map from TOPOLOGY.xml\n for res_ in topo.residues:\n refmap[res_.name] = res_.reference\n for atom in refmap[res_.name].atoms:\n atommap[res_.name, atom.name] = atom\n for titrationstate in res_.titration_states:\n titrationstatemap[titrationstate.name] = titrationstate\n for tautomer in titrationstate.tautomers:\n tautomermap[tautomer.name] = tautomer\n for conformer in tautomer.conformers:\n conformermap[conformer.name] = conformer\n if name == \"CYS\":\n _ = refmap[\"CYS\"]\n atoms = [\"HG\"]\n refatoms = [\"SG\", \"CB\"]\n elif name == \"HIS\":\n _ = refmap[\"HIS\"]\n atoms = [\"HD1\", \"HE2\"]\n for atom in atoms:\n refatoms = [\"ND1\", \"CG\", \"CE1\"]\n elif name == \"LYS\":\n _ = self.debumper.biomolecule.reference_map[name]\n patch_map = self.debumper.biomolecule.patch_map[\"LYN\"]\n atoms = patch_map.remove\n refatoms = [\"HZ1\", \"HZ2\", \"NZ\"]\n elif name == \"TYR\":\n _ = self.debumper.biomolecule.reference_map[name]\n patch_map = self.debumper.biomolecule.patch_map[\"TYM\"]\n atoms = patch_map.remove\n refatoms = [\"OH\", \"CZ\", \"CE2\"]\n elif name == \"WAT\":\n _ = self.debumper.biomolecule.reference_map[name]\n patch_map = self.debumper.biomolecule.patch_map[\"HOH\"]\n atoms = [\"H1\", \"H2\"]\n refatoms = None\n elif name == \"NTR\":\n ntrmap = {} # map for N-TERM\n for tautomer in titrationstatemap[\"NTER\"].tautomers:\n for conformer in tautomermap[tautomer.name].conformers:\n for conformeradds in conformermap[\n conformer.name\n ].conformer_adds:\n for atom in conformeradds.atoms:\n ntrmap[atom.name] = atom\n atoms = [\"H3\", \"H2\"]\n refatoms = [\"CA\", \"H\", \"N\"]\n elif name == \"CTR\":\n hmap = {} # map for h atoms\n nonhmap = {} # map for refatoms\n conformernames = []\n 
for tautomer in titrationstatemap[\"CTER\"].tautomers:\n for conformer in tautomermap[tautomer.name].conformers:\n for conformeradds in conformermap[\n conformer.name\n ].conformer_adds:\n for atom in conformeradds.atoms:\n nonhmap[atom.name] = atom\n for tautomer in titrationstatemap[\"CTER0\"].tautomers:\n for conformer in tautomermap[tautomer.name].conformers:\n conformernames.append(conformer.name)\n for conformeradds in conformermap[\n conformer.name\n ].conformer_adds:\n for atom in conformeradds.atoms:\n hmap[conformer.name, atom.name] = atom\n atoms = [\"HO\"]\n refatoms = [\"O\", \"C\", \"OXT\"]\n elif name in [\"SER\", \"GLN\", \"THR\", \"ARG\", \"ASN\"]:\n _ = refmap[name]\n if name == \"SER\":\n atoms = [\"HG\"]\n refatoms = [\"OG\", \"CB\"]\n elif name == \"GLN\":\n atoms = [\"HE21\"]\n refatoms = [\"NE2\"]\n elif name == \"THR\":\n atoms = [\"HG1\"]\n refatoms = [\"OG1\", \"CB\"]\n elif name == \"ARG\":\n atoms = [\"HH11\", \"HH12\", \"HH21\", \"HH22\", \"HE\"]\n for atom in atoms:\n refatoms = [\"NH1\", \"NH2\", \"CZ\"]\n elif name == \"ASN\":\n atoms = [\"HD21\"]\n refatoms = [\"ND2\"]\n elif name == \"ASH\":\n hmap = {} # map for h atoms\n nonhmap = {} # map for refatoms\n conformernames = []\n _ = refmap[\"ASP\"]\n for tautomer in titrationstatemap[\"ASH\"].tautomers:\n for conformer in tautomermap[tautomer.name].conformers:\n for conformeradds in conformermap[\n conformer.name\n ].conformer_adds:\n for atom in conformeradds.atoms:\n hmap[conformer.name, atom.name] = atom\n conformernames.append(conformer.name)\n atoms = [\"HD1\", \"HD2\"]\n refatoms = [\"OD1\", \"CG\", \"OD2\"]\n elif name == \"GLH\":\n hmap = {} # map for h atoms\n nonhmap = {} # map for refatoms\n conformernames = []\n _ = refmap[\"GLU\"]\n for tautomer in titrationstatemap[\"GLH\"].tautomers:\n for conformer in tautomermap[tautomer.name].conformers:\n for conformeradds in conformermap[\n conformer.name\n ].conformer_adds:\n for atom in conformeradds.atoms:\n hmap[conformer.name, atom.name] = atom\n conformernames.append(conformer.name)\n atoms = [\"HE1\", \"HE2\"]\n refatoms = [\"OE1\", \"CD\", \"OE2\"]\n else:\n patch_map = self.debumper.biomolecule.patch_map[name]\n atoms = list(patch_map.map.keys())\n atoms.sort()\n if name in [\"NTR\"]:\n bondlength = 1.0\n for atom in atoms:\n hname = atom\n x = ntrmap[hname].x\n y = ntrmap[hname].y\n z = ntrmap[hname].z\n bondatom = ntrmap[hname].bonds[0]\n myconf = HydrogenConformation(hname, bondatom, bondlength)\n atom = defns.DefinitionAtom(hname, x, y, z)\n myconf.add_atom(atom)\n\n # TODO - lots of arbitrary undefined numbers in this section\n for atom_ in refatoms:\n if atom_ == \"N\":\n natom = defns.DefinitionAtom(atom_, 1.201, 0.847, 0.0)\n myconf.add_atom(natom)\n elif atom_ == \"CA\":\n caatom = defns.DefinitionAtom(atom_, 0.0, 0.0, 0.0)\n myconf.add_atom(caatom)\n elif atom_ == \"H\":\n caatom = defns.DefinitionAtom(\n atom_, 1.201, 1.847, 0.000\n )\n myconf.add_atom(caatom)\n mydef.add_conf(myconf)\n elif name in [\"CTR\"]:\n bondlength = 1.0\n for conformer in conformernames:\n for atom in atoms:\n hname = atom\n x = hmap[conformer, hname].x\n y = hmap[conformer, hname].y\n z = hmap[conformer, hname].z\n bondatom = hmap[conformer, hname].bonds[0]\n myconf = HydrogenConformation(hname, bondatom, bondlength)\n atom = defns.DefinitionAtom(hname, x, y, z)\n myconf.add_atom(atom)\n\n # TODO - the following code is almost nonsensical\n for atom_ in refatoms:\n if atom_ == \"C\":\n catom = defns.DefinitionAtom(\n atom_, -1.250, 0.881, 0.000\n )\n 
myconf.add_atom(catom)\n else:\n atomname = atom_\n x = nonhmap[atom_].x\n y = nonhmap[atom_].y\n z = nonhmap[atom_].z\n atom2 = defns.DefinitionAtom(atomname, x, y, z)\n myconf.add_atom(atom2)\n mydef.add_conf(myconf)\n\n elif name in [\"ASH\", \"GLH\"]:\n for conformer in conformernames:\n for atom in atoms:\n hname = atom\n if (\"1\" in conformer and \"1\" in atom) or (\n \"2\" in conformer and \"2\" in atom\n ):\n x = hmap[conformer, hname].x\n y = hmap[conformer, hname].y\n z = hmap[conformer, hname].z\n bondatom = hmap[conformer, hname].bonds[0]\n bondlength = 1.0\n myconf = HydrogenConformation(\n hname, bondatom, bondlength\n )\n atom = defns.DefinitionAtom(hname, x, y, z)\n myconf.add_atom(atom)\n\n for atom_ in refatoms:\n atomname = atom_\n refresname = \"\"\n if name == \"ASH\":\n refresname = \"ASP\"\n elif name == \"GLH\":\n refresname = \"GLU\"\n x = atommap[refresname, atom_].x\n y = atommap[refresname, atom_].y\n z = atommap[refresname, atom_].z\n atom2 = defns.DefinitionAtom(atomname, x, y, z)\n myconf.add_atom(atom2)\n mydef.add_conf(myconf)\n elif name not in [\"WAT\"]:\n bondlength = 1.0\n for atom in atoms:\n hname = atom\n x = atommap[name, hname].x\n y = atommap[name, hname].y\n z = atommap[name, hname].z\n bondatom = atommap[name, hname].bonds[0]\n myconf = HydrogenConformation(hname, bondatom, bondlength)\n atom = defns.DefinitionAtom(hname, x, y, z)\n myconf.add_atom(atom)\n if refatoms is not None:\n if name == \"HIS\" and atom.name == \"HE2\":\n refatoms = [\"NE2\", \"CE1\", \"CD2\"]\n if name == \"ARG\" and atom.name == \"HE\":\n refatoms = [\"NE\", \"CZ\", \"NH1\"]\n # FIXME: 2020/07/06 intendo - the \"atom\" is reused in\n # the outer for loop and ambiguous\n for atom in refatoms:\n atomname = atom\n x = atommap[name, atomname].x\n y = atommap[name, atomname].y\n z = atommap[name, atomname].z\n atom = defns.DefinitionAtom(atomname, x, y, z)\n myconf.add_atom(atom)\n mydef.add_conf(myconf)\n return mydef", "def destroy(self):\n for inst in self.module.global_insts[:]:\n if (inst.op_name in spirv.DECORATION_INSTRUCTIONS or\n inst.op_name in spirv.DEBUG_INSTRUCTIONS):\n if self.result_id in inst.operands:\n inst.destroy()\n if self.basic_block is None:\n if self not in self.module.global_insts:\n raise IRError('Instruction is not in basic block or module')\n self.module.global_insts.remove(self)\n return\n self.basic_block.insts.remove(self)\n if self.result_id is not None:\n del self.module.id_to_inst[self.result_id]\n self.basic_block = None\n self.op_name = None\n self.result_id = None\n self.type_id = None\n self.operands = None" ]
[ "0.6937832", "0.6847128", "0.6714238", "0.6525774", "0.64063466", "0.63946915", "0.62644595", "0.6262052", "0.6121503", "0.60625106", "0.60625106", "0.6042291", "0.6015206", "0.5961813", "0.5960132", "0.592724", "0.5911329", "0.58740425", "0.5869668", "0.5853029", "0.5847566", "0.5840705", "0.5833174", "0.5787099", "0.5776653", "0.57677335", "0.57639223", "0.57606786", "0.5751832", "0.57430935", "0.5741274", "0.5732762", "0.5730973", "0.57041174", "0.5673703", "0.5659852", "0.5655323", "0.5625569", "0.5612792", "0.560116", "0.55973524", "0.55921537", "0.55850035", "0.55831873", "0.5573268", "0.5573268", "0.5573268", "0.5572562", "0.5571267", "0.5570425", "0.5565631", "0.55567133", "0.5548888", "0.5543478", "0.5540727", "0.55274117", "0.5525144", "0.5524484", "0.5521269", "0.55211055", "0.55194247", "0.5517653", "0.551067", "0.5502076", "0.5502076", "0.5500197", "0.5496005", "0.54911", "0.5481963", "0.5477364", "0.54766905", "0.5476186", "0.54635966", "0.54614806", "0.54575616", "0.5456814", "0.54482466", "0.54472005", "0.54445165", "0.54339266", "0.5430146", "0.5429851", "0.5429524", "0.5427501", "0.54173166", "0.5417313", "0.5417313", "0.5417313", "0.5417313", "0.5417313", "0.5417313", "0.5417313", "0.5413941", "0.54078954", "0.5405436", "0.5402282", "0.54015267", "0.53934985", "0.5392373", "0.53857416" ]
0.7476278
0
collect a set of residues with memb_z within [15, 15]
def memb_residues(pdb: MyPDB) -> list():
    result = []
    for ch in pdb.chains.values():
        for res in ch.values():
            if res.memb_z is not None:
                result.append(res)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset_mz(self, mz):\n regions = self.boxes_mz.at(mz)\n it = BoxHolder()\n for r in regions:\n box = r.data\n it.add_box(box)\n return it", "def get_subset(mlist,year):\n newlist = []\n for entry in mlist:\n if int(entry[0][:4]) > int(year):\n continue\n newvec = entry[:8]\n citations = entry[8]['citations']\n citations = filter(lambda a: int(a[:4]) <= int(year), citations)\n newvec[2] = len(citations)\n newlist.append(newvec)\n return newlist", "def filter_pores_by_z(network, pores, z=1):\n pores = network._parse_indices(pores)\n Nz = network.num_neighbors(pores=pores)\n orphans = np.where(Nz == z)[0]\n hits = pores[orphans]\n return hits", "def getLigandNbrs(resids: List[Residue], struct:Structure)->List[ResidueDict]:\n\n ns = NeighborSearch(list( struct.get_atoms() ))\n nbrs = []\n\n for r in resids:\n # a ligand consists of residues\n resatoms = r.child_list[0]\n # each residue has an atom plucked at random\n for nbrresidues in ns.search(resatoms.get_coord(), 5,level='R'):\n # we grab all residues in radius around that atom and extend the list of neighbors with those\n nbrs.extend([nbrresidues])\n\n # Filter out the residues that constitute the ligand itself\n filtered = [] \n for neighbor in nbrs:\n present = 0\n for constit in resids:\n if ResidueDict(constit)==ResidueDict( neighbor ):\n present = 1\n if present == 0:\n filtered.append(ResidueDict(neighbor))\n\n return [ * map(lambda x: addBanClass(x) , set(filtered) ) ]", "def create_subsets(x, y):\n # initiate empty list for return variables.\n sets_x = []\n sets_y = []\n indices = []\n\n # iterate through value of PRI_JET_NUM (ranged inclusively from 0 until 3)\n for pri_jet_num_val in np.unique(x[:,22]):\n \n # Find subset which DER_MASS_MMC is not equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] != -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask)\n\n # Find subset which DER_MASS_MMC is equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] == -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask) \n \n # return subsets of x, y, and corresponding indices\n return sets_x, sets_y, indices", "def get_regions_mask(self, input):", "def _find_members(self, given_members):\n if len(list(self.points)) > 3:\n out_mem = [m for m in given_members if\n self.intersects_poly(m.polygon)]\n else:\n out_mem = []\n return out_mem", "def fetchCooler(c, regions, coolerFetch = lambda coo, ext:coo.matrix(balance=True, sparse=True).fetch(ext),\n mask=True, force=False, ):\n regions = [list(i) for i in regions]\n resolution = c.binsize\n\n for i in regions:\n if i[1] == None:\n i[1] = 0 \n if i[2] == None:\n i[2] = c.chromsizes[i[0]]\n\n \n for a in regions: \n if str(a[0]) not in c.chromnames:\n raise ValueError(\"Chromosome {0} from regions not found in cooler\".format(a))\n if (a[1] % resolution) != 0:\n raise ValueError(\"Start of an region should be a multiple fo resolution\")\n \n# bins = c.bins()[:]\n \n# # managing masks \n# if mask is False: \n# bins[\"mask\"] = 1 \n# elif mask is None:\n# assert \"mask\" in bins.columns\n# elif mask is True: \n# pass \n# elif callable(mask):\n# pass \n# else:\n# bins[\"mask\"] = mask \n \n \n for region in regions:\n matrix = coolerFetch(c, region)\n try: # setting matrix nans to zeros.\n matrix.data = np.nan_to_num(matrix.data, copy=False)\n except TypeError: #workaround for old numpy 
versions\n matrix.data = np.nan_to_num(matrix.data)\n# st,end = c.extent(region)\n# subbins = bins[st:end].copy()\n if mask is True: \n newmask = np.array((matrix.sum(axis=0) > 0 ))[0]\n# if callable(mask):\n# new_mask = mask(matrix)\n# subbins[\"mask\"] = newmask \n\n assert len(newmask) == matrix.shape[0]\n\n yield matrix, newmask", "def get_relevant_zones(array,threshold=3):\n\n\treturn [item for item in array if len(item)>3]", "def roi_vecs(layer_coords, vec_coords, region):\n \n if region == 'crown':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n \n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(start_x, stop_x+5))]\n \n return roi_vec_coords\n \n elif region == 'fundus':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n\n # roi_vec_coords = [i for i in vec_coords if i[1][0] in list(range(start_x-10, stop_x+3))]\n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(stop_x, start_x))]\n \n # print(roi_vec_coords)\n return roi_vec_coords", "def bounds(self, resids: NDArray) -> List[Tuple[float, float]]:", "def get_zones(array,kind,relevant=False,threshold=3):\n\n\tresulting_set=[]\n\n\ti=0\n\tif array[i]==kind:\n\t\tcount=1\n\telse:\n\t\tcount=0\n\n\twhile i<len(array):\n\t\t\n\t\tif array[i]==kind:\n\t\t\tcount+=1\n\t\telif array[i]!=kind and array[i-1]==kind:\n\t\t\tresulting_set.append(([kind]*count,i-count))\n\t\t\tcount=0\n\t\telse:\n\t\t\tpass\n\n\t\ti+=1\n\n\tif count>0:\n\t\tresulting_set.append(([kind]*count, i-count))\n\n\tif relevant == False:\n\t\treturn resulting_set\n\telse:\n\t\treturn [item for item in resulting_set if len(item[0])>threshold]", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def get_feats(mz_list,intensity_list,feat_matrix,instance_index,feats,max_dist=275,allowed_c=[]):\n\t# UNCOMMENT var below if standard library combinations is used\n\t#allowed_c = set(allowed_c)\n\t\n\tspectrum = zip(mz_list,intensity_list)\n\tdists_mz = []\n\tdists_mz_intens = []\n\tprev_analyzed = set()\n\t\n\t#Make deepcopy since we are going to change the spectra!\n\tspec_one = copy.deepcopy(spectrum)\n\tspec_two = copy.deepcopy(spectrum)\n\t\n\t#Iterate over the peaks and measure the distance in m/z between all combinations\n\tfor peak_one in spec_one:\n\t\tif len(spec_two) == 1: continue\n\t\tspec_two = spec_two[1:]\n\t\tfor peak_two in spec_two:\n\t\t\tdist_mz = abs(peak_one[0]-peak_two[0])\n\t\t\tif dist_mz > max_dist: 
break\n\t\t\tdists_mz.append(dist_mz)\n\t\t\tdists_mz_intens.append(peak_one[1]+peak_two[1])\n\t\n\t# UNCOMMENT code below if standard library combinations is used\n\t#for c in combinations(spectrum,2):\n\t#\tdist_mz = abs(c[0][0]-c[1][0])\n\t#\tif c[0][0] in prev_analyzed: continue\n\t#\tif dist_mz > max_dist: \n\t#\t\tprev_analyzed.add(c[0][0])\n\t#\t\tcontinue\n\t#\tif len(allowed_c) != 0:\n\t#\t\tif dist_mz not in allowed_c: continue\n\t#\tdists_mz.append(dist_mz)\n\t#\tdists_mz_intens.append(c[0][1]+c[1][1])\n\t\n\t#Digitize the delta m/z; assign bins for all delta m/z s\n\tindex_bins = np.digitize(dists_mz,feats)\n\t\n\t#Iterate over assigned bins and sum the intensity for possible existing values\n\tfor index,intens in zip(index_bins,dists_mz_intens):\n\t\tfeat_matrix[instance_index,index-1] += intens\n\n\treturn(feat_matrix)", "def get_data(n):\n data = pd.read_csv('map_data/lior_results_2.csv')\n data = data.drop(['estimated_mass', 'estimated_pop'], axis=1)\n data = data[data.binomial != 'Sus scrofa'] # Wild Boar\n data = data[data.binomial != 'Ursus maritimus'] # Polar bear\n data = data[data.binomial != 'Sus bucculentus'] # EX\n data = data[data.binomial != 'Melomys rubicola'] # EX\n data = data.assign(total_mass=data.AdultBodyMassG * data.pop_density * data.Range,\n total_mass_density=data.AdultBodyMassG * data.pop_density)\n data = data.sort_values(by='total_mass_density', ascending=False)\n data = data.iloc[0:n - 1]\n geo_data = gpd.read_file('TERRESTRIAL_MAMMALS/TERRESTRIAL_MAMMALS.shp').to_crs(\"EPSG:6933\")\n geo_data = geo_data[geo_data.category != 'EX']\n range_polygons = geo_data.loc[(geo_data['legend'] == 'Extant & Introduced (resident)') |\n (geo_data['legend'] == 'Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Extant & Reintroduced (resident)') |\n (geo_data['legend'] == 'Extant & Vagrant (seasonality uncertain)') |\n (geo_data['legend'] == 'Extant (non breeding)') |\n (geo_data['legend'] == 'Extant (resident)') |\n (geo_data['legend'] == 'Probably Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Probably Extant (resident)') |\n (geo_data['legend'] == 'Reintroduced')]\n range_polygons = range_polygons.merge(data, on='binomial')\n range_polygons = range_polygons.to_crs(\"EPSG:6933\")\n return range_polygons", "def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]", "def check_point(self, mz, rt):\n regions = self.boxes_mz.at(mz)\n hits = set()\n for r in regions:\n if r.data.rt_match(rt):\n hits.add(r.data)\n return hits", "def get_carboxyl_map(atom_list):\n carboxyl_map = [[atom_list[x], atom_list[x+1], atom_list[x+2], atom_list[x+3]] for x in range(len(atom_list)-3) if ((atom_list[x].residue_name == atom_list[x+1].residue_name == atom_list[x+2].residue_name == atom_list[x+3].residue_name == \"C1A\") and (atom_list[x].residue_number == atom_list[x+1].residue_number == atom_list[x+2].residue_number == atom_list[x+3].residue_number) and (atom_list[x].atom_name != \"CY\" != atom_list[x+1].atom_name != atom_list[x+2].atom_name != \"CY\" != atom_list[x+3].atom_name ))]\n return carboxyl_map", "def filter_residues(residues, biomolecule='PROTEIN'):\n biomolecule = biomolecule.strip().upper()\n standard_residues = []\n for res in residues:\n if res.get_resname().strip() in STANDARD_RESIDUES[biomolecule]:\n if not res.id[0].strip(): standard_residues.append(res) # filter out hetro residues\n return standard_residues", "def get_obstList(self,X,Y,Z):\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = 
(x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))", "def get_obstList(self,X,Y,Z):\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = (x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))", "def select_regions(binary,f,min=0,nbest=100000):\n if binary.max() == 1:\n labels,_ = label(binary)\n else:\n labels = binary.astype(uint8)\n objects = find_objects(labels)\n scores = [f(o) for o in objects]\n best = argsort(scores)\n keep = zeros(len(objects)+1,'i')\n if nbest > 0:\n for i in best[-nbest:]:\n if scores[i]<=min: continue\n keep[i+1] = 1\n # print scores,best[-nbest:],keep\n # print sorted(list(set(labels.ravel())))\n # print sorted(list(set(keep[labels].ravel())))\n return keep[labels]", "def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res", "def extract_upstream_for_meme(genomes, locuses, upstream, radius, overlap):\n\n records = []\n for genome in genomes:\n feature_len = len(genome.features)\n\n index = 0\n locations = set()\n for feature in filter(lambda f: f.type == \"CDS\", genome.features):\n locus = feature.qualifiers[\"locus_tag\"][0] \n if locus in locuses:\n locations.add(index)\n for i in range(index - radius, index + radius):\n locations.add(i)\n \n index += 1\n\n print(locations)\n records += extract_upstream(locations, genome, upstream, overlap)\n\n return records", "def getratios(results_condition,conditions):\n setlist = {}\n for r in range(conditions):\n setlist[r] = []\n \n for gene in results_condition.genelist:\n conditions = len(gene.logfold)\n\n count = 0\n for set_ in setlist:\n if gene.logfold[count] > 1.5 or gene.logfold[count] < -1.5 :\n setlist[count].append(gene.name)\n count +=1\n 
return setlist", "def extract_pi_region(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,min_nsites=0,min_variants=0,verbose=\"min\",called=True,output=\"pi\"):\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\t#Function\n\tpi_values=[]#list \n\tnsites_considered=0#iterator for sampling frequency\n\ttotal_nsites=0\n\tnvariants=0# iterator for sites that are varying\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\tcond=checkRecord_Cov(input_vcf,record,mincov,maxcov,inds=inds,called=True,nalleles=[1,2])# check if the site respect our condition\n\t\t\ttotal_nsites+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_considered+=1 \n\t\t\t \tif total_nsites%100000==0: print total_nsites,\"sites\",nsites_considered,\"sites passed filter\"\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \tif verbose==True:print record.POS\n\t\t\t \tif verbose==True:print \"inds\",inds\t\t \t\n\t\t\t \tif verbose==True:print \"GT\",[sample[\"GT\"] for sample in record.samples] \n\t\t\t \tif verbose==True:print \"DP\",[sample[\"DP\"] for sample in record.samples]\n\t\t\t\tpi_values.append(record.nucl_diversity)#calculate pi\n\t\t\t\tif record.nucl_diversity>0.0:nvariants+=1\n\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkRecord_Cov(input_vcf,record,mincov,maxcov,inds=inds,called=True,nalleles=[1,2])# check if the site respect our condition\n\t\t\ttotal_nsites+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_considered+=1\n\t\t\t \tif total_nsites%100000==0: print total_nsites,\"sites\",nsites_considered,\"sites passed filter\"\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \tif verbose==True:print record.POS\n\t\t\t \tif verbose==True:print \"inds\",inds\t\t \t\n\t\t\t \tif verbose==True:print \"GT\",[sample[\"GT\"] for sample in record.samples] \n\t\t\t \tif verbose==True:print \"DP\",[sample[\"DP\"] for sample in record.samples]\n\t\t\t\tpi_values.append(record.nucl_diversity)#calculate pi\n\t\t\t\tif record.nucl_diversity>0.0:nvariants+=1\n\tif verbose==True or verbose==\"min\":print \"nvariants:\",nvariants,\"nsites_considered:\",nsites_considered\n\tif output==\"pi\":\n\t\tif nsites_considered>=min_nsites and nvariants>=min_variants and len(pi_values):\n\t\t\tpi_value=sum(pi_values)/nsites_considered\t\t\n\t\t\treturn pi_value\n\t\telse:\n\t\t\treturn \"NA\"\n\telif output==\"extended\":\n\t\tif nsites_considered>=min_nsites and nvariants>=min_variants and len(pi_values):\n\t\t\tpi_value=sum(pi_values)/nsites_considered\t\t\n\t\t\treturn [nvariants,nsites_considered,pi_value]\n\t\telse:\n\t\t\treturn [nvariants,nsites_considered,\"NA\"]\n\telse:\n\t\traise Exception(\"incorrect output argumnent, should be pi or extended\")", "def getIndexes(z_nummers):\n sortCriteria = 
util.get_prop(\"ixsm\")\n if not sortCriteria:\n sortCriteria = \"z_index\"\n sortCriteria = \"ORDER BY z_nummer, %s\" % sortCriteria\n return getRecordsByAttributeIn(\"zeichnung\", \"z_nummer\", z_nummers, addtl=sortCriteria)", "def find_own_objects(cs):\n own_objects = {}\n for con in cs:\n own_objects[con] = []\n for obj in con.extent:\n own_objects[con].append(obj)\n for sub_con in cs:\n if sub_con.extent < con.extent and\\\n obj in sub_con.extent:\n own_objects[con].pop()\n break\n return own_objects", "def find_progenitors_at_z(self, SH, mtree, z1, z2):\n \n for ss in range(z1, z2):\n # nodes at redshift ss\n ss_indx = np.where(mtree.data.snapshotNumber.values == ss)\n nodeID = mtree.data.index.values[ss_indx]\n nodeID_desc = mtree.data.descendantIndex.values[ss_indx]\n \n # find number of progenitors for nodes at redshift ss\n if ss != z1:\n _progcounts = np.zeros(len(nodeID))\n for ii in range(len(nodeID_past_desc)):\n if nodeID_past_desc[ii] in nodeID:\n indx = np.where(nodeID == nodeID_past_desc[ii])\n _progcounts[indx] = count[ii]\n\n nodeID_desc_unique, count = np.unique(nodeID_desc, return_counts=True)\n nodeID_desc_unique=nodeID_desc_unique[1:]; count=count[1:]\n \n nodeID_past = nodeID\n nodeID_past_desc = nodeID_desc_unique\n if ss != z1:\n _progcounts_past = _progcounts\n print('_progcounts', _progcounts)", "def find_nearby_membranes(all_membranes, all_membrane_map, vert_normals):\r\n membrane_tree = scipy.spatial.cKDTree(all_membranes)\r\n nearby_membranes = np.array(list(membrane_tree.query_pairs(adhesion_max_dist, p=2)))\r\n nearby_membrane_map = defaultdict(list)\r\n if nearby_membranes.shape[0] > 0:\r\n # Exclude same-cell membrane interactions and same-direction-facing segments\r\n all_vert_normals = np.concatenate(vert_normals, axis=0)\r\n subset = np.where(\r\n (all_membrane_map[nearby_membranes[:, 0], 0] !=\r\n all_membrane_map[nearby_membranes[:, 1], 0])\r\n & (np.einsum('ij,ik->i', all_vert_normals[nearby_membranes[:, 0]], all_vert_normals[nearby_membranes[:, 1]]) < 0.0)\r\n )\r\n nearby_membranes = nearby_membranes[subset]\r\n # {cell idx: (vert idx, other cell idx, other vert idx, 'all_membranes' vert idx)}\r\n for nm in nearby_membranes:\r\n m0 = all_membrane_map[nm[0]]\r\n m1 = all_membrane_map[nm[1]]\r\n nearby_membrane_map[m0[0]].append((m0[1], m1[0], m1[1], nm[1]))\r\n nearby_membrane_map[m1[0]].append((m1[1], m0[0], m0[1], nm[0]))\r\n nearby_membrane_map = {k: np.array(v)\r\n for k, v in nearby_membrane_map.items()}\r\n# print(nearby_membrane_map)\r\n return nearby_membranes, nearby_membrane_map", "def xz_plane(blkmap, y):\n result = []\n base = y*blkmap.plane_span\n z = 0\n while z < blkmap.plane_span:\n start = base + z\n z += blkmap.row_span\n end = base + z\n result.append(tuple(blkmap.blocks[start:end]))\n\n return result", "def get_z_ranges(self):\n\n summary = self.get_rasters_summary()\n\n # Convert to dict in format:\n # { 'stat' : { 'z': (min, max), ... } ... 
}\n\n ranges = summary.groupby(['stat', 'z'], as_index=False)\n ranges = ranges.agg({'min': 'min', 'max': 'max'})\n ranges['vals'] = ranges.apply(\n lambda row: {\n row['z']: (row['min'], row['max'])\n }, axis=1)\n ranges = ranges.groupby('stat')['vals'].apply(\n lambda group: group.values)\n ranges = ranges.apply(\n lambda group: {\n int(k): v for d in group for k,\n v in d.items()})\n\n return ranges.to_dict()", "def pixelise_region(coordinates, shapefile):\n return [coordinate for coordinate in coordinates if\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[1], coordinate[0]).within(x))) != 0) |\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[3], coordinate[0]).within(x))) != 0) |\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[1], coordinate[2]).within(x))) != 0) |\n (np.sum(shapefile['geometry'].apply(lambda x: Point(coordinate[3], coordinate[2]).within(x))) != 0)]", "def __iteratively_retain(\n self,\n orf_regions: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n\n ret = []\n\n arr = np.zeros((len(self.seq), ))\n\n for start, end in orf_regions:\n ret.append((start, end))\n arr[start-1:end] = 1\n orf_coverage = np.sum(arr) / len(arr)\n if orf_coverage > self.min_orf_coverage:\n break\n\n return ret", "def get_subregions(xint,conn):\n\n subregions = ('SELECT DISTINCT cvt.name, fip.value, f.name '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, ' \n 'feature f, cvterm cvt, cvterm cvt2, feature_relationship fr, feature f2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fi.role_id = cvt.cvterm_id '\n 'AND fip.type_id = cvt2.cvterm_id AND '\n 'cvt2.name = \\'subpart_info\\' AND f.feature_id = fr.subject_id '\n 'AND f2.feature_id = fr.object_id AND f.is_obsolete = \\'f\\' AND '\n 'f2.uniquename = %s AND i.uniquename = %s')\n subs = connect(subregions,xint,conn)\n return(subs)", "def findlines(z,bgsub=True,SATURATED=57000.):\n z = z.copy()\n s = z.copy()\n\n \"\"\" First identify peaks. \"\"\"\n max = ndimage.maximum_filter(z,9)\n p = scipy.where((max==z)&(z<SATURATED)&(max>0))[0]\n s = z[p]\n\n \"\"\" Reject low peaks. 
\"\"\"\n bg = ndimage.percentile_filter(s,10,21)\n peaks = scipy.where(s>bg*5.)[0]\n return p[peaks]", "def get_protein_from_mz(self, mz, ppm=None, match_name=\"name\", data_slot=\"mzWeight\"):\n\n possibleMatches = []\n\n ppmDist = self.get_ppm(mz, ppm)\n overlaps = self.mz_tree.overlap(mz-ppmDist, mz+ppmDist)\n for overlap in overlaps:\n \n protMass = overlap[2][data_slot]\n protDist = abs(mz-protMass)\n \n for name in overlap[2][match_name]:\n possibleMatches.append((name, protMass, protDist))\n\n possibleMatches = sorted(possibleMatches, key=lambda x: x[2])\n possibleMatches = [(x[0], x[1]) for x in possibleMatches]\n\n return possibleMatches", "def test_subset_reconstruction_iterable(self, wires):\n circuit = hadamard_circuit(wires)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n # choose 1000 random indices\n snapshots = np.random.choice(np.arange(10000, dtype=np.int64), size=1000, replace=False)\n state = shadow.global_snapshots(snapshots=snapshots)\n assert state.shape == (len(snapshots), 2**wires, 2**wires)\n\n # check the results against obtaining the full global snapshots\n expected = shadow.global_snapshots()\n for i, t in enumerate(snapshots):\n assert np.allclose(expected[t], state[i])", "def filtered_xyz(self) -> tuple[int, int, int]:", "def non_mcnugget():\n nugget = [0, 6, 9, 20]\n mcnugget = set([6, 9, 20])\n\n while True:\n mcnugget = set([m+n for m in mcnugget for n in nugget])\n\n for m in mcnugget:\n found = all([m+j in mcnugget for j in range(6)])\n if found:\n return [k for k in range(1, m) if k not in mcnugget]", "def select_regions(data, region_col, regions, combine_subregions=True):", "def get_points_in_range(self, x, y, z):\n\n return_set = []\n for point in self.get_points():\n min_x = x - 1000\n max_x = x + 1000\n if point[0] < min_x or max_x < point[0]:\n continue\n\n min_y = y - 1000\n max_y = y + 1000\n if point[1] < min_y or max_y < point[1]:\n continue\n\n min_z = z - 1000\n max_z = z + 1000\n if point[2] < min_z or max_z < point[2]:\n continue\n return_set.append(point)\n return return_set", "def extract_region_curvilinear(cube, lat_bounds):\n\n cube = cube.copy() \n \n region_mask = create_region_mask(cube.coord('latitude').points, cube.shape, lat_bounds)\n land_ocean_mask = cube.data.mask\n complete_mask = region_mask + land_ocean_mask\n\n cube.data = numpy.ma.asarray(cube.data)\n cube.data.mask = complete_mask\n\n return cube", "def defaulter(arr):\n return list(set(map(lambda application: application['customer_id'], filter(lambda application: application['repaid_amount'] < (application['principal'] + application['fee']), arr))))", "def all_xs(tran_flag=None):\n xs_ofinterest = {}\n if tran_flag == 'tran':\n xs_ofinterest[\"r\"] = ['abso', 'fiss', 'nufi',\n 'spec', 'tran', 'ener', 'difc', 'tota', 'excs']\n if tran_flag == 'tranXYZ':\n xs_ofinterest[\"r\"] = ['abso', 'fiss', 'nufi', 'spec', 'ener', 'difc', 'tota', 'excs',\n 'tran121', 'tran221', 'tran011', 'tran311', 'tran012', 'tran112', 'tran122', 'tran322', 'tran321', 'tran212', 'tran211', 'tran312', 'tran111', 'tran222', 'tran021', 'tran022']\n xs_ofinterest[\"g\"] = ['1', '2']\n\n return xs_ofinterest", "def test08(self):\n a = np.arange(10000) > 5000\n b = bcolz.carray(a, chunklen=100)\n u = b.wheretrue(skip=1020, limit=1020)\n w = b.wheretrue(skip=1030, limit=1030)\n self.assertEqual(a.nonzero()[0].tolist()[1020:2040], list(u))\n self.assertEqual(a.nonzero()[0].tolist()[1030:2060], list(w))", "def afindwithin(data):\r\n numfact = len(data[0])-2\r\n withinvec = 
[0]*numfact\r\n for col in range(1,numfact+1):\r\n rows = pstats.linexand(data,col,pstats.unique(pstats.colex(data,1))[0]) # get 1 level of this factor\r\n if len(pstats.unique(pstats.colex(rows,0))) < len(rows): # if fewer subjects than scores on this factor\r\n withinvec[col-1] = 1\r\n return withinvec", "def get_obstList(self,X,Y,Z):\n \n x_c_cone = self.x_c\n\tz_c_cone = self.z_c\n y_c_cone = 0\n x_s = 2.25*2*self.cyl_rad\n rad_cone = x_s + self.cyl_rad\n\th_cone = rad_cone*0.57735\n\n floor_part = np.array(np.where(Y < h_cone)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - x_c_cone)**2 + (Z - z_c_cone)**2 <= ((self.cyl_rad/cone)/(h_cone))**2*(Y - y_c_cone)**2))\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])", "def get_uv_coverage(Nbase, z, ncells, boxsize=None):\n\tif not boxsize: boxsize = c2t.conv.LB\n\tuv_map = np.zeros((ncells,ncells))\n\ttheta_max = c2t.conv.LB/c2t.z_to_cdist(z)\n\tfor p in xrange(Nbase.shape[0]):\n\t\ti,j,k = np.round(Nbase[p,0]*theta_max),np.round(Nbase[p,1]*theta_max),np.round(Nbase[p,2]*theta_max)\n\t\tif np.abs(i)<ncells:\n\t\t\tif np.abs(j)<ncells:\n\t\t\t\tuv_map[int(i),int(j)] += 1\n\treturn uv_map", "def create_city_map(n: int) -> set:\n return set((row, col) for row in range(0, n) for col in range(0, n))", "def C(v,securite):\n to_return = set()\n x,y = l[v][0],l[v][1]\n a,b = id_case(x,y) #on recupere la case ou se trouve le disque qu'on test\n voisinage = set(cases[a,b]) #on recupere la liste du voisinage (pas forcement contact)\n #4\n #012\n #345\n #678 \n if a>100:\n voisinage = add_list(voisinage,cases[a-4*rayon,b]) #3\n if b>200:\n voisinage = add_list(voisinage,cases[a-4*rayon,b-4*rayon]) #0\n voisinage = add_list(voisinage,cases[a,b-4*rayon]) #1\n if b<600:\n voisinage = add_list(voisinage,cases[a-4*rayon,b+4*rayon]) #6\n voisinage = add_list(voisinage,cases[a,b+4*rayon]) #7\n if a<1100-4*rayon:\n voisinage = add_list(voisinage,cases[a+4*rayon,b]) #5\n if b>200:\n voisinage = add_list(voisinage,cases[a+4*rayon,b-4*rayon]) #2\n voisinage = add_list(voisinage,cases[a,b-4*rayon]) #1\n if b<600:\n voisinage = add_list(voisinage,cases[a+4*rayon,b+4*rayon]) #8\n voisinage = add_list(voisinage,cases[a,b+4*rayon]) #7\n \n #On ajoute plusieurs fois le meme a un ensemble -> pas grave\n for i in voisinage:\n xb,yb = l[i][0],l[i][1]\n if 0<sqrt((x-xb)**2+(y-yb)**2)<=2*rayon+securite:\n to_return.add(i)\n return to_return", "def test10(self):\n a = np.arange(1, 11)\n b = bcolz.carray(a)\n bi = b.where(a <= 5)\n ai = (v for v in a if v <= 5)\n self.assertEqual([i for i in ai], [i for i in bi])\n self.assertEqual([i for i in ai], [i for i in bi])", "def _setup_all_residues(self, model_num=0):\n all_residues = vector1()\n\n for chain_id in self.get_chain_ids(model_num):\n #print \"ChainID: \"+chain_id\n residues = self.residues(chain_id, model_num)\n all_residues.extend(residues)\n\n return all_residues", "def find_unique_elements(molecule_map):\n atoms = []\n for molec_name in molecule_map.keys():\n atoms += [subst['atom'] for subst in molecule_map[molec_name]]\n return set(atoms)", "def filter(self, rois):\n can_fragments = np.array([roi.can_fragment for roi in rois])\n return can_fragments", "def zmembers(self):\n return self.zrange(0, -1)", 
"def match_regions(self):\n l = []\n for r1 in self.regions_names():\n for r2 in self.metric.index:\n r11 = r1.replace('-', ' ').lower()\n r22 = r2.replace('-', ' ').lower()\n l.append([r1,r2,fuzz.ratio(r11, r22)])\n\n matched = np.array([x for x in l if x[2] > 80])\n\n return {key: value for (key, value) in matched[:,[1,0]]}", "def extractstates(self, bias=1.0):\n items = []\n print(\"weights:\")\n print([round(comp.weight, 7) for comp in self.gmm])\n for comp in self.gmm:\n val = comp.weight * float(bias)\n if val > 0.5:\n for _ in range(int(round(val))):\n items.append(deepcopy(comp.loc))\n for x in items: print(x.T)\n return items", "def gen_mass_spec(self, mz_lst, bin_num=20000):\n # m2z is from [MZ_MIN, MZ_MAX], with bin_num bins. Note its length is len(hist) + 1\n # intensity: counts from samples in that bin.\n self.intensity, self.m2z = np.histogram(mz_lst, bins=bin_num, range=(MZ_MIN, MZ_MAX))", "def mask_range(b, z, lam_em, lam_cent):\n dz = 2. * b * np.sqrt(np.log(2.)) / c\n del_lam = lam_cent * dz\n return lam_cent - del_lam, lam_cent + del_lam", "def segregate(primers: list):\n\n group1 = list(filter(lambda primer: 53 <= primer.tm < 58, primers))\n group2 = list(filter(lambda primer: 58 <= primer.tm <= 62, primers))\n\n return group1, group2", "def residues(ls):\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. {chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set", "def dummy_nms(list_maps, prob, parent_idx, scores):# slice_idx=0):\n rval = []\n for slice_idx, maps in zip(parent_idx, list_maps):\n for s in maps:\n # Apply the filter based on proba\n maps[s] = maps[s] * (maps[s] > prob - scores[slice_idx])\n n_z = np.transpose(np.nonzero(maps[s]))\n rval.extend([[s,\n n_z[e, 0], n_z[e, 1],\n maps[s][n_z[e, 0], n_z[e, 1]],\n slice_idx]\n for e in range(len(n_z))])\n #print 'nb of nonzero patches :', len(rval)\n if rval != []:\n rval.sort(key=lambda x: x[3], reverse=True)\n #print 'min :', min(rval, key=lambda x: x[3])\n #print'max :', max(rval, key=lambda x: x[3])\n return rval", "def cfdGetOwnersSubArrayForBoundaryPatch(self):\r\n \r\n for iBPatch, theBCInfo in self.cfdBoundaryPatchesArray.items():\r\n \r\n startBFace=self.cfdBoundaryPatchesArray[iBPatch]['startFaceIndex']\r\n \r\n endBFace=startBFace+self.cfdBoundaryPatchesArray[iBPatch]['numberOfBFaces']\r\n \r\n iBFaces=list(range(int(startBFace),int(endBFace))) \r\n \r\n self.cfdBoundaryPatchesArray[iBPatch]['owners_b']=[self.owners[i] for i in iBFaces]", "def get_all_bandgaps(cluster, spin_list, dir=\"/home/pv278/Platinum/\"):\n E, s = [], []\n for spin in spin_list:\n Ebg = get_bandgap(cluster, 
spin, dir)\n if Ebg:\n E.append(Ebg)\n s.append(spin)\n return np.vstack((s,E)).T", "def slice_zvals(self):\n return np.sort([z.val for z in self.zvals])", "def all_subconstituents(self, compute=False):\n out = {}\n for i in range(self._.d+1):\n try:\n out[i] = self.subconstituent(i, compute=compute)\n except IndexError:\n pass\n return out", "def find_features_geojson(self, geojson_tagset):\n kreis_region_bund_list = []\n only_regs_set = set()\n for feature in geojson_tagset:\n bundesl = feature.properties.get('NAME_1')\n region = feature.properties.get('NAME_2')\n kreis = feature.properties.get('NAME_3')\n\n kreis_region_bund_list.append((kreis, region, bundesl))\n #Check: does \"Göttingen\" appear in this list as a region? Why does Goettingen need to be a region?)\n return kreis_region_bund_list", "def subdata(min_,dict_):\n list_ = []\n return [value for value,freq in dict_.items() if freq > min_]", "def get_element_indices_within_rectangle(self, xmin, xmax, zmin, zmax):\n centroids = self.get_element_centroids()\n indices_list = []\n for nr, (x, z) in enumerate(centroids):\n if x >= xmin and x <= xmax and z >= zmin and z <= zmax:\n indices_list.append(nr)\n return np.array(indices_list)", "def _get_invariom_list(self):\n self.invariom_list = []\n for molecule in self.values():\n for atom in molecule.atoms:\n for invariom in atom.invarioms:\n if not invariom in self.invariom_list:\n self.invariom_list.append(invariom)", "def z_scan(self, z_position_list):\n self.generic_scan(self.z, z_position_list)", "def count_sites_under_condition_vcf_to_set(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,nb_ind_with_min_cov=\"all\",nalleles=[1,2],snps=False):\n\tset_ok_sites = set()\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tnsites_OK=0\n\tnsites_total=0\n\t#print \"in count_sites_under_condition_vcf nb_ind_with_min_cov :\",nb_ind_with_min_cov, \" inds\", ind\n\tif chrom!=\"all\":\n\t\t\t#print chrom,start,end\n\t\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t\t#print check\n\t\t\t#print \"check;' \",check,\"'\"\n\t\t\tif check==0: \n\t\t\t\treturn [0,0]\n\t\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=nalleles,nb_ind_with_min_cov=nb_ind_with_min_cov,snps=snps)# check if the site respect our condition\n\t\t\t\tnsites_total+=1\n\t\t\t\tif cond:# if it does\n\t\t\t\t\t#if any([int(sample['DP'])<5 for sample in record.samples]): print [int(sample['DP']) for sample in record.samples] # to check this argument nb_ind_with_min_cov\n\t\t\t\t\tset_ok_sites.add(str(record.CHROM)+\"_\"+str(record.POS))\n\treturn set_ok_sites", "def getCaseDisp():\r\n liste = []\r\n for i in range(3):\r\n for j in range(3):\r\n if Grille[i][j] == 0:\r\n liste.append([i,j])\r\n return liste", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = 
mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def getLigandResIds(ligchemid:str, struct: Structure)->List[Residue]:\n \"\"\"*ligchemids are of type https://www.rcsb.org/ligand/IDS\"\"\"\n ligandResidues: List[Residue] = list(filter(lambda x: x.get_resname() == ligchemid, list( struct.get_residues() )))\n return ligandResidues", "def ifcs_subsets(me, ifcs):\n un = set()\n for i in ifcs:\n if i in me._ifc_subs:\n un.update(me._ifc_subs[i])\n return un", "def __generate_streets(x_intercepts: list, y_intercepts: list) -> set:\n for x_start, x_end in zip(x_intercepts[:-1], x_intercepts[1:]):\n for y in y_intercepts:\n yield {(x, y) for x in range(x_start, x_end + 1)}\n for y_start, y_end in zip(y_intercepts[:-1], y_intercepts[1:]):\n for x in x_intercepts:\n yield {(x, y) for y in range(y_start, y_end + 1)}", "def filter_to_candidate(self):\n filtered = { k: [] for k in self.annsets }\n for key, annset in self.annsets.items():\n for a in annset:\n if a.overlaps(self.candidate):\n filtered[key].append(a)\n self.annsets = filtered", "def _getMdrizskyValues(self):\n mdict = {}\n mlist = []\n for member in self.assoc.parlist:\n fname = member['image'].datafile\n if not mdict.has_key(fname): \n mlist.append((fname, member['image'].getSubtractedSky()))\n mdict[fname] = 1\n \n return mlist", "def exo3_2(mu,x0,n,m):\r\n S = []\r\n valeur = x0\r\n for i in range(0,n+1):\r\n valeur = exo2_1(valeur,mu)\r\n S.append(valeur)\r\n for i in range(m,n):\r\n valeur = exo2_1(valeur,mu)\r\n S.append(valeur)\r\n return S", "def crime_list(loc:List[CrimeStatistics])->List[int]:\n # return [] #stub\n #template from List[CrimeStatistics]\n # crime_count is all the seperate university total crimes seen so far\n crime_count = [] #type: List[int]\n for cs in loc:\n crime_count.append(sum_crimes(cs))\n return crime_count", "def mi_bin_ccd(x, y, z, bins):\n # get unique z elements\n z_u = np.unique(z)\n n_z = len(z_u)\n # compute mi for each elements of z\n pz = np.zeros((np.int64(n_z)), dtype=np.float32)\n icond = np.zeros((np.int64(n_z)), dtype=np.float32)\n for n_k, k in enumerate(z_u):\n idx_z = z == k\n pz[n_k] = idx_z.sum()\n _x, _y = x[idx_z], y[idx_z]\n icond[n_k] = mi_bin(_x, _y, bins, bins)\n # conditional mutual information\n pz /= len(z)\n cmi = np.sum(pz * icond)\n\n return cmi", "def select_within_range(mags, colours, mag_min, mag_max, col_min, col_max):\n\n idx1 = np.where(colours >= col_min)[0]\n idx2 = np.where(colours <= col_max)[0]\n idx3 = np.where(mags >= mag_min)[0]\n idx4 = np.where(mags <= mag_max)[0]\n idx = set(idx1).intersection(set(idx2))\n idx = idx.intersection(set(idx3))\n idx = list(idx.intersection(set(idx4)))\n\n return idx", 
"def test08(self):\n a = np.arange(1, 11)\n b = bcolz.carray(a)\n ul = [v for v in a if v <= 5]\n u = b.where(a <= 5)\n wl = [v for v in a if v <= 6]\n w = b.where(a <= 6)\n self.assertEqual(ul, list(u))\n self.assertEqual(wl, list(w))", "def like(self, cname: str, mx: int = None)->list:\n res = self.data[self.data['cname'].str.contains(cname)].to_dict(orient='record')\n if not res: return []\n if (mx is not None) and (len(res) > mx):\n assert mx > 0, \"CountryCodes().like: `mx` argument must be positive or `None`.\"\n return res[:mx]\n return res", "def Dominant_AZ(grsaz):\n\n M = {}\n RTs = list(set([y[1] for y in grsaz]))\n for rt in RTs:\n AZs = list(set([y[3] for y in grsaz if y[1] == rt]))\n MM = []\n for az in AZs:\n MM.append([az, sum([1 for y in grsaz if (y[1] == rt and y[3] == az)])])\n mini = min([m[0] for m in MM if m[1] == max([m[1] for m in MM])]) #we take the first AZ that has the maximal number of subnets\n M.update({rt: mini})\n return M", "def zn_star(n):\n return [x for x in range(1, n) if coprime(x, n)]", "def getClusterMembers(Z):\n\n clusters = {}\n for i, merge in enumerate(Z):\n cid = 1 + i + Z.shape[0]\n clusters[cid] = [merge[0], merge[1]]\n\n def _getIndices(clusters, i):\n if i <= Z.shape[0]:\n return [int(i)]\n else:\n return _getIndices(clusters, clusters[i][0]) + _getIndices(clusters, clusters[i][1])\n\n members = {i:_getIndices(clusters, i) for i in range(Z.shape[0] + 1, max(clusters.keys()) + 1)}\n return members", "def get_complexes_with_abundance(self, query_abundance: int) -> List[KappaComplex]:\n result_complexes = []\n for complex_expression, complex_abundance in self._complexes.items():\n if query_abundance == complex_abundance:\n result_complexes.append(complex_expression)\n return result_complexes", "def _as_set(self):\n included = set()\n excluded = set()\n # Add all points lying within each inclusion contour to S.\n for contour in self.contours:\n contour_matrix = contour.to_matrix()[:,:2]\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels \n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n mn = contour_matrix.min(axis=0)\n mx = contour_matrix.max(axis=0)\n x,y = np.mgrid[mn[0]:mx[0]+1, mn[1]:mx[1]+1]\n test_points = np.c_[x.flatten(), y.flatten()]\n points_in_contour = test_points[path.contains_points(test_points)]\n\n # Add the z coordinate.\n points_in_contour = np.c_[\\\n points_in_contour,\\\n np.ones(points_in_contour.shape[0])*contour.image_z_position\n ]\n\n # Now turn the numpy matrix into a list of tuples,\n # so we can add it to the corresponding set.\n points_in_contour = map(tuple, points_in_contour)\n\n # Update the corresponding set.\n if contour.inclusion:\n included.update(points_in_contour)\n else:\n excluded.update(points_in_contour)\n # Return the included points minus the excluded points.\n return included.difference( excluded )", "def find_mgrs_intersection_100km(footprint, gzd_list):\n\n total_mgrs_100km_list = []\n\n for gzd in gzd_list:\n sub_list = find_mgrs_intersection_100km_single(footprint, gzd)\n for mgrs_id in sub_list:\n total_mgrs_100km_list.append(mgrs_id)\n\n return total_mgrs_100km_list", "def test09(self):\n a = np.arange(10000) > 5000\n b = bcolz.carray(a, chunklen=100)\n b1 = b.wheretrue(skip=1020, limit=1020)\n b2 = b.wheretrue(skip=1030, limit=1020)\n a1 = 
a.nonzero()[0].tolist()[1020:2040]\n a2 = a.nonzero()[0].tolist()[1030:2050]\n # print \"result:\", [i for i in zip(b1, b2)]\n self.assertEqual([i for i in zip(a1, a2)], [i for i in zip(b1, b2)])", "def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))", "def get_murim_mutations(quality_cutoff):\n\n os.system(\"grep somatic /home/perry/Projects/loh/data/murim/CANCER_specific.csv | cut -f 3,4,10 > /home/perry/Projects/loh/data/murim/somatic_coding_parsed_chr_pos_qual\")\n \n murim_mutations = {}\n with open('/home/perry/Projects/loh/data/murim/somatic_coding_parsed_chr_pos_qual') as f:\n for line in f:\n sp = line.strip().split('\\t')\n if float(sp[-1]) > float(quality_cutoff): # murim uses 80\n murim_mutations[sp[0] + ':' + sp[1]] = True\n remove_deletions(murim_mutations)\n return set(murim_mutations.keys()) & set(get_murim_covered(quality_cutoff))", "def get_response_weights_vector(zenith,azimuth,binsize=5,cut=57.4):\n\n # assuming useful input:\n # azimuthal angle is periodic in the range [0,360[\n # zenith ranges from [0,180[ \n # checking azimuth range (can be exactly 360?)\n azimuth[azimuth == 360] -= 0.01\n \n # check which pixel (index) was hit on regular grid\n hit_pixel_zi = np.floor(zenith/binsize).astype(int)\n hit_pixel_ai = np.floor(azimuth/binsize).astype(int)\n\n # and which pixel centre\n hit_pixel_z = (hit_pixel_zi+0.5)*binsize\n hit_pixel_a = (hit_pixel_ai+0.5)*binsize\n\n # check which zeniths are beyond threshold\n bad_idx = np.where(hit_pixel_z > cut) \n \n # calculate nearest neighbour pixels indices\n za_idx = np.array([[np.floor(azimuth/binsize+0.5),np.floor(zenith/binsize+0.5)],\n [np.floor(azimuth/binsize+0.5),np.floor(zenith/binsize-0.5)],\n [np.floor(azimuth/binsize-0.5),np.floor(zenith/binsize+0.5)],\n [np.floor(azimuth/binsize-0.5),np.floor(zenith/binsize-0.5)]]).astype(int)\n\n # take care of bounds at zenith (azimuth is allowed to be -1!)\n (za_idx[:,1,:])[np.where(za_idx[:,1,:] < 0)] += 1\n (za_idx[:,1,:])[np.where(za_idx[:,1,:] >= 180/binsize)] = int(180/binsize-1)\n # but azimuth may not be larger than range [0,360/binsize[\n (za_idx[:,0,:])[np.where(za_idx[:,0,:] >= 360/binsize)] = 0\n \n # and pixel centres of neighbours\n azimuth_neighbours = (za_idx[:,0]+0.5)*binsize\n zenith_neighbours = (za_idx[:,1]+0.5)*binsize\n\n # calculate angular distances to neighbours\n dists = angular_distance(azimuth_neighbours,zenith_neighbours,azimuth,zenith)\n\n # inverse weighting to get impact of neighbouring pixels\n n_in = len(zenith)\n weights = (1/dists)/np.sum(1/dists,axis=0).repeat(4).reshape(n_in,4).T\n # if pixel is hit directly, set weight to 1.0\n weights[np.isnan(weights)] = 1\n # set beyond threshold weights to zero\n weights[:,bad_idx] = 0\n\n return za_idx,weights", "def test02b(self):\n a = np.arange(1, 11)\n b = bcolz.carray(a)\n wt = [v for v in a if v <= 5 and v > 2]\n cwt = [v for v in b.where((a <= 5) & (a > 2))]\n # print \"numpy ->\", [v for v in a if v<=5 and v>2]\n # print \"where ->\", [v for v in b.where((a<=5) & (a>2))]\n self.assertTrue(wt == cwt, \"where() does not work correctly\")", "def find_progenitors_until_z(self, mtree, nodeID, z1, z2):\n snapcount = 0\n print('from %d until %d' % (z2, z1))\n for ss in range(z2, z1, -1):\n if ss == z2:\n df_target = pd.DataFrame({'nodeID':nodeID})\n _indx = np.where(mtree.data.snapshotNumber.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n 
_indx = np.where((nodeID_prog_desc < 1e15) &\n (nodeID_prog_desc > 1e11))\n nodeID_prog = nodeID_prog[_indx]\n nodeID_prog_desc = nodeID_prog_desc[_indx]\n\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog,\n 'nodeID_target' : nodeID_prog_desc})\n\n # Initiliaze Output Array\n progcounts = np.zeros((df_target['nodeID'].size, z2-z1))\n\n # nodeID_prog_desc_unic is sorted\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n\n # Nr. of progenitors for sub-&halos at snapshot z2\n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts[_indx_now[now_sort_indx], snapcount] = count[pro_sort_indx]\n \n else:\n df_now = df_prog\n _indx = np.where(mtree.data.snapshotNumber.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n #_indx = np.where((nodeID_prog_desc < 1e15) &\n # (nodeID_prog_desc > 1e10))\n #nodeID_prog = nodeID_prog[_indx]\n #nodeID_prog_desc = nodeID_prog_desc[_indx]\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog})\n \n progcounts_local = np.zeros(df_now['nodeID'].size)\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n \n # progenitors for snapshot ss\n s = pd.Index(df_now['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_now['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts_local[_indx_now[now_sort_indx]] = count[pro_sort_indx]\n df_now['progcount'] = pd.Series(progcounts_local,\n index=df_now.index, dtype=int)\n\n # Nr. 
of progenitors for sub-&halos at snapshot z2\n df_inter = df_now.groupby(['nodeID_target'],\n as_index=False)['progcount'].sum()\n # only real progeniteurs\n df_inter = df_inter[(df_inter['nodeID_target'] > 1e10) & \n (df_inter['nodeID_target'] < 1e15)]\n df_inter = df_inter.drop_duplicates(subset=['nodeID_target'],\n keep='first')\n \n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(df_inter['nodeID_target'].tolist())\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(df_inter['nodeID_target'].values)\n progcounts[_indx_now[now_sort_indx], snapcount] = df_inter['progcount'].values[pro_sort_indx]\n\n # sort nodeID_prog to nodeID\n #s = pd.Index(df_now['nodeID'].tolist())\n #_indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n #df_now['nodeID_target'].values[_indx_now]\n \n obs_ref_local = np.zeros(df_prog['nodeID'].size)\n for ii in range(len(nodeID_prog_desc_unic)):\n tarID = df_now.loc[\n df_now['nodeID'] == nodeID_prog_desc_unic[ii],\n 'nodeID_target'].values.astype(int)\n if tarID:\n _indx = np.where(\n nodeID_prog_desc == nodeID_prog_desc_unic[ii])\n obs_ref_local[_indx] = tarID\n df_prog['nodeID_target'] = pd.Series(obs_ref_local,\n index=df_prog.index)\n\n snapcount += 1\n del nodeID_prog_desc\n del df_now, df_inter, df_prog\n return np.asarray(df_target['nodeID'].tolist()), progcounts", "def test07(self):\n a = np.arange(10000)\n b = bcolz.carray(a,)\n wt = [v for v in a if v <= 5000][1010:2020]\n cwt = [v for v in b.where(bcolz.carray(a <= 5000, chunklen=100),\n limit=1010, skip=1010)]\n # print \"numpy ->\", [v for v in a if v>=5000][1010:2020]\n # print \"where ->\", [v for v in b.where(bcolz.carray(a>=5000,\n # chunklen=100),\n # limit=1010, skip=1010)]\n self.assertTrue(wt == cwt, \"where() does not work correctly\")", "def filter_renters(data_df):\n return data_df[(data_df['sc116'] == 2) # Only renters\n & (data_df['uf17'] < 8000) # With a real rent provided\n ]" ]
[ "0.5860528", "0.5294917", "0.5274569", "0.5238881", "0.51128125", "0.50799537", "0.50743914", "0.4973342", "0.49630117", "0.49471557", "0.49072868", "0.48891437", "0.48831913", "0.48573068", "0.48536083", "0.4842539", "0.4838259", "0.48302126", "0.48269477", "0.48252738", "0.48252738", "0.47874793", "0.47615215", "0.47506213", "0.4731234", "0.4727928", "0.47208488", "0.47161487", "0.47065267", "0.47056028", "0.46979076", "0.46955457", "0.4693687", "0.46924844", "0.46898326", "0.46838233", "0.46837205", "0.46769276", "0.46662793", "0.46621603", "0.46604034", "0.46506578", "0.46492493", "0.46392006", "0.46377832", "0.46303982", "0.46223432", "0.46208256", "0.46171883", "0.461183", "0.46106964", "0.4602705", "0.46026492", "0.46024603", "0.46015605", "0.45938286", "0.45937926", "0.45919457", "0.45864853", "0.4585179", "0.45850745", "0.45837298", "0.45785147", "0.45742166", "0.45734", "0.45724922", "0.4571986", "0.4569257", "0.4568298", "0.4568218", "0.45674047", "0.45664465", "0.4564145", "0.45603573", "0.45480317", "0.4547539", "0.45463285", "0.45454586", "0.45430183", "0.454249", "0.45398495", "0.45383015", "0.45340255", "0.45236278", "0.4521765", "0.45210484", "0.45202768", "0.45197627", "0.45094135", "0.45062017", "0.45028785", "0.4500924", "0.44992474", "0.44968927", "0.44958377", "0.449549", "0.4491847", "0.44907", "0.44828823", "0.44805866" ]
0.59874827
0