Dataset columns: code (string, lengths 4 to 4.48k), docstring (string, lengths 1 to 6.45k), _id (string, length 24).
def cancel(self): <NEW_LINE> <INDENT> self.active = False
Cancel this event. If it has not yet executed, it will not execute during any subsequent :meth:`TimerList.expire` call.
625941b60fa83653e4656dc6
def get_contract_position_info(self, symbol=''): <NEW_LINE> <INDENT> params = {} <NEW_LINE> if symbol: <NEW_LINE> <INDENT> params["symbol"] = symbol <NEW_LINE> <DEDENT> request_path = '/api/v1/contract_position_info' <NEW_LINE> return api_key_post(self.__url, request_path, params, self.__access_key, self.__secret_key)
:param symbol: "BTC","ETH"... if omitted, all instruments are returned by default :return:
625941b6cc0a2c11143dcca0
def _try_place_heroes(self): <NEW_LINE> <INDENT> dim = self.world_size <NEW_LINE> accessible = np.full((dim, dim), False, dtype=bool) <NEW_LINE> q = deque([]) <NEW_LINE> unoccupied_cells = [] <NEW_LINE> for (i, j), obj in np.ndenumerate(self.objects): <NEW_LINE> <INDENT> if obj is not None: <NEW_LINE> <INDENT> accessible[i, j] = True <NEW_LINE> q.append((i, j)) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> while q: <NEW_LINE> <INDENT> i, j = q.popleft() <NEW_LINE> for di, dj in zip(DI, DJ): <NEW_LINE> <INDENT> new_i, new_j = i + di, j + dj <NEW_LINE> if not self.terrain[new_i, new_j].reachable(): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if accessible[new_i, new_j]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> accessible[new_i, new_j] = True <NEW_LINE> q.append((new_i, new_j)) <NEW_LINE> if self.objects[new_i, new_j] is None: <NEW_LINE> <INDENT> unoccupied_cells.append((new_i, new_j)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if len(unoccupied_cells) < self.mode.num_teams: <NEW_LINE> <INDENT> logger.debug('World generation failed, try again...') <NEW_LINE> return <NEW_LINE> <DEDENT> for team_idx in range(self.mode.num_teams): <NEW_LINE> <INDENT> team = Hero.teams[team_idx] <NEW_LINE> hero = Hero(self, team=team) <NEW_LINE> hero_pos_idx = self.rng.randint(0, len(unoccupied_cells)) <NEW_LINE> hero.pos = Vec(*unoccupied_cells[hero_pos_idx]) <NEW_LINE> unoccupied_cells.pop(hero_pos_idx) <NEW_LINE> self.heroes.append(hero)
Determine starting positions of heroes.
625941b64c3428357757c133
def sample(self,n,**kwargs): <NEW_LINE> <INDENT> raise NotImplementedError
Return a set of n values obtained from the sampler.
625941b6d10714528d5ffae7
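A minimal concrete sampler satisfying the contract above might look like the sketch below; the UniformSampler name and the numpy dependency are illustrative, not part of the source.

import numpy as np

class UniformSampler:
    # Toy sampler: draws n values uniformly from [low, high).
    def __init__(self, low=0.0, high=1.0, seed=None):
        self.low, self.high = low, high
        self.rng = np.random.default_rng(seed)

    def sample(self, n, **kwargs):
        # Return a set of n values obtained from the sampler.
        return self.rng.uniform(self.low, self.high, size=n)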
def get_location_data_objects(self, payload): <NEW_LINE> <INDENT> location = Location(payload) <NEW_LINE> return location
Convert the JSON payload into a location object. Return that object.
625941b630c21e258bdfa2a4
def getReactionTemplate(self, reaction): <NEW_LINE> <INDENT> forwardTemplate = self.top[:] <NEW_LINE> temporary = [] <NEW_LINE> symmetricTree = False <NEW_LINE> for entry in forwardTemplate: <NEW_LINE> <INDENT> if entry not in temporary: <NEW_LINE> <INDENT> temporary.append(entry) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert len(forwardTemplate)==2 , 'Can currently only do symmetric trees with nothing else in them' <NEW_LINE> symmetricTree = True <NEW_LINE> <DEDENT> <DEDENT> forwardTemplate = temporary <NEW_LINE> template = [] <NEW_LINE> for entry in forwardTemplate: <NEW_LINE> <INDENT> group = entry.item <NEW_LINE> if isinstance(entry.item, LogicNode): <NEW_LINE> <INDENT> group = entry.item.getPossibleStructures(self.entries)[0] <NEW_LINE> <DEDENT> atomList = group.getLabeledAtoms() <NEW_LINE> for reactant in reaction.reactants: <NEW_LINE> <INDENT> if isinstance(reactant, Species): <NEW_LINE> <INDENT> reactant = reactant.molecule[0] <NEW_LINE> <DEDENT> if not all([reactant.containsLabeledAtom(label) for label in atomList]): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> atoms = reactant.getLabeledAtoms() <NEW_LINE> matched_node = self.descendTree(reactant, atoms, root=entry) <NEW_LINE> if matched_node is not None: <NEW_LINE> <INDENT> template.append(matched_node) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> forwardTemplate = self.top[:] <NEW_LINE> if self.label.lower().startswith('r_recombination'): <NEW_LINE> <INDENT> forwardTemplate.append(forwardTemplate[0]) <NEW_LINE> <DEDENT> if len(template) != len(forwardTemplate): <NEW_LINE> <INDENT> msg = 'Unable to find matching template for reaction {0} in reaction family {1}.'.format(str(reaction), str(self)) <NEW_LINE> msg += 'Trying to match {0} but matched {1}'.format(str(forwardTemplate),str(template)) <NEW_LINE> raise UndeterminableKineticsError(reaction, message=msg) <NEW_LINE> <DEDENT> return template
For a given `reaction` with properly-labeled :class:`Molecule` objects as the reactants, determine the most specific nodes in the tree that describe the reaction.
625941b6d8ef3951e3243345
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, **col_name_col_instance): <NEW_LINE> <INDENT> if migrate_engine.name == "sqlite": <NEW_LINE> <INDENT> _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, **col_name_col_instance) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> meta = MetaData() <NEW_LINE> meta.bind = migrate_engine <NEW_LINE> t = Table(table_name, meta, autoload=True) <NEW_LINE> uc = UniqueConstraint(*columns, table=t, name=uc_name) <NEW_LINE> uc.drop()
This method drops UC from table and works for mysql, postgresql and sqlite. In mysql and postgresql we are able to use "alter table" construction. In sqlite is only one way to drop UC: 1) Create new table with same columns, indexes and constraints (except one that we want to drop). 2) Copy data from old table to new. 3) Drop old table. 4) Rename new table to the name of old table. :param migrate_engine: sqlalchemy engine :param table_name: name of table that contains uniq constraint. :param uc_name: name of uniq constraint that will be dropped. :param columns: columns that are in uniq constraint. :param col_name_col_instance: contains pair column_name=column_instance. column_instance is instance of Column. These params are required only for columns that have unsupported types by sqlite. For example BigInteger.
625941b6d6c5a10208143e4f
def DownloadLocation(self, event): <NEW_LINE> <INDENT> from startup.locdownload import LocationDownloadDialog <NEW_LINE> loc_download = LocationDownloadDialog(parent=self, database=self.gisdbase) <NEW_LINE> loc_download.ShowModal() <NEW_LINE> location = loc_download.GetLocation() <NEW_LINE> if location: <NEW_LINE> <INDENT> self.UpdateLocations(self.gisdbase) <NEW_LINE> self.UpdateMapsets(os.path.join(self.gisdbase, location)) <NEW_LINE> self.lblocations.SetSelection( self.listOfLocations.index(location)) <NEW_LINE> self.SetLocation(self.gisdbase, location, 'PERMANENT') <NEW_LINE> self.OnSelectLocation(None) <NEW_LINE> <DEDENT> loc_download.Destroy()
Download location online
625941b62c8b7c6e89b355cc
def ctf_Top_04(self): <NEW_LINE> <INDENT> for x in self.envdict.keys(): <NEW_LINE> <INDENT> e = self.envdict[x] <NEW_LINE> e.Clear() <NEW_LINE> e.Reset() <NEW_LINE> e.BLoad("i_%s_c.bdat" % x) <NEW_LINE> e.Assert("(duck)") <NEW_LINE> self.assert_(e.AgendaChanged()) <NEW_LINE> e.Run() <NEW_LINE> self.assertEqual(e.FactList()[-1].CleanPPForm(), "(quack)")
Testing: BLoad
625941b6925a0f43d2549c7b
def get_sum_signs(rna, mcff_args='-t 1', mcff_timeout=300, win_size=79, win_skip=1): <NEW_LINE> <INDENT> win_count = 0 <NEW_LINE> signature_vector = np.zeros(940) <NEW_LINE> for i in range(0, len(rna.seq), win_skip): <NEW_LINE> <INDENT> if len(rna.seq[i:i+win_size]) < win_size: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> cur_win = rna.seq[i:i+win_size] <NEW_LINE> dotbs, shapes = mv.dotbs_and_shapes(cur_win, parameters=mcff_args, timeout_in_seconds=mcff_timeout) <NEW_LINE> signature_vector += mv.shape60_ncm40_ncmexp500_expseq340(cur_win, dotbs, shapes) <NEW_LINE> win_count += 1 <NEW_LINE> <DEDENT> return (signature_vector, win_count)
Takes as an argument an RNA as a SeqRecord object. Returns a vector of the sum of signatures for all windows (non-averaged) and the number of windows used. mcff_args = mcff parameters, defaulted to -t 1 mcff_timeout = time to take before mcff exits win_size = length of windows to use win_skip = how many nucleotides to move to the right each time
625941b60a366e3fb873e61e
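The window arithmetic above is easy to sanity-check in isolation; this sketch drops the mcff and SeqRecord machinery and just counts full-length windows (all names here are made up).

seq = "ACGU" * 30            # 120 nt, stands in for rna.seq
win_size, win_skip = 79, 1
windows = [seq[i:i + win_size]
           for i in range(0, len(seq), win_skip)
           if len(seq[i:i + win_size]) == win_size]
print(len(windows))          # 120 - 79 + 1 = 42 windows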
def socket(self): <NEW_LINE> <INDENT> if self._socket is None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) <NEW_LINE> s.connect(self._socket_path) <NEW_LINE> s.settimeout(1) <NEW_LINE> self._socket = s <NEW_LINE> <DEDENT> except OSError as ex: <NEW_LINE> <INDENT> msg = self._get_error_message(ex.errno) <NEW_LINE> err = BackendError(msg.format(self._socket_path)) <NEW_LINE> raise err from ex <NEW_LINE> <DEDENT> <DEDENT> return self._socket
Returns connected socket.
625941b626238365f5f0ec71
def get_cursinfo(self): <NEW_LINE> <INDENT> return Cursinfo(self.connection) if self.connection else None
Retrieve a cursor.
625941b69f2886367277a699
def __init__(self, conf, session, callback, node_name, node_opts, link_name, link_opts): <NEW_LINE> <INDENT> self.callback = callback <NEW_LINE> self.receiver = None <NEW_LINE> self.session = None <NEW_LINE> if conf.qpid_topology_version == 1: <NEW_LINE> <INDENT> addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": True, "auto-delete": True, }, }, "link": { "name": link_name, "durable": True, "x-declare": { "durable": False, "auto-delete": True, "exclusive": False, }, }, } <NEW_LINE> addr_opts["node"]["x-declare"].update(node_opts) <NEW_LINE> <DEDENT> elif conf.qpid_topology_version == 2: <NEW_LINE> <INDENT> addr_opts = { "link": { "x-declare": { "auto-delete": True, }, }, } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise_invalid_topology_version() <NEW_LINE> <DEDENT> addr_opts["link"]["x-declare"].update(link_opts) <NEW_LINE> self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) <NEW_LINE> self.reconnect(session)
Declare a queue on an amqp session. 'session' is the amqp session to use 'callback' is the callback to call when messages are received 'node_name' is the first part of the Qpid address string, before ';' 'node_opts' will be applied to the "x-declare" section of "node" in the address string. 'link_name' goes into the "name" field of the "link" in the address string 'link_opts' will be applied to the "x-declare" section of "link" in the address string.
625941b66aa9bd52df036baa
@pytest.mark.parametrize( "filename, algorithm, hash, correct", [ ("timeseries2.csv", "md5", "a5c4032e2d8f5205ca99dedcfa4cd18e", True), ( "timeseries2.csv", "sha256", "0f75b3cee325d37112687d3d10596f44e0add374f4e40a1b6687912c05e65366", True, ), ("timeseries2.h5", "md5", "0f6c65a36851c89c7c4e63ab1893554b", True), ( "timeseries2.h5", "md5", "1272702d60694f3417b910fb158e717de4fccdbf6aa10aa37f1c95cd78f8075e", False, ), ("timeseries2.csv", "md5", "A5C4032E2D8F5205CA99DEDCFA4CD18E", True), ], ) <NEW_LINE> def test_hash_timeseries2_(filename, algorithm, hash, correct): <NEW_LINE> <INDENT> fullname = os.path.join(TEST_FOLDER, "models", filename) <NEW_LINE> if correct: <NEW_LINE> <INDENT> hashes.check_hash(fullname, hash, algorithm=algorithm) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> with pytest.raises(hashes.HashMismatchError): <NEW_LINE> <INDENT> hashes.check_hash(fullname, hash, algorithm=algorithm)
Test the hash value of files in the models directory
625941b61b99ca400220a8b8
def _stixelnet_kl_loss(pred, label): <NEW_LINE> <INDENT> reg_loss = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) <NEW_LINE> tf.summary.scalar('loss/regularization', reg_loss) <NEW_LINE> label_kl = label['indices'] <NEW_LINE> label_kl_prob = label['gt_kl'] <NEW_LINE> tf.summary.histogram('kl/indices', label_kl) <NEW_LINE> tf.summary.histogram('kl/gt_kl_prob', label_kl_prob) <NEW_LINE> pred_prob = 1 / (1 + tf.exp(tf.multiply( tf.reshape(pred['seg_mask'][:, 0], [-1, 1]), label_kl - tf.reshape(pred['seg_mask'][:,1]*51, [-1, 1]) ) ) ) <NEW_LINE> tf.summary.histogram('kl/pred_alpha', tf.reshape(pred['seg_mask'][:, 0], [-1, 1])) <NEW_LINE> tf.summary.histogram('kl/pred_beta', tf.reshape(pred['seg_mask'][:, 1], [-1, 1])) <NEW_LINE> tf.summary.histogram('kl/pred_prob', pred_prob) <NEW_LINE> kld = tf.keras.losses.KLDivergence() <NEW_LINE> kl_loss = kld(label_kl_prob, pred_prob) <NEW_LINE> tf.summary.scalar('loss/kl', kl_loss) <NEW_LINE> tot_loss = kl_loss+reg_loss <NEW_LINE> tf.summary.scalar('loss/total', tot_loss) <NEW_LINE> return kl_loss+reg_loss
StixelNet Kullback-Leibler divergence loss. Input: pred_logit: prediction logits (2), namely alpha_0 (confidence) and beta_1 (transition point); label_prob: label probabilities (256)
625941b66fece00bbac2d543
def GetSPPosition(topo): <NEW_LINE> <INDENT> posSP=[] <NEW_LINE> b = topo.find('S') <NEW_LINE> if b != -1: <NEW_LINE> <INDENT> e=topo.rfind('S')+1 <NEW_LINE> posSP.append((b,e)) <NEW_LINE> <DEDENT> return posSP
Get position of Signal Peptide given a topology 2015-02-10
625941b6dd821e528d63afb4
def get_all_element_disruptions(elem, response): <NEW_LINE> <INDENT> DisruptAndElt = namedtuple('DisruptAndElt', ['disruption', 'impacted_object']) <NEW_LINE> disruption_by_obj = defaultdict(list) <NEW_LINE> all_disruptions = {d['id']: d for d in response['disruptions']} <NEW_LINE> def disruptions_filler(_, obj): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if 'links' not in obj: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> except TypeError: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> real_disruptions = [all_disruptions[d['id']] for d in obj['links'] if d['type'] == 'disruption'] <NEW_LINE> for d in real_disruptions: <NEW_LINE> <INDENT> disruption_by_obj[d['id']].append(DisruptAndElt(disruption=d, impacted_object=obj)) <NEW_LINE> <DEDENT> <DEDENT> from jormungandr import utils <NEW_LINE> utils.walk_dict(elem, disruptions_filler) <NEW_LINE> return disruption_by_obj
Return a map with the disruption id as key and, as value, the list of (disruption, impacted object) pairs for an item of the response.
625941b64e4d5625662d41e6
def __init__(self): <NEW_LINE> <INDENT> self.Name = None <NEW_LINE> self.Description = None <NEW_LINE> self.AuthType = None <NEW_LINE> self.DataTemplate = None <NEW_LINE> self.DataProtocol = None
:param Name: Product name; must be unique within a region; supports Chinese characters, English letters, hyphens and underscores; at most 31 characters, with each Chinese character counting as two :type Name: str :param Description: Product description :type Description: str :param AuthType: Authentication mode (1: dynamic token; dynamic token is recommended) :type AuthType: int :param DataTemplate: Data template (JSON array) :type DataTemplate: list of str :param DataProtocol: Data protocol ('native' means custom, 'template' means data template; defaults to 'template') :type DataProtocol: str
625941b6293b9510aa2c30a1
def p_while_statement(p): <NEW_LINE> <INDENT> p[0] = {} <NEW_LINE> p[0]['code'] = [] <NEW_LINE> p[0]['begin'] = symbol_table.newlabel() <NEW_LINE> p[0]['after'] = symbol_table.newlabel() <NEW_LINE> p[0]['code'] += ['label, ' + p[0]['begin']] <NEW_LINE> if p[3]!=None and 'code' in p[3]: <NEW_LINE> <INDENT> p[0]['code'] += p[3]['code'] <NEW_LINE> <DEDENT> p[0]['code'] += ['ifgoto, '+'==, '+p[3]['value']+", 0, "+ p[0]['after']] <NEW_LINE> if p[5]!=None and 'code' in p[5]: <NEW_LINE> <INDENT> p[0]['code'] += p[5]['code'] <NEW_LINE> <DEDENT> p[0]['code'] += ['goto, ' + p[0]['begin']] <NEW_LINE> p[0]['code'] += ['label, ' + p[0]['after']]
while_statement : WHILE LPAREN boolean_expression RPAREN embedded_statement
625941b6046cf37aa974cb53
def retrieve_serial_number(self): <NEW_LINE> <INDENT> serial_number = None <NEW_LINE> if self._serial_number not in (None, "None", ""): <NEW_LINE> <INDENT> serial_number = self._serial_number <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> status, status_msg = self.run_cmd("adb get-serialno", self._uecmd_default_timeout, force_execution=True, silent_mode=True) <NEW_LINE> if status == Global.SUCCESS: <NEW_LINE> <INDENT> expr = "(?P<serial>[0-9A-Za-z\-]*).*" <NEW_LINE> result = re.match(expr, status_msg) <NEW_LINE> if result is not None: <NEW_LINE> <INDENT> serial_number = result.group("serial") <NEW_LINE> if serial_number == "unknown": <NEW_LINE> <INDENT> serial_number = None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> serial_number = None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.get_logger().warning("Fail to retrieve serial number of the device") <NEW_LINE> serial_number = None <NEW_LINE> <DEDENT> self._serial_number = serial_number <NEW_LINE> <DEDENT> return serial_number
Retrieve the serial number of the device, returning the value read in phone catalog or bench_config files (depending of single or multi phones campaign), or in last resort with the "adb get-serialno" command. .. attention:: If the parameter is empty, we retrieve it from 'adb get-serialno' command, assuming that only one device is connected. If more than one device or emulator is present, this method returns the first serial number. :rtype: str :return: serial number of the device, or None if unknown
625941b610dbd63aa1bd29b8
@app.route('/api/foodtrucks', methods=['GET']) <NEW_LINE> def foodtrucks(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> longitude = float(request.args.get('longitude')) <NEW_LINE> latitude = float(request.args.get('latitude')) <NEW_LINE> distance = float(request.args.get('distance')) <NEW_LINE> <DEDENT> except (TypeError, ValueError): <NEW_LINE> <INDENT> abort(400) <NEW_LINE> <DEDENT> foodtrucks = [x for x in get_db().gen_within_distance(distance, latitude, longitude)] <NEW_LINE> return make_response( json.dumps(foodtrucks), 201 )
Handles the '/api/foodtrucks' endpoint; returns a list of foodtrucks within the given distance of the point passed as GET parameters.
625941b67b25080760e39264
def jsonParse(self,arr_in,arr_out): <NEW_LINE> <INDENT> frm_id = 0 <NEW_LINE> kframe_id = 0 <NEW_LINE> arr_in = sorted(arr_in, key=lambda k: k['keyframes'][kframe_id]['frame']) <NEW_LINE> for i in range(len(arr_in)): <NEW_LINE> <INDENT> if i == 0: <NEW_LINE> <INDENT> frm_id = 0 <NEW_LINE> arr_out.append([]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if arr_out[frm_id][kframe_id]['frame'] != arr_in[i]['keyframes'][kframe_id]['frame']: <NEW_LINE> <INDENT> arr_out.append([]) <NEW_LINE> frm_id +=1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> frm_id += 0 <NEW_LINE> <DEDENT> <DEDENT> obj = {} <NEW_LINE> obj['color'] = arr_in[i]['color'] <NEW_LINE> obj['type'] = arr_in[i]['type'] <NEW_LINE> obj['x'] = arr_in[i]['keyframes'][kframe_id]['x'] <NEW_LINE> obj['y'] = arr_in[i]['keyframes'][kframe_id]['y'] <NEW_LINE> obj['w'] = arr_in[i]['keyframes'][kframe_id]['w'] <NEW_LINE> obj['h'] = arr_in[i]['keyframes'][kframe_id]['h'] <NEW_LINE> obj['frame'] = arr_in[i]['keyframes'][kframe_id]['frame'] <NEW_LINE> obj['continueInterpolation'] = arr_in[i]['keyframes'][kframe_id]['continueInterpolation'] <NEW_LINE> try: <NEW_LINE> <INDENT> obj['prob'] = arr_in[i]['keyframes'][kframe_id]['prob'] <NEW_LINE> obj['bbID'] = arr_in[i]['keyframes'][kframe_id]['bbID'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> obj['prob'] = -1 <NEW_LINE> obj['bbID'] = -1 <NEW_LINE> <DEDENT> obj['detected'] = -1 <NEW_LINE> obj['BBA'] = -1 <NEW_LINE> obj['frameID'] = frm_id <NEW_LINE> arr_out[frm_id].append(obj) <NEW_LINE> <DEDENT> print(arr_out[frm_id])
Re-structures json object list to list of frames with objects for easy metrics analysis :param arr_in: Input json array of objects to parse :param arr_out: Output array of frames with its objects list :return: void
625941b6cb5e8a47e48b78b8
def myAtoi(self, str: str) -> int: <NEW_LINE> <INDENT> state_map = { 'start': ['start', 'sign', 'number', 'end'], 'sign': ['end', 'end', 'number', 'end'], 'number': ['end', 'end', 'number', 'end'], 'end': ['end', 'end', 'end', 'end'], } <NEW_LINE> MAX_INT = 2 ** 31 - 1 <NEW_LINE> MIN_INT = -2 ** 31 <NEW_LINE> MAX_NUM = 2 ** 31 // 10 <NEW_LINE> state = 'start' <NEW_LINE> sign = 1 <NEW_LINE> ans = 0 <NEW_LINE> for e in str: <NEW_LINE> <INDENT> if e == ' ': <NEW_LINE> <INDENT> state = state_map[state][0] <NEW_LINE> <DEDENT> elif e == '-': <NEW_LINE> <INDENT> state = state_map[state][1] <NEW_LINE> <DEDENT> elif e == '+': <NEW_LINE> <INDENT> state = state_map[state][1] <NEW_LINE> <DEDENT> elif e.isdigit(): <NEW_LINE> <INDENT> state = state_map[state][2] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> state = state_map[state][3] <NEW_LINE> <DEDENT> if state == 'sign': <NEW_LINE> <INDENT> sign = -1 if e == '-' else 1 <NEW_LINE> <DEDENT> if state == 'number': <NEW_LINE> <INDENT> if ans > MAX_NUM or (ans == MAX_NUM and int(e) > (7 if int(e) else 8)): <NEW_LINE> <INDENT> return MAX_INT if sign == 1 else MIN_INT <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ans = ans * 10 + int(e) <NEW_LINE> <DEDENT> <DEDENT> print(ans, sign, state, e) <NEW_LINE> <DEDENT> return ans * sign
[summary] State-machine approach. Character classes: 1. space ' ' 2. sign + or -: sign 3. digit: number 4. anything that is not a space, sign or digit: other. The machine starts in state start and finishes in state end. Transitions: 1. From start, a space, sign, digit or other character moves the machine to the corresponding next state. 2. From each subsequent state, the same four character classes again select the next state.
625941b6f7d966606f6a9e11
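Assuming the method sits on a LeetCode-style Solution class (a hypothetical wrapper), the state machine behaves as follows; note the entry still carries a debug print inside its loop.

s = Solution()
print(s.myAtoi("   -42"))            # -42: spaces, then sign, then digits
print(s.myAtoi("4193 with words"))   # 4193: 'w' drives the machine to 'end'
print(s.myAtoi("-91283472332"))      # -2147483648: clamped to 32-bit INT_MIN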
def hangman(secret_word): <NEW_LINE> <INDENT> letters_guessed=[] <NEW_LINE> guesses=6 <NEW_LINE> warnings=3 <NEW_LINE> length=len(secret_word) <NEW_LINE> print("Welcome to the game Hangman") <NEW_LINE> print("I am thinking of a word that is ",length," letters long") <NEW_LINE> while True: <NEW_LINE> <INDENT> if guesses==0: <NEW_LINE> <INDENT> print("You lost, the word was ",secret_word) <NEW_LINE> break <NEW_LINE> <DEDENT> if set(secret_word).issubset(letters_guessed): <NEW_LINE> <INDENT> print("you won!") <NEW_LINE> score=guesses*len(secret_word) <NEW_LINE> print("Your score is",score) <NEW_LINE> break <NEW_LINE> <DEDENT> print("You have ",guesses," guesses left.") <NEW_LINE> print("available letters: ",get_available_letters(letters_guessed)) <NEW_LINE> print("Enter your guess:") <NEW_LINE> a=input() <NEW_LINE> if a not in list(string.ascii_letters ): <NEW_LINE> <INDENT> print("This is not a valid input") <NEW_LINE> warnings-=1 <NEW_LINE> print("Now you have ",warnings,' warnings left, at 0 warnings you will lose a guess') <NEW_LINE> if warnings==0: <NEW_LINE> <INDENT> guesses-=1 <NEW_LINE> warnings=3 <NEW_LINE> <DEDENT> print("Let's try one more time!") <NEW_LINE> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if a in string.ascii_uppercase: <NEW_LINE> <INDENT> a=a.lower() <NEW_LINE> <DEDENT> if a in letters_guessed: <NEW_LINE> <INDENT> print("You already tried that guess") <NEW_LINE> warnings-=1 <NEW_LINE> print("Now you have ",warnings,' warnings left, at 0 warnings you will lose a guess') <NEW_LINE> if warnings==0: <NEW_LINE> <INDENT> guesses-=1 <NEW_LINE> warnings=3 <NEW_LINE> <DEDENT> print("Let's try one more time!") <NEW_LINE> continue <NEW_LINE> <DEDENT> letters_guessed.append(a) <NEW_LINE> <DEDENT> if a in secret_word: <NEW_LINE> <INDENT> print("Good guess:",get_guessed_word(secret_word, letters_guessed)) <NEW_LINE> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Oops! That letter is not in word: ",get_guessed_word(secret_word, letters_guessed)) <NEW_LINE> guesses-=1 <NEW_LINE> continue
secret_word: string, the secret word to guess. Starts up an interactive game of Hangman. * At the start of the game, let the user know how many letters the secret_word contains and how many guesses s/he starts with. * The user should start with 6 guesses * Before each round, you should display to the user how many guesses s/he has left and the letters that the user has not yet guessed. * Ask the user to supply one guess per round. Remember to make sure that the user puts in a letter! * The user should receive feedback immediately after each guess about whether their guess appears in the computer's word. * After each guess, you should display to the user the partially guessed word so far. Follows the other limitations detailed in the problem write-up.
625941b6a79ad161976cbf4e
def dQdT2dLoL(Q, dQ, T, dT): <NEW_LINE> <INDENT> T, dT = radians(asarray(T, 'd')), radians(asarray(dT, 'd')) <NEW_LINE> Q, dQ = asarray(Q, 'd'), asarray(dQ, 'd') <NEW_LINE> dQoQ = sigma2FWHM(dQ)/Q <NEW_LINE> dToT = dT/tan(T) <NEW_LINE> if (dQoQ < dToT).any(): <NEW_LINE> <INDENT> raise ValueError("Cannot infer wavelength resolution: dQ is too small or dT is too large for some data points") <NEW_LINE> <DEDENT> return sqrt(dQoQ**2 - dToT**2)
Convert a calculated Q resolution and angular divergence to a wavelength dispersion. *Q*, *dQ* |1/Ang| $Q$ and 1-\ $\sigma$ $Q$ resolution *T*, *dT* |deg| angle and FWHM angular divergence Returns FWHM $\Delta\lambda/\lambda$
625941b6d58c6744b4257a69
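The identity being inverted follows from $Q = 4\pi\sin\theta/\lambda$: propagating independent errors in $\theta$ and $\lambda$ in quadrature gives $(\Delta Q/Q)^2 = (\Delta\lambda/\lambda)^2 + (\Delta\theta/\tan\theta)^2$, so the function solves for $\Delta\lambda/\lambda = \sqrt{(\Delta Q/Q)^2 - (\Delta\theta/\tan\theta)^2}$, which is real only when the relative $Q$ resolution is at least as large as the angular term; that is what the ValueError guards.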
def climbStairs(self, n): <NEW_LINE> <INDENT> n = n + 1 <NEW_LINE> rootFive = 5 ** 0.5 <NEW_LINE> phi = (1 + rootFive) / 2 <NEW_LINE> negativePhi = (1 - rootFive) / 2 <NEW_LINE> ans = (phi**n - (negativePhi)**n) / rootFive <NEW_LINE> return int(round(ans))
:type n: int :rtype: int
625941b697e22403b379cda1
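climbStairs(n) is the (n+1)-th Fibonacci number, so the closed form (Binet's formula) can be cross-checked against the obvious DP; the Solution wrapper is hypothetical, and the round() added above keeps the floating-point evaluation of phi**n from truncating one below the true integer.

def climb_dp(n):
    a, b = 1, 2                  # ways to climb 1 and 2 stairs
    if n < 3:
        return n                 # 1 stair -> 1 way, 2 stairs -> 2 ways
    for _ in range(n - 2):
        a, b = b, a + b
    return b

for n in range(1, 40):
    assert Solution().climbStairs(n) == climb_dp(n)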
def hcluster(features, datas, distfcn=L2dist): <NEW_LINE> <INDENT> distances = {} <NEW_LINE> distances2 = {} <NEW_LINE> node = [ClusterLeafNode(array(f),id=i) for i,f in enumerate(features)] <NEW_LINE> while len(node)>1: <NEW_LINE> <INDENT> closest = float('Inf') <NEW_LINE> for ni,nj in combinations(node,2): <NEW_LINE> <INDENT> if (ni,nj) not in distances: <NEW_LINE> <INDENT> distances2[ni.vec[0], nj.vec[0]] = distances[ni,nj] = distfcn(ni.vec,nj.vec, datas) <NEW_LINE> <DEDENT> d = distances[ni,nj] <NEW_LINE> assert(not isnan(d)) <NEW_LINE> if d<closest: <NEW_LINE> <INDENT> closest = d <NEW_LINE> lowestpair = (ni,nj) <NEW_LINE> <DEDENT> <DEDENT> ni,nj = lowestpair <NEW_LINE> new_vec = nj.vec <NEW_LINE> new_node = ClusterNode(new_vec,left=ni,right=nj,distance=closest) <NEW_LINE> node.remove(ni) <NEW_LINE> node.remove(nj) <NEW_LINE> node.append(new_node) <NEW_LINE> <DEDENT> return node[0], distances2
Cluster the rows of features using hierarchical clustering.
625941b6be383301e01b5296
def get_updated_currency(self, currency_array, main_currency, max_delta_days=1): <NEW_LINE> <INDENT> logger = logging.getLogger(__name__) <NEW_LINE> if main_currency in currency_array: <NEW_LINE> <INDENT> currency_array.remove(main_currency) <NEW_LINE> <DEDENT> suported = ['MXN', 'USD'] <NEW_LINE> for curr in currency_array: <NEW_LINE> <INDENT> if curr in suported: <NEW_LINE> <INDENT> main_rate = self.rate_retrieve() <NEW_LINE> if main_currency == 'MXN': <NEW_LINE> <INDENT> rate = 1 / main_rate <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> rate = main_rate <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> self.updated_currency['rate'][curr] = rate <NEW_LINE> logger.debug("Rate retrieved : %s = %s %s" % (main_currency, rate, curr)) <NEW_LINE> <DEDENT> return self.updated_currency, self.log_info
implementation of abstract method of Curreny_getter_interface
625941b6f548e778e58cd384
def is_immediate(self): <NEW_LINE> <INDENT> return self.kind is not VALUE and self.kind is not VARIABLE_ARGS
Is this an immediate operand? Note that this includes both `ImmediateKind` operands *and* entity references. It is any operand that doesn't represent a value dependency.
625941b67cff6e4e8111778e
def test_that_messages_can_be_created(self): <NEW_LINE> <INDENT> message = Message(**self.msg_stub) <NEW_LINE> message.save() <NEW_LINE> self.assertIsNotNone(message.id)
Tests that messages can be created
625941b66e29344779a6241e
def get_mutant_files(self): <NEW_LINE> <INDENT> file_names = glob.glob(os.path.join(self._mutants_folder, "*.json")) <NEW_LINE> for f in file_names: <NEW_LINE> <INDENT> yield f
Iterates over the mutant .json files in the mutant folder
625941b632920d7e50b27fd4
def get_device(packettype, subtype, id_string): <NEW_LINE> <INDENT> if packettype == 0x10: <NEW_LINE> <INDENT> pkt = lowlevel.Lighting1() <NEW_LINE> pkt.parse_id(subtype, id_string) <NEW_LINE> return LightingDevice(pkt) <NEW_LINE> <DEDENT> elif packettype == 0x11: <NEW_LINE> <INDENT> pkt = lowlevel.Lighting2() <NEW_LINE> pkt.parse_id(subtype, id_string) <NEW_LINE> return LightingDevice(pkt) <NEW_LINE> <DEDENT> elif packettype == 0x12: <NEW_LINE> <INDENT> pkt = lowlevel.Lighting3() <NEW_LINE> pkt.parse_id(subtype, id_string) <NEW_LINE> return LightingDevice(pkt) <NEW_LINE> <DEDENT> elif packettype == 0x14: <NEW_LINE> <INDENT> pkt = lowlevel.Lighting5() <NEW_LINE> pkt.parse_id(subtype, id_string) <NEW_LINE> return LightingDevice(pkt) <NEW_LINE> <DEDENT> elif packettype == 0x15: <NEW_LINE> <INDENT> pkt = lowlevel.Lighting6() <NEW_LINE> pkt.parse_id(subtype, id_string) <NEW_LINE> return LightingDevice(pkt) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Unsupported packettype")
Return a device based on its identifying values
625941b6de87d2750b85fb96
def _allowed_to_proceed(self, verbose): <NEW_LINE> <INDENT> def _display(msg, paths): <NEW_LINE> <INDENT> if not paths: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> logger.info(msg) <NEW_LINE> with indent_log(): <NEW_LINE> <INDENT> for path in sorted(compact(paths)): <NEW_LINE> <INDENT> logger.info(path) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if not verbose: <NEW_LINE> <INDENT> will_remove, will_skip = compress_for_output_listing(self.paths) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> will_remove = set(self.paths) <NEW_LINE> will_skip = set() <NEW_LINE> <DEDENT> _display("Would remove:", will_remove) <NEW_LINE> _display("Would not remove (might be manually added):", will_skip) <NEW_LINE> _display("Would not remove (outside of prefix):", self._refuse) <NEW_LINE> if verbose: <NEW_LINE> <INDENT> _display("Will actually move:", compress_for_rename(self.paths)) <NEW_LINE> <DEDENT> return ask("Proceed (y/n)? ", ("y", "n")) == "y"
Display which files would be deleted and prompt for confirmation
625941b666656f66f7cbbfb2
def read_metadata(self,filename = ''): <NEW_LINE> <INDENT> if (filename != ''): <NEW_LINE> <INDENT> self.filename = filename <NEW_LINE> <DEDENT> if not( self.lock.locked() ): <NEW_LINE> <INDENT> data = [] <NEW_LINE> with open(self.filename) as f: <NEW_LINE> <INDENT> for line in f: <NEW_LINE> <INDENT> if line =='\n': <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> json_data = json.loads(line) <NEW_LINE> if ("metadata" in json_data): <NEW_LINE> <INDENT> data.append(json_data) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return(data)
Reads the log file and returns the metadata
625941b63c8af77a43ae35a6
def test_connectable(): <NEW_LINE> <INDENT> assert connectable
Test to ensure connectable works as expected
625941b6d486a94d0b98df57
def write_output_file(data, file_name): <NEW_LINE> <INDENT> fileWritten = False <NEW_LINE> while fileWritten == False: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> filelocation = os.getcwd() <NEW_LINE> f = open(filelocation + file_name, 'w+') <NEW_LINE> if type(data) == str: <NEW_LINE> <INDENT> f.write(data) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for i in data: <NEW_LINE> <INDENT> f.write(i) <NEW_LINE> f.write('\n') <NEW_LINE> <DEDENT> <DEDENT> f.close() <NEW_LINE> fileWritten = True <NEW_LINE> <DEDENT> except OSError: <NEW_LINE> <INDENT> print("Did not output file.") <NEW_LINE> <DEDENT> <DEDENT> print("File output...")
This function writes the data to a specified file location. Input: data, file_name Output: None
625941b63539df3088e2e153
def coarse_tag_str(pos_seq): <NEW_LINE> <INDENT> global tag2coarse <NEW_LINE> tags = [tag2coarse.get(tag,'O') for tag in pos_seq] <NEW_LINE> return ''.join(tags)
Convert POS sequence to our coarse system, formatted as a string.
625941b6236d856c2ad445e7
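With a toy tag2coarse map the behaviour is easy to see; in the source the real map lives in a module-level global, so this sketch simply rebinds both names locally.

tag2coarse = {'NN': 'N', 'NNS': 'N', 'JJ': 'A', 'DT': 'D'}

def coarse_tag_str(pos_seq):
    return ''.join(tag2coarse.get(tag, 'O') for tag in pos_seq)

print(coarse_tag_str(['DT', 'JJ', 'NN', 'VBZ']))  # DANO: unknown tags map to 'O'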
@core.command() <NEW_LINE> def db_upgrade(): <NEW_LINE> <INDENT> from laniakea.db import Database <NEW_LINE> db = Database() <NEW_LINE> db.upgrade() <NEW_LINE> print('Database upgraded.')
Upgrade database schemas to latest version.
625941b656ac1b37e6263fe9
def build_label(id_example, token_index) -> str: <NEW_LINE> <INDENT> key = id_example, token_index <NEW_LINE> if key in token2info: <NEW_LINE> <INDENT> items = token2info[key] <NEW_LINE> pieces = [] <NEW_LINE> singles = set() <NEW_LINE> for id_chain, is_single, is_open in items: <NEW_LINE> <INDENT> if is_single: <NEW_LINE> <INDENT> if id_chain in singles: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p = f'({id_chain})' <NEW_LINE> singles.add(id_chain) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if is_open: <NEW_LINE> <INDENT> p = f'({id_chain}' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p = f'{id_chain})' <NEW_LINE> <DEDENT> <DEDENT> pieces.append(p) <NEW_LINE> <DEDENT> res = '|'.join(pieces) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> res = "-" <NEW_LINE> <DEDENT> return res
token.index_abs -> the set of components that the token {opens, closes}; if it opens or closes nothing, return "-"
625941b6c432627299f04a4d
def test_show_should_include_disabled_flavor_for_user(self): <NEW_LINE> <INDENT> self.context.is_admin = False <NEW_LINE> flavor = self.controller.show( self.req, self.disabled_type['flavorid'])['flavor'] <NEW_LINE> self.assertEqual(flavor['name'], self.disabled_type['name'])
Counterintuitively we should show disabled flavors to all users and not just admins. The reason is that, when a user performs a server-show request, we want to be able to display the pretty flavor name ('512 MB Instance') and not just the flavor-id even if the flavor id has been marked disabled.
625941b6711fe17d82542184
def parse_time( t_string, kd_time=None): <NEW_LINE> <INDENT> t_string = t_string.strip() <NEW_LINE> mutc, mrn, mrk = (p.match(t_string) for p in time_patterns) <NEW_LINE> if mutc: <NEW_LINE> <INDENT> ts = time.strptime(t_string.replace('.', ':')[1:], "%Y-%m-%d-%H:%M:%S") <NEW_LINE> secs = int(time.mktime(ts)) - time.timezone <NEW_LINE> return secs <NEW_LINE> <DEDENT> elif mrn: <NEW_LINE> <INDENT> days,hrs,mins,secs = (0 if mrn.group(t) is None else int(mrn.group(t)) for t in range(1,5)) <NEW_LINE> return int(time.time()) + days*3600*24 + hrs*3600 + mins*60 + secs <NEW_LINE> <DEDENT> elif mrk: <NEW_LINE> <INDENT> if kd_time is not None: <NEW_LINE> <INDENT> sign = 1 if mrk.group(1)=='+' else -1 <NEW_LINE> days,hrs,mins,secs = (0 if mrk.group(t) is None else int(mrk.group(t)) for t in range(2,6)) <NEW_LINE> return int(kd_time) + sign * (days*3600*24 + hrs*3600 + mins*60 + secs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ParsingError('Time in rel-kd format, but kd_time not given!') <NEW_LINE> <DEDENT> <DEDENT> elif t_string.isdigit(): <NEW_LINE> <INDENT> return int(t_string) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ParsingError('Unknown time format')
Parses a time string, in the following formats: raw unixtime: %s UTC: [uU]%Y-%m-%d-%H:%M:%S relative to now: [nN]\+?(((%d:)?%h:)?%m:)?%s relative to kd_time: [cCkK]?[-+](((%d:)?%h:)?%m:)?%s Using .s instead of :s is also okay. The last format depends on the kd_time argument; if kd_time is None and the time is given in that format, a ParsingError is raised.
625941b6e64d504609d74649
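Assuming the module-level time_patterns regexes match the four formats named in the docstring (they are not shown in this entry), typical calls would look like this; the relative forms of course depend on the current clock.

parse_time("1500000000")                    # raw unixtime -> 1500000000
parse_time("u2017-07-14-02:40:00")          # UTC timestamp
parse_time("n+1:00:00")                     # one hour from now
parse_time("k-30:00", kd_time=1500000000)   # 30 min before kd_time -> 1499998200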
def test_deferred_award_claim_on_login(self): <NEW_LINE> <INDENT> deferred_email = "[email protected]" <NEW_LINE> user1 = self._get_user(username="creator", email="[email protected]") <NEW_LINE> b1 = Badge.objects.create(creator=user1, title="Badge to defer") <NEW_LINE> url = reverse('badger.views.award_badge', args=(b1.slug,)) <NEW_LINE> self.client.login(username="creator", password="trustno1") <NEW_LINE> r = self.client.get(url, follow=True) <NEW_LINE> eq_(200, r.status_code) <NEW_LINE> doc = pq(r.content) <NEW_LINE> form = doc('form#award_badge') <NEW_LINE> eq_(1, form.length) <NEW_LINE> eq_(1, form.find('*[name=emails]').length) <NEW_LINE> eq_(1, form.find('input.submit,button.submit').length) <NEW_LINE> r = self.client.post(url, dict( emails=deferred_email, ), follow=False) <NEW_LINE> ok_('award' not in r['Location']) <NEW_LINE> user2 = self._get_user(username="awardee", email=deferred_email) <NEW_LINE> self.client.login(username="awardee", password="trustno1") <NEW_LINE> r = self.client.get(reverse('badger.views.detail', args=(b1.slug,)), follow=True) <NEW_LINE> ok_(b1.is_awarded_to(user2))
Ensure that a deferred award gets claimed on login.
625941b67047854f462a1216
def write_team_to_file(): <NEW_LINE> <INDENT> with open('teams.txt', 'w') as file: <NEW_LINE> <INDENT> fieldnames = ['Name', 'Height (inches)', 'Soccer Experience', 'Guardian Name(s)'] <NEW_LINE> for team in [('Sharks', Sharks), ('Dragons', Dragons), ('Raptors', Raptors)]: <NEW_LINE> <INDENT> file.write(team[0] + '\n') <NEW_LINE> for p in team[1]: <NEW_LINE> <INDENT> file.write((','.join((p['Name'], p['Soccer Experience'], p['Guardian Name(s)'])) + '\n')) <NEW_LINE> write_welcome_letter(p, team[0]) <NEW_LINE> <DEDENT> file.write('\n') <NEW_LINE> file.write('\n')
Output team composition as a .txt file
625941b61f037a2d8b946008
def save(self, file_name): <NEW_LINE> <INDENT> pass
Save the state of the crawler object. Parameters: file_name (str): The name of the file you want to save the crawler information to. Returns: None. Creates file and saves to it.
625941b6e5267d203edcdaaa
def file(request, language=None, id=None): <NEW_LINE> <INDENT> file = get_object_or_404(File, pk=id) <NEW_LINE> response = HttpResponse(file.file, content_type='application/binary') <NEW_LINE> response['Content-Disposition'] = 'attachment; filename=%s' % file.title <NEW_LINE> return response
Delivers files to the browser.
625941b63c8af77a43ae35a7
def insertarDestinatario(self, conexion, nombre, celular, mail, horaMin, horaMax): <NEW_LINE> <INDENT> c= Consultas() <NEW_LINE> cursor= conexion.cursor() <NEW_LINE> cursor.execute(c.insertDestinatario(), (nombre, celular, mail, horaMin, horaMax)) <NEW_LINE> cursor.close() <NEW_LINE> conexion.commit() <NEW_LINE> cursor= conexion.cursor() <NEW_LINE> cursor.execute(c.selectUltimoDestinatario()) <NEW_LINE> resultado= cursor.fetchone() <NEW_LINE> cursor.close() <NEW_LINE> return resultado[0]
Inserts a recipient into the database. Receives as parameters the database connection and the attributes of the recipient to insert. Returns the id of the created recipient.
625941b6fbf16365ca6f5fc4
@pytest.fixture(scope='module') <NEW_LINE> def models(config_path): <NEW_LINE> <INDENT> autograder.setup_app(config_path) <NEW_LINE> from autograder import models as m <NEW_LINE> m.db.session.remove() <NEW_LINE> m.drop_all() <NEW_LINE> m.create_all() <NEW_LINE> return m
Setup the sqlite db and initialize the models
625941b6bde94217f3682c06
def create_presentation(self, output_path=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> audio = self.download_mp3() <NEW_LINE> <DEDENT> except client.DownloadError: <NEW_LINE> <INDENT> video = self.download_video() <NEW_LINE> audio = self._extractAudio(video) <NEW_LINE> <DEDENT> raw_slides = self.download_slides() <NEW_LINE> jpg_slides = self._convert_slides(raw_slides) <NEW_LINE> frame_pattern = self._prepare_frames(jpg_slides) <NEW_LINE> output = self._assemble(audio, frame_pattern, output=output_path) <NEW_LINE> return output
Create the presentation. The audio track is mixed with the slides. The resulting file is saved as output_path DownloadFailedException is raised if some resources cannot be fetched.
625941b63317a56b86939a73
def initialize(self, runtime): <NEW_LINE> <INDENT> from .primitives import Id <NEW_LINE> if self._runtime is not None: <NEW_LINE> <INDENT> raise IllegalState('Manager has already been initialized') <NEW_LINE> <DEDENT> self._runtime = runtime <NEW_LINE> config = runtime.get_configuration() <NEW_LINE> parameter_id = Id('parameter:loggingProviderImpl@dlkit_service') <NEW_LINE> provider_impl = config.get_value_by_parameter(parameter_id).get_string_value() <NEW_LINE> if self._proxy is None: <NEW_LINE> <INDENT> self._provider_manager = runtime.get_manager('LOGGING', provider_impl) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._provider_manager = runtime.get_proxy_manager('LOGGING', provider_impl)
OSID Manager initialize
625941b65fcc89381b1e14cd
def _project_to_output_space(self, start: np.ndarray, intermediate: np.ndarray, goal: np.ndarray, params: ClothoidParameters, points_in_clothoid_space: np.ndarray) -> np.ndarray: <NEW_LINE> <INDENT> start = start - goal <NEW_LINE> intermediate = intermediate - goal <NEW_LINE> p1 = fresnel(params.t1) <NEW_LINE> p2 = fresnel(params.t2) <NEW_LINE> output_space_points = np.stack([intermediate, start], axis=-2) <NEW_LINE> clothoid_space_points = np.stack([p1, p2], axis=-2) <NEW_LINE> M = np.linalg.solve(clothoid_space_points, output_space_points) <NEW_LINE> points_in_output_space = goal + points_in_clothoid_space @ M <NEW_LINE> return points_in_output_space
Transform points in clothoid space to output space. Args: start (np.ndarray): the starting point intermediate (np.ndarray): the intermediate sample point goal (np.ndarray): the goal point params (ClothoidParameters): the clothoid parameters points_in_clothoid_space (np.ndarray): points in clothoid space that are to be transformed Returns: np.ndarray: the transformed points in output space
625941b623e79379d52ee371
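The heart of the projection is one linear solve: find the 2x2 matrix M that sends the two clothoid-space anchor points onto their output-space counterparts, then apply it to every sample. A toy check of just that step, with made-up data:

import numpy as np

C = np.array([[1.0, 0.2], [0.5, 1.0]])    # rows: two points in clothoid space
O = np.array([[3.0, 1.0], [2.0, 4.0]])    # rows: their images in output space
M = np.linalg.solve(C, O)                 # solves C @ M = O for M
assert np.allclose(C @ M, O)              # any clothoid point p then maps as p @ M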
def verify_spreadsheet(filepath): <NEW_LINE> <INDENT> df = pd.read_excel(filepath, header=None, nrows=11) <NEW_LINE> assert df.iloc[0][0] == 'Category' <NEW_LINE> assert df.iloc[0][7] == 'Swimming Classification' <NEW_LINE> assert df.iloc[0][9] == 1 <NEW_LINE> assert df.iloc[1][9] == 2 <NEW_LINE> assert df.iloc[2][9] == 3 <NEW_LINE> assert df.iloc[3][9] == 4 <NEW_LINE> assert df.iloc[0][10] == 'Not Swimming' <NEW_LINE> assert df.iloc[1][10] == 'Swimming' <NEW_LINE> assert df.iloc[2][10] == 'Push Off Event' <NEW_LINE> assert df.iloc[3][10] == 'Turn event ' <NEW_LINE> assert int(df.iloc[0][15]) == 1 <NEW_LINE> assert int(df.iloc[1][15]) == 2 <NEW_LINE> assert int(df.iloc[2][15]) == 3 <NEW_LINE> assert int(df.iloc[3][15]) == 4 <NEW_LINE> assert int(df.iloc[4][15]) == 5 <NEW_LINE> assert int(df.iloc[5][15]) == 6 <NEW_LINE> assert int(df.iloc[6][15]) == 7 <NEW_LINE> assert int(df.iloc[7][15]) == 8 <NEW_LINE> assert df.iloc[0][16] == 'R Hand Entry' <NEW_LINE> assert df.iloc[4][16] == 'L Hand Entry'
Ensure that spreadsheet conforms to known standard. Raises AssertionError if not :param filepath: :return:
625941b6d8ef3951e3243346
@click.group( cls=HelpColorsGroup, help_headers_color="blue", help_options_color="yellow" ) <NEW_LINE> @click.version_option(version=__version__) <NEW_LINE> def cli() -> None: <NEW_LINE> <INDENT> pass
Command Line interface for working with Crimson Hexagon API.
625941b68e71fb1e9831d5b6
def test_get_logged_in_athlete_zones(self): <NEW_LINE> <INDENT> pass
Test case for get_logged_in_athlete_zones Get Zones # noqa: E501
625941b60c0af96317bb7ff2
def execute(self): <NEW_LINE> <INDENT> if not super(AddCommand, self).execute(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if self.text: <NEW_LINE> <INDENT> self._preprocess_input_todo() <NEW_LINE> self.todo = self.todolist.add(self.text) <NEW_LINE> self._postprocess_input_todo() <NEW_LINE> self.out(pretty_print(self.todo, [self.todolist.pp_number()])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.error(self.usage())
Adds a todo item to the list.
625941b61d351010ab855926
def generate(self, mac): <NEW_LINE> <INDENT> nic = self.__mac2nic(mac) <NEW_LINE> pin = nic ^ 0x55AA55 <NEW_LINE> pin = pin ^ (((pin & 0x0F) << 4) + ((pin & 0x0F) << 8) + ((pin & 0x0F) << 12) + ((pin & 0x0F) << 16) + ((pin & 0x0F) << 20)) <NEW_LINE> pin = pin % int(10e6) <NEW_LINE> if pin < int(10e5): <NEW_LINE> <INDENT> pin += ((pin % 9) * int(10e5)) + int(10e5); <NEW_LINE> <DEDENT> return (pin * 10) + self.wps.checksum(pin)
Calculates the default WPS pin from the NIC portion of the MAC address. @mac - The MAC address string. Returns the calculated default WPS pin, including checksum.
625941b6dc8b845886cb533d
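The final digit comes from self.wps.checksum; the widely used WPS pin checksum, a weighted mod-10 over the 7-digit pin body, is sketched below on the assumption that this is what the wps helper implements.

def wps_checksum(pin):
    # Digits get weights 3 and 1 alternately, right to left.
    accum = 0
    while pin:
        accum += 3 * (pin % 10)
        pin //= 10
        accum += pin % 10
        pin //= 10
    return (10 - accum % 10) % 10

print(wps_checksum(1234567))  # 0, giving the full pin 12345670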
def create_cluster_parameter_group(ParameterGroupName=None, ParameterGroupFamily=None, Description=None, Tags=None): <NEW_LINE> <INDENT> pass
Creates an Amazon Redshift parameter group. Creating parameter groups is independent of creating clusters. You can associate a cluster with a parameter group when you create the cluster. You can also associate an existing cluster with a parameter group after the cluster is created by using ModifyCluster . Parameters in the parameter group define specific behavior that applies to the databases you create on the cluster. For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide . See also: AWS API Documentation Exceptions :example: response = client.create_cluster_parameter_group( ParameterGroupName='string', ParameterGroupFamily='string', Description='string', Tags=[ { 'Key': 'string', 'Value': 'string' }, ] ) :type ParameterGroupName: string :param ParameterGroupName: [REQUIRED] The name of the cluster parameter group. Constraints: Must be 1 to 255 alphanumeric characters or hyphens First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Must be unique withing your AWS account. Note This value is stored as a lower-case string. :type ParameterGroupFamily: string :param ParameterGroupFamily: [REQUIRED] The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters. To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups . By default, Amazon Redshift returns a list of all the parameter groups that are owned by your AWS account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is 'redshift-1.0'. :type Description: string :param Description: [REQUIRED] A description of the parameter group. :type Tags: list :param Tags: A list of tag instances. (dict) --A tag consisting of a name/value pair for a resource. Key (string) --The key, or name, for the resource tag. Value (string) --The value for the resource tag. :rtype: dict ReturnsResponse Syntax { 'ClusterParameterGroup': { 'ParameterGroupName': 'string', 'ParameterGroupFamily': 'string', 'Description': 'string', 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ] } } Response Structure (dict) -- ClusterParameterGroup (dict) -- Describes a parameter group. ParameterGroupName (string) -- The name of the cluster parameter group. ParameterGroupFamily (string) -- The name of the cluster parameter group family that this cluster parameter group is compatible with. Description (string) -- The description of the parameter group. Tags (list) -- The list of tags for the cluster parameter group. (dict) -- A tag consisting of a name/value pair for a resource. Key (string) -- The key, or name, for the resource tag. Value (string) -- The value for the resource tag. 
Exceptions Redshift.Client.exceptions.ClusterParameterGroupQuotaExceededFault Redshift.Client.exceptions.ClusterParameterGroupAlreadyExistsFault Redshift.Client.exceptions.TagLimitExceededFault Redshift.Client.exceptions.InvalidTagFault :return: { 'ClusterParameterGroup': { 'ParameterGroupName': 'string', 'ParameterGroupFamily': 'string', 'Description': 'string', 'Tags': [ { 'Key': 'string', 'Value': 'string' }, ] } } :returns: Redshift.Client.exceptions.ClusterParameterGroupQuotaExceededFault Redshift.Client.exceptions.ClusterParameterGroupAlreadyExistsFault Redshift.Client.exceptions.TagLimitExceededFault Redshift.Client.exceptions.InvalidTagFault
625941b6498bea3a759b98bb
def test_merge_commutativity_on_interval_rules(self): <NEW_LINE> <INDENT> rule1 = IntervalRule('Rule', ContinuousRule('Rule', True, 0), ContinuousRule('Rule', False, 4)) <NEW_LINE> rule2 = IntervalRule('Rule', ContinuousRule('Rule', True, 2), ContinuousRule('Rule', False, 6)) <NEW_LINE> new_rule1 = rule1.merge_with(rule2) <NEW_LINE> new_rule2 = rule2.merge_with(rule1) <NEW_LINE> self.assertEqual(new_rule1.left_rule.value, new_rule2.left_rule.value) <NEW_LINE> self.assertEqual(new_rule1.right_rule.value, new_rule2.right_rule.value)
Interval rule merging should be commutative.
625941b676e4537e8c351480
def enqueue(self, value): <NEW_LINE> <INDENT> self.holder.append(value)
Adds a value to the end of the queue. @param value: The value to be added to the queue. @type: C{object}
625941b6d18da76e235322da
def add_row(self, *fields, **fieldsbyname): <NEW_LINE> <INDENT> row = list(fields) <NEW_LINE> if len(fieldsbyname) > 0: <NEW_LINE> <INDENT> names = self.processor.attribute_names <NEW_LINE> if len(names) >= len(row): <NEW_LINE> <INDENT> row.extend([0] * (len(names) - len(row))) <NEW_LINE> <DEDENT> names_to_index = {} <NEW_LINE> for i,name in enumerate(names): <NEW_LINE> <INDENT> names_to_index[name] = i <NEW_LINE> <DEDENT> for name, value in fieldsbyname.items(): <NEW_LINE> <INDENT> index = names_to_index[name] <NEW_LINE> row[index] = value <NEW_LINE> <DEDENT> <DEDENT> self.processor.write_row(row)
Add a row to the report; columns can be added by name or by position in the list. If columns are given by name, the order does not matter and will always follow the order given in the 'attribute_names' option specified when creating the ReportTable. Example usage:: report.add_row( particle.age, particle.temperature_at_time, particle.luminosity_at_time ) report.add_row( temperature_at_time = particle.temperature_at_time, age = particle.age, luminosity_at_time = particle.luminosity_at_time )
625941b666673b3332b91ea0
def get_next_batch(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self.get_next_results_kwargs(): <NEW_LINE> <INDENT> self.query_result = self.twitter_api.search.tweets(**self.next_query_kwargs) <NEW_LINE> print('Getting batch {}'.format(self.current_batch)) <NEW_LINE> self.current_batch += 1 <NEW_LINE> return True <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print('Exception in get_next_batch(): ' + str(e)) <NEW_LINE> return False <NEW_LINE> <DEDENT> return False
Queries search api until no more tweets for a particular status remain or batch_size is reached Returns: True/False (boolean) indicating if more tweets are available
625941b6cc40096d6159575d
def __init__(self, env_var_name): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.env_var_name = env_var_name
CredentialLoader that loads credential from environment variable. Args: env_var_name: Name of environment variable to load (e.g. 'MY_SECRET_PW')
625941b68e71fb1e9831d5b7
def argmax(self,matrix, dim=0): <NEW_LINE> <INDENT> _, index = torch.max(matrix, dim=dim) <NEW_LINE> return index
(0.5, 0.4, 0.3)
625941b656b00c62f0f14466
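torch.max with a dim argument returns a (values, indices) pair, and the wrapper keeps only the second element; a quick demonstration:

import torch

m = torch.tensor([[0.5, 0.4, 0.3],
                  [0.1, 0.9, 0.0]])
print(torch.max(m, dim=1)[1])  # tensor([0, 1]): index of each row's maximum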
@app.route('/about') <NEW_LINE> def about(): <NEW_LINE> <INDENT> if g.user: <NEW_LINE> <INDENT> items = "eingeloggt als " + g.user['user_name'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> items = "Nicht eingeloggt" <NEW_LINE> <DEDENT> return render_template('about.htm', items=items)
Registers the 'about' page
625941b6ec188e330fd5a5b1
def load_from(self, buffer: bytes, offset: int = 0, length: int = None) -> int: <NEW_LINE> <INDENT> my_offset, option_len = self.parse_option_header(buffer, offset, length) <NEW_LINE> if option_len != 4: <NEW_LINE> <INDENT> raise ValueError('INF_MAX_RT Options must have length 4') <NEW_LINE> <DEDENT> self.inf_max_rt = unpack_from('!I', buffer, offset=offset + my_offset)[0] <NEW_LINE> my_offset += 4 <NEW_LINE> return my_offset
Load the internal state of this object from the given buffer. The buffer may contain more data after the structured element is parsed. This data is ignored. :param buffer: The buffer to read data from :param offset: The offset in the buffer where to start reading :param length: The amount of data we are allowed to read from the buffer :return: The number of bytes used from the buffer
625941b626068e7796caeae1
def __init__(self, source): <NEW_LINE> <INDENT> super().__init__(source, mode="t", fmt="PDB")
Return SeqRecord objects for each chain in a PDB file. Arguments: - source - input stream opened in text mode, or a path to a file The sequences are derived from the SEQRES lines in the PDB file header, not the atoms of the 3D structure. Specifically, these PDB records are handled: DBREF, SEQADV, SEQRES, MODRES See: http://www.wwpdb.org/documentation/format23/sect3.html This gets called internally via Bio.SeqIO for the SEQRES based interpretation of the PDB file format: >>> from Bio import SeqIO >>> for record in SeqIO.parse("PDB/1A8O.pdb", "pdb-seqres"): ... print("Record id %s, chain %s" % (record.id, record.annotations["chain"])) ... print(record.dbxrefs) ... Record id 1A8O:A, chain A ['UNP:P12497', 'UNP:POL_HV1N5'] Equivalently, >>> with open("PDB/1A8O.pdb") as handle: ... for record in PdbSeqresIterator(handle): ... print("Record id %s, chain %s" % (record.id, record.annotations["chain"])) ... print(record.dbxrefs) ... Record id 1A8O:A, chain A ['UNP:P12497', 'UNP:POL_HV1N5'] Note the chain is recorded in the annotations dictionary, and any PDB DBREF lines are recorded in the database cross-references list.
625941b6d164cc6175782b57
def create_connection(db_file): <NEW_LINE> <INDENT> conn = None <NEW_LINE> try: <NEW_LINE> <INDENT> conn = sqlite3.connect(db_file) <NEW_LINE> print("SQLite version: " + sqlite3.version) <NEW_LINE> <DEDENT> except Error as e: <NEW_LINE> <INDENT> print(e) <NEW_LINE> <DEDENT> return conn
create a database connection to a SQLite database
625941b68a349b6b435e7f7e
def canonicalize_energy_names(energy_dict, canonical_keys): <NEW_LINE> <INDENT> ret = dict() <NEW_LINE> for key, energy in energy_dict.items(): <NEW_LINE> <INDENT> canonical_key = canonical_keys.get(key) <NEW_LINE> if canonical_key is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif isinstance(canonical_key, list): <NEW_LINE> <INDENT> for k in canonical_key: <NEW_LINE> <INDENT> ret[k] = energy <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> ret[canonical_key] = energy <NEW_LINE> <DEDENT> <DEDENT> return ret
Adjust the keys in energy_dict to the canonical names. Parameters ---------- energy_dict : dict canonical_keys : dict Returns ------- normalized : dict
625941b694891a1f4081b8b1
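An illustrative call with a made-up key map; a list-valued entry fans the same energy out to several canonical names, while unmapped keys are silently dropped.

canonical_keys = {'e_bond': 'bond', 'e_nb': ['vdw', 'coulomb']}
print(canonicalize_energy_names({'e_bond': 1.0, 'e_nb': 2.0, 'other': 3.0},
                                canonical_keys))
# {'bond': 1.0, 'vdw': 2.0, 'coulomb': 2.0}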
def index(request): <NEW_LINE> <INDENT> return render(request, 'blog/index.html')
Homepage of the blog
625941b6d6c5a10208143e51
def Flag(modes, strict): <NEW_LINE> <INDENT> del modes.unit <NEW_LINE> if strict[1][0:3] == 'max': <NEW_LINE> <INDENT> if len(strict[1])==3: <NEW_LINE> <INDENT> return (modes<strict[0]).to_dqflag(name='') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return (modes<=strict[0]).to_dqflag(name='') <NEW_LINE> <DEDENT> <DEDENT> elif strict[1][0:3] == 'min': <NEW_LINE> <INDENT> if len(strict[1])==3: <NEW_LINE> <INDENT> return (modes>strict[0]).to_dqflag(name='') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return (modes>=strict[0]).to_dqflag(name='') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return (modes==strict[0]).to_dqflag(name='')
Return the DQ flags for a particular Times Series, reflecting when a particular condition has been met. Arguments: modes -- Time Series to be investigated. strict -- A condition of form (mode, '{'max'/'min'/'eq'}{'eq'/''}') that can be met for the corresponding time to be in an active segment in 'modes'. Returns: flag -- A DQFlag meeting the requested requirements.
625941b62c8b7c6e89b355ce
def process_resource(self, req, resp, resource, params): <NEW_LINE> <INDENT> authentication_required = True <NEW_LINE> try: <NEW_LINE> <INDENT> if req.method in resource.no_authentication_methods: <NEW_LINE> <INDENT> authentication_required = False <NEW_LINE> <DEDENT> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if authentication_required: <NEW_LINE> <INDENT> if req.headers.get('X-IDENTITY-STATUS') == 'Confirmed': <NEW_LINE> <INDENT> req.context = deckhand.context.RequestContext.from_environ( req.env) <NEW_LINE> <DEDENT> elif CONF.development_mode: <NEW_LINE> <INDENT> req.context = deckhand.context.get_context() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise falcon.HTTPUnauthorized() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> req.context = deckhand.context.RequestContext.from_environ(req.env) <NEW_LINE> <DEDENT> req.context.end_user = req.headers.get('X-END-USER') <NEW_LINE> req.context.context_marker = req.headers.get('X-CONTEXT-MARKER')
Handle the authentication needs of the routed request. :param req: ``falcon`` request object that will be examined for method :param resource: ``falcon`` resource class that will be examined for authentication needs by looking at the no_authentication_methods list of http methods. By default, this will assume that all requests need authentication unless noted in this array. Note that this does not bypass any authorization checks, which will fail if the user is not authenticated. :raises: falcon.HTTPUnauthorized: when value of the 'X-Identity-Status' header is not 'Confirmed' and anonymous access is disallowed.
625941b6627d3e7fe0d68c58
def StopServer(self, force=False): <NEW_LINE> <INDENT> if force or self._http_server_proc: <NEW_LINE> <INDENT> logging.info('Stopping http server') <NEW_LINE> kill_proc = subprocess.Popen(self._stop_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) <NEW_LINE> logging.info('%s\n%s' % (kill_proc.stdout.read(), kill_proc.stderr.read())) <NEW_LINE> self._http_server_proc = None <NEW_LINE> if self._cygserver_path: <NEW_LINE> <INDENT> subprocess.Popen(["taskkill.exe", "/f", "/im", "cygserver.exe"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
If we started an httpd.exe process, or if force is True, call self._stop_command (passed in on init so it can be platform-dependent). This will presumably kill it, and may also kill any other httpd.exe processes that are running.
625941b623849d37ff7b2e9c
def light_sample(self): <NEW_LINE> <INDENT> raise NotImplementedError()
Return a (code, props) tuple used to create the light sample shader!
625941b615baa723493c3d7c
def _check_with_importpath(self, field, value): <NEW_LINE> <INDENT> self.logger.debug('Validating importpath: {}'.format(value)) <NEW_LINE> parts = [s.isidentifier() for s in value.split('.')] <NEW_LINE> if not all(parts): <NEW_LINE> <INDENT> self._error(field, "Must be a python import path")
Validates that the value is a usable import path for an entity class. Valid examples: * pkg1.pkg2.mod1.class * class_name Invalid example: * .class_name Args: value: A string Returns: None; a validation error is recorded on the field if the value is not a valid import path
625941b6cad5886f8bd26dec
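The _check_with_* naming follows Cerberus's check_with convention; wiring the rule into a validator would look roughly like this sketch (the EntityValidator class and schema are assumptions for illustration).

from cerberus import Validator

class EntityValidator(Validator):
    def _check_with_importpath(self, field, value):
        # Each dotted segment must be a valid Python identifier.
        if not all(s.isidentifier() for s in value.split('.')):
            self._error(field, "Must be a python import path")

schema = {'entity_class': {'type': 'string', 'check_with': 'importpath'}}
v = EntityValidator(schema)
print(v.validate({'entity_class': 'pkg1.pkg2.mod1.MyEntity'}))  # True
print(v.validate({'entity_class': '.bad_path'}))                # False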
def getDesignShearStrength(self, doubleShear= False): <NEW_LINE> <INDENT> retval= self.getNumberOfBolts()*self.bolt.getDesignShearStrength() <NEW_LINE> if(doubleShear): <NEW_LINE> <INDENT> retval*=2.0 <NEW_LINE> <DEDENT> return retval
Return the design shear strength of the bolt group. :param doubleShear: true if the bolts work in double shear.
625941b61b99ca400220a8ba
def _ensure_db(self): <NEW_LINE> <INDENT> if not self._db: <NEW_LINE> <INDENT> if not self.dbfilename: <NEW_LINE> <INDENT> self.dbfilename=":memory:" <NEW_LINE> <DEDENT> self._db=sqlite3.Connection(self.dbfilename, detect_types=sqlite3.PARSE_COLNAMES | sqlite3.PARSE_DECLTYPES) <NEW_LINE> <DEDENT> return self._db
The database isn't opened until first use. This function ensures it is now open.
625941b63617ad0b5ed67d09
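A short sketch of how this lazy-open pattern behaves; the LazyStore class name is an assumption, only the _ensure_db logic mirrors the code above.

import sqlite3

class LazyStore:
    def __init__(self, dbfilename=None):
        self.dbfilename = dbfilename
        self._db = None  # connection is deferred until first use

    def _ensure_db(self):
        if not self._db:
            # fall back to an in-memory database when no file is given
            if not self.dbfilename:
                self.dbfilename = ':memory:'
            self._db = sqlite3.Connection(
                self.dbfilename,
                detect_types=sqlite3.PARSE_COLNAMES | sqlite3.PARSE_DECLTYPES)
        return self._db

store = LazyStore()              # nothing is opened yet
db = store._ensure_db()          # first call opens ':memory:'
assert db is store._ensure_db()  # subsequent calls reuse the connection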
def set_attr(self, key, value): <NEW_LINE> <INDENT> self.__dict__[key] = value
This method sets an attribute value on the LeFilter object.
625941b6f9cc0f698b140410
def display_info(data): <NEW_LINE> <INDENT> for div in DIVISIONS: <NEW_LINE> <INDENT> for sex in GENDERS: <NEW_LINE> <INDENT> temp = data[(data[COL_GENDER] == sex) & (data[COL_DIVISION] == div)].index <NEW_LINE> print('Div: ' + sex + ' ' + div + ' {}'.format(len(temp))) <NEW_LINE> <DEDENT> <DEDENT> print(data.groupby('School').size().head())
Displays summary information about the registered archers.
625941b692d797404e303f95
def trilaterate(self, s2, s3): <NEW_LINE> <INDENT> P1, P2, P3 = map(lambda x: np.array(x.p.ecef()), [self, s2, s3]) <NEW_LINE> DistA, DistB, DistC = map(lambda x: x.r, [self, s2, s3]) <NEW_LINE> ex = (P2 - P1) / (np.linalg.norm(P2 - P1)) <NEW_LINE> i = np.dot(ex, P3 - P1) <NEW_LINE> ey = (P3 - P1 - i * ex) / (np.linalg.norm(P3 - P1 - i * ex)) <NEW_LINE> ez = np.cross(ex, ey) <NEW_LINE> d = np.linalg.norm(P2 - P1) <NEW_LINE> j = np.dot(ey, P3 - P1) <NEW_LINE> try: <NEW_LINE> <INDENT> x = (pow(DistA, 2) - pow(DistB, 2) + pow(d, 2)) / (2 * d) <NEW_LINE> y = ((pow(DistA, 2) - pow(DistC, 2) + pow(i, 2) + pow(j, 2)) / (2 * j)) - ((i / j) * x) <NEW_LINE> dz = pow(DistA, 2) - pow(x, 2) - pow(y, 2) <NEW_LINE> z = np.sqrt(dz) <NEW_LINE> <DEDENT> except TypeError as type_err: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> xyz = P1 + x * ex + y * ey + z * ez <NEW_LINE> lat = degrees(asin(xyz[2] / earthR)) <NEW_LINE> lon = degrees(atan2(xyz[1], xyz[0])) <NEW_LINE> if isnan(lat) or isnan(lon): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return Point(lat, lon)
Trilaterate a position from this satellite and two others (see http://en.wikipedia.org/wiki/Trilateration), assuming elevation = 0. Length unit: m.
625941b671ff763f4b549498
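The geometry can be sanity-checked in isolation: pick a known ECEF point, measure its distance to three beacons, and run the same basis-vector construction. This standalone numpy sketch (function and variable names are illustrative) recovers the point.

import numpy as np

def trilaterate_ecef(P1, P2, P3, dA, dB, dC):
    # Build an orthonormal basis with P1 at the origin and P2 on the x-axis.
    ex = (P2 - P1) / np.linalg.norm(P2 - P1)
    i = np.dot(ex, P3 - P1)
    ey = (P3 - P1 - i * ex) / np.linalg.norm(P3 - P1 - i * ex)
    ez = np.cross(ex, ey)
    d = np.linalg.norm(P2 - P1)
    j = np.dot(ey, P3 - P1)
    x = (dA**2 - dB**2 + d**2) / (2 * d)
    y = (dA**2 - dC**2 + i**2 + j**2) / (2 * j) - (i / j) * x
    z = np.sqrt(dA**2 - x**2 - y**2)   # +z and -z are both valid solutions
    return P1 + x * ex + y * ey + z * ez

P1, P2, P3 = np.array([0., 0, 0]), np.array([10., 0, 0]), np.array([0., 10, 0])
target = np.array([3., 4, 5])
dists = [np.linalg.norm(target - P) for P in (P1, P2, P3)]
print(trilaterate_ecef(P1, P2, P3, *dists))   # ~[3. 4. 5.]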
def compute_beacon_values(t, num_inner_rows=7): <NEW_LINE> <INDENT> s1, s2 = range(t.shape[1] // 2), range(t.shape[1] // 2, t.shape[1]) <NEW_LINE> indices = [j for i in zip(s1, s2) for j in i] <NEW_LINE> prep_t = np.column_stack([t[:, i] for i in indices]) <NEW_LINE> for i in range(1, num_inner_rows): <NEW_LINE> <INDENT> prep_t[t.shape[0]-i] = np.roll(prep_t[t.shape[0]-i], -i) <NEW_LINE> <DEDENT> beacon_values = [] <NEW_LINE> for i in range(0, t.shape[1], 2): <NEW_LINE> <INDENT> temp = prep_t[t.shape[0]-num_inner_rows:, i:i+2] <NEW_LINE> temp = ''.join([str(bit) for smh in temp for bit in smh]) <NEW_LINE> beacon_values.append(temp) <NEW_LINE> <DEDENT> return beacon_values
Compute beacon values for a given iris template. These values help to uniquely position the iris in each beacon space. This part of the algorithm may be called locality sensitive hashing. :param t: np.ndarray Iris template. :param num_inner_rows: int In each block, beacon value is obtained by concatenating this many least significant bits of both bytes. The selected bits correspond to the iris region near the pupil. Altogether, there are 2^m unique beacon values possible in each beacon space, where m = 2 * num_inner_rows. :return: beacon_values: list of str
625941b67b25080760e39265
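The column interleaving is the subtle step: the template's left half (columns 0 .. w/2 - 1) and right half are zipped together so each 2-column block pairs one column from each half. A tiny sketch of just that step, for a hypothetical width of 8:

# For a template with 8 columns, the two halves are zipped column-by-column.
width = 8
s1, s2 = range(width // 2), range(width // 2, width)
indices = [j for i in zip(s1, s2) for j in i]
print(indices)  # [0, 4, 1, 5, 2, 6, 3, 7]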
def login(self, user, passwd): <NEW_LINE> <INDENT> import http.client <NEW_LINE> http.client._MAXHEADERS = 200 <NEW_LINE> session = requests.Session() <NEW_LINE> session.cookies.update({'sessionid': '', 'mid': '', 'ig_pr': '1', 'ig_vw': '1920', 'csrftoken': '', 's_network': '', 'ds_user_id': ''}) <NEW_LINE> session.headers.update(self._default_http_header()) <NEW_LINE> session.headers.update({'X-CSRFToken': self.get_json('', {})['config']['csrf_token']}) <NEW_LINE> self._sleep() <NEW_LINE> login = session.post('https://www.instagram.com/accounts/login/ajax/', data={'password': passwd, 'username': user}, allow_redirects=True) <NEW_LINE> if login.status_code != 200: <NEW_LINE> <INDENT> raise ConnectionException("Login error: {} {}".format(login.status_code, login.reason)) <NEW_LINE> <DEDENT> resp_json = login.json() <NEW_LINE> if resp_json['status'] != 'ok': <NEW_LINE> <INDENT> if 'message' in resp_json: <NEW_LINE> <INDENT> raise ConnectionException("Login error: \"{}\" status, message \"{}\".".format(resp_json['status'], resp_json['message'])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ConnectionException("Login error: \"{}\" status.".format(resp_json['status'])) <NEW_LINE> <DEDENT> <DEDENT> if not resp_json['authenticated']: <NEW_LINE> <INDENT> if resp_json['user']: <NEW_LINE> <INDENT> raise BadCredentialsException('Login error: Wrong password.') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise InvalidArgumentException('Login error: User {} does not exist.'.format(user)) <NEW_LINE> <DEDENT> <DEDENT> session.headers.update({'X-CSRFToken': login.cookies['csrftoken']}) <NEW_LINE> self._session = session <NEW_LINE> self.username = user
Not meant to be used directly, use :meth:`Instaloader.login`. :raises InvalidArgumentException: If the provided username does not exist. :raises BadCredentialsException: If the provided password is wrong. :raises ConnectionException: If connection to Instagram failed.
625941b621bff66bcd684760
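As the docstring notes, callers go through Instaloader.login rather than this internal method; a minimal driver sketch (credentials are placeholders):

from instaloader import (Instaloader, BadCredentialsException,
                         ConnectionException)

L = Instaloader()
try:
    L.login('your_username', 'your_password')  # wraps the method above
except BadCredentialsException:
    print('Wrong password.')
except ConnectionException as exc:
    print('Login failed:', exc)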
def kill(self): <NEW_LINE> <INDENT> return self._subprocess.kill()
Terminate the process.
625941b61f5feb6acb0c4960
def debug_m4a(self, action: str, track_id: int): <NEW_LINE> <INDENT> self.PROCESSOR_LOGGER.log(DEBUG, f' {action:^24} | Track: {track_id:2}')
Log M4A processor information.
625941b601c39578d7e74c4e
def test_createCallSetDict_neg(self): <NEW_LINE> <INDENT> conf = self.tmpdir.join("vcf5_import.config") <NEW_LINE> conf.write(json.dumps(self.config)) <NEW_LINE> vcfile = self.tmpdir.join("test5.vcf") <NEW_LINE> test1_header = list(test_header) <NEW_LINE> test1_header.append(sampleN) <NEW_LINE> test1_header.append(sampleT) <NEW_LINE> test1_header.append('extraSample') <NEW_LINE> with open(str(vcfile), 'w') as inVCF: <NEW_LINE> <INDENT> inVCF.write("{0}\n".format(self.header)) <NEW_LINE> inVCF.write("{0}\n".format("\t".join(test1_header))) <NEW_LINE> inVCF.write("{0}\n".format("\t".join(test_data))) <NEW_LINE> <DEDENT> with pytest.raises(Exception) as exec_info, VCF(str(vcfile), str(conf)) as vc: <NEW_LINE> <INDENT> vc.createCallSetDict() <NEW_LINE> <DEDENT> assert "Currently only single" in str(exec_info.value) <NEW_LINE> conf = self.tmpdir.join("vcf7_import.config") <NEW_LINE> this_conf = dict(self.config) <NEW_LINE> this_conf['sample_name'] = {'derive_from': 'tag', 'split_by': 'SampleName'} <NEW_LINE> conf.write(json.dumps(this_conf)) <NEW_LINE> vcfile = self.tmpdir.join("test7.vcf") <NEW_LINE> test3_header = list(test_header) <NEW_LINE> test3_header.append('NORMAL') <NEW_LINE> test3_header.append('TUMOUR') <NEW_LINE> test3_header.append('EXTRA') <NEW_LINE> with open(str(vcfile), 'w') as inVCF: <NEW_LINE> <INDENT> inVCF.write("{0}\n".format(self.header)) <NEW_LINE> inVCF.write("{0}\n".format( "\n".join([normal_tag, tumor_tag, extra_tag]))) <NEW_LINE> inVCF.write("{0}\n".format("\t".join(test3_header))) <NEW_LINE> inVCF.write("{0}\n".format("\t".join(test_data))) <NEW_LINE> <DEDENT> with pytest.raises(Exception) as exec_info, VCF(str(vcfile), str(conf)) as vc: <NEW_LINE> <INDENT> vc.createCallSetDict() <NEW_LINE> <DEDENT> assert "Currently only single" in str(exec_info.value)
Negative cases: i) a TN VCF with an extra sample column, ii) callset_loc set in the config, iii) a TN VCF with sample tags and an extra sample
625941b666656f66f7cbbfb4
def insert(self, data): <NEW_LINE> <INDENT> node = Node(data) <NEW_LINE> if self.root_node is None: <NEW_LINE> <INDENT> self.root_node = node <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> current = self.root_node <NEW_LINE> parent = None <NEW_LINE> while True: <NEW_LINE> <INDENT> parent = current <NEW_LINE> if node.data < current.data: <NEW_LINE> <INDENT> current = current.left_child <NEW_LINE> if current is None: <NEW_LINE> <INDENT> parent.left_child = node <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> current = current.right_child <NEW_LINE> if current is None: <NEW_LINE> <INDENT> parent.right_child = node <NEW_LINE> return
Insert the number into our tree. :param data: int value we want to insert :return: None
625941b69f2886367277a69c
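A quick usage sketch, assuming the surrounding module's Node class and a tree object exposing the insert above; the in-order walk is added here for illustration and prints the values in sorted order.

tree = BinarySearchTree()   # assumed host class with root_node = None
for value in [7, 3, 9, 1, 5]:
    tree.insert(value)

def in_order(node):
    # left subtree, node, right subtree -> ascending order for a BST
    if node is None:
        return []
    return in_order(node.left_child) + [node.data] + in_order(node.right_child)

print(in_order(tree.root_node))  # [1, 3, 5, 7, 9]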
@pytest.fixture(scope="function") <NEW_LINE> def pillowclasses(pillowmocks): <NEW_LINE> <INDENT> return (MockPILImage(), MockExif())
Mocks out PIL classes and returns stub classes.
625941b6925a0f43d2549c7e
def index_new_docs( new_docs_to_index: List[DocumentToIndex], new_documents: Documents, lexicon: Lexicon, index_dir: str, data_dir: str, chunk_size: Optional[int] ): <NEW_LINE> <INDENT> assert len(new_docs_to_index) == len(new_documents) <NEW_LINE> base_doc_id = min(d.id for d in new_documents) <NEW_LINE> max_doc_id = max(d.id for d in new_documents) <NEW_LINE> index_and_doc_paths = defaultdict(list) <NEW_LINE> for doc_to_index, doc in zip(new_docs_to_index, new_documents): <NEW_LINE> <INDENT> assert doc_to_index.name == doc.name <NEW_LINE> if chunk_size is None: <NEW_LINE> <INDENT> doc_index_out_path = os.path.join( index_dir, '{:07d}-{:07d}.bin'.format( base_doc_id, base_doc_id + len(new_docs_to_index))) <NEW_LINE> <DEDENT> elif chunk_size == 1: <NEW_LINE> <INDENT> doc_index_out_path = os.path.join( index_dir, '{:07d}.bin'.format(doc.id)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> chunk_idx = int((doc.id - base_doc_id) / chunk_size) * chunk_size <NEW_LINE> doc_index_out_path = os.path.join( index_dir, '{:07d}-{:07d}.bin'.format( base_doc_id + chunk_idx, min(base_doc_id + chunk_idx + chunk_size, max_doc_id))) <NEW_LINE> <DEDENT> doc_data_out_path = os.path.join( data_dir, '{}.bin'.format(doc.id)) <NEW_LINE> index_and_doc_paths[doc_index_out_path].append( (doc.id, doc_to_index.path, doc_data_out_path)) <NEW_LINE> <DEDENT> index_documents(list(index_and_doc_paths.items()), lexicon)
Builds inverted indexes and re-encodes documents in binary format
625941b63cc13d1c6d3c718f
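The chunk naming is just integer arithmetic on document ids; this little sketch reuses the format strings above (with made-up ids) to show which index file each document lands in for chunk_size=10.

base_doc_id, max_doc_id, chunk_size = 100, 125, 10
for doc_id in (100, 104, 109, 110, 119, 120, 125):
    chunk_idx = int((doc_id - base_doc_id) / chunk_size) * chunk_size
    name = '{:07d}-{:07d}.bin'.format(
        base_doc_id + chunk_idx,
        min(base_doc_id + chunk_idx + chunk_size, max_doc_id))
    print(doc_id, '->', name)
# docs 100..109 -> 0000100-0000110.bin, 110..119 -> 0000110-0000120.bin,
# 120..125 -> 0000120-0000125.bin (last chunk is clipped at max_doc_id)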
def test_no_diff(self): <NEW_LINE> <INDENT> self._add_plugin(self.jigconfig, 'plugin01') <NEW_LINE> set_jigconfig(self.gitrepodir, config=self.jigconfig) <NEW_LINE> self.commit( self.gitrepodir, name='a.txt', content='a') <NEW_LINE> self.runner.results(self.gitrepodir) <NEW_LINE> self.assertEqual( 'No staged changes in the repository, skipping jig.\n', self.output)
If jig is run on a repository without any staged changes.
625941b64f6381625f114851
def confuse_matrix(gold, pred): <NEW_LINE> <INDENT> TP, FP, TN, FN = 0, 0, 0, 0 <NEW_LINE> for item in range(gold.shape[0]): <NEW_LINE> <INDENT> for cls in range(gold.shape[1]): <NEW_LINE> <INDENT> if gold[item][cls] == pred[item][cls]: <NEW_LINE> <INDENT> if gold[item][cls] == 1: <NEW_LINE> <INDENT> TP += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> TN += 1 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if gold[item][cls] == 1: <NEW_LINE> <INDENT> FN += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> FP += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return TP, FP, TN, FN
:param gold: gold labels :param pred: predicted labels :return: confusion-matrix elements (TP, FP, TN, FN)
625941b63346ee7daa2b2b73
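A small multi-label example, with precision and recall derived from the counts (the metrics are standard definitions, not part of the original function):

import numpy as np

gold = np.array([[1, 0, 1],
                 [0, 1, 0]])
pred = np.array([[1, 1, 0],
                 [0, 1, 0]])

TP, FP, TN, FN = confuse_matrix(gold, pred)
print(TP, FP, TN, FN)        # 2 1 2 1
precision = TP / (TP + FP)   # 0.666...
recall = TP / (TP + FN)      # 0.666...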
def p_unary(t): <NEW_LINE> <INDENT> t[0] = ast.UnaryOp(t[1], t[2])
unary : unary_op expression
625941b6d7e4931a7ee9dd26
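In PLY, the docstring is the grammar production and t indexes the symbols: t[0] is the left-hand side and t[1], t[2], ... the right-hand-side symbols. A sibling rule would look like this sketch (the BinaryOp node is an assumption about the project's own ast module):

def p_binary(t):
    'expression : expression binary_op expression'
    # t[1] and t[3] are the operand expressions, t[2] the operator token.
    t[0] = ast.BinaryOp(t[2], t[1], t[3])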
def add_point(self,pnt,value): <NEW_LINE> <INDENT> i = len(self.X) <NEW_LINE> self.X = array_append(self.X,pnt) <NEW_LINE> self.F = array_append(self.F,value) <NEW_LINE> self._tri = None <NEW_LINE> self._nn_interper = None <NEW_LINE> self._lin_interper = None <NEW_LINE> if self.index is not None: <NEW_LINE> <INDENT> if self.index_type == 'stree': <NEW_LINE> <INDENT> print("Stree doesn't know how to add points") <NEW_LINE> self.index = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Adding new point %d to index at "%i,self.X[i]) <NEW_LINE> self.index.insert(i, self.X[i,xxyy] ) <NEW_LINE> <DEDENT> <DEDENT> self.created_point(i) <NEW_LINE> return i
Insert a new point into the field, clearing any invalidated data and returning the index of the new point
625941b6498bea3a759b98bc
def get_fill_color(): <NEW_LINE> <INDENT> return random.choice(rainbow_colors) if fill_color[0] == -1 else fill_color
@return: tuple (r, g, b, a): a random rainbow color when fill_color is unset (its first component is -1), otherwise fill_color itself
625941b6adb09d7d5db6c59e
def _ticks(times): <NEW_LINE> <INDENT> tick_values = np.arange(times[0], times[-1]+day_in_sec, tick_period_days*day_in_sec) <NEW_LINE> tick_labels = [Time(t, format='unix', out_subfmt='date').iso for t in tick_values] <NEW_LINE> return tick_values, tick_labels
Internal common function to generate tick values and labels, given a vector of dates in seconds since epoch.
625941b615fb5d323cde0913
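A sketch of the surrounding constants and a call; day_in_sec and tick_period_days are assumed module-level constants here, and astropy supplies the date-only ISO labels.

import numpy as np
from astropy.time import Time

day_in_sec = 86400
tick_period_days = 7   # one tick per week (assumed module constant)

times = np.array([1600000000.0, 1601000000.0])  # unix seconds
tick_values = np.arange(times[0], times[-1] + day_in_sec,
                        tick_period_days * day_in_sec)
tick_labels = [Time(t, format='unix', out_subfmt='date').iso
               for t in tick_values]
print(tick_labels)  # e.g. ['2020-09-13', '2020-09-20', ...]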
def cluster(self, testframe): <NEW_LINE> <INDENT> code_count = len(self.testing_df.index) <NEW_LINE> cluster = KMeans(12) <NEW_LINE> features = self.training_df.iloc[:, :-1] <NEW_LINE> classes = self.training_df.iloc[:, -1] <NEW_LINE> try: <NEW_LINE> <INDENT> classifier = cluster.fit(features) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> print(features) <NEW_LINE> print(classes) <NEW_LINE> raise <NEW_LINE> <DEDENT> prediction = classifier.predict(testframe) <NEW_LINE> print(prediction) <NEW_LINE> return prediction
Clustering is unsupervised learning, so the idea here is to cluster the codes and then run each cluster through random forest (or another supervised algorithm) in order to actually identify each one.
625941b663f4b57ef0000f2d
def keyPressEvent(self, event): <NEW_LINE> <INDENT> if event.key() == PyQt5.QtCore.Qt.Key_Escape: <NEW_LINE> <INDENT> self.close()
Closes the application when the escape key is pressed.
625941b6e64d504609d7464b
def g(n): <NEW_LINE> <INDENT> if n in (1, 2, 3): <NEW_LINE> <INDENT> return n <NEW_LINE> <DEDENT> return g(n-1) + 2*g(n-2) + 3*g(n-3)
Return the value of G(n), computed recursively. >>> g(1) 1 >>> g(2) 2 >>> g(3) 3 >>> g(4) 10 >>> g(5) 22 >>> from construct_check import check >>> check(HW_SOURCE_FILE, 'g', ['While', 'For']) True
625941b6a4f1c619b28afe4d
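For contrast, the same recurrence computed iteratively; this variant would of course fail the doctest's no-While/For check, and is shown only to spell out the recurrence G(n) = G(n-1) + 2*G(n-2) + 3*G(n-3).

def g_iter(n):
    if n in (1, 2, 3):
        return n
    a, b, c = 1, 2, 3          # G(n-3), G(n-2), G(n-1)
    for _ in range(n - 3):
        a, b, c = b, c, c + 2 * b + 3 * a
    return c

assert [g_iter(i) for i in range(1, 6)] == [1, 2, 3, 10, 22]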
def teleopInit(self): <NEW_LINE> <INDENT> pass
Called on first launch of teleop mode
625941b663f4b57ef0000f2e
def updateCurrentTime(self, p_int): <NEW_LINE> <INDENT> pass
QVariantAnimation.updateCurrentTime(int)
625941b6462c4b4f79d1d4db
def _get(endpoint, params=None): <NEW_LINE> <INDENT> resp = session.get('{}/{}'.format(URL, endpoint), params=params) <NEW_LINE> code = resp.status_code <NEW_LINE> if code not in (200,): <NEW_LINE> <INDENT> raise RuntimeError('Invalid REST API response ({})'.format(code)) <NEW_LINE> <DEDENT> return resp.json()
Make GET request and return JSON dictionary. :param endpoint: Endpoint to make request to. :param params: Dictionary of URL parameters. :return: JSON dictionary.
625941b64a966d76dd550e16
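A usage sketch, assuming the module-level URL and session objects the function relies on; the endpoint and parameters are illustrative.

import requests

URL = 'https://api.example.com'   # assumed module-level constant
session = requests.Session()      # assumed module-level session

data = _get('items', params={'page': 1})
print(data)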
def create_new_share_in_db(self): <NEW_LINE> <INDENT> isin = self.entry_isin.get() <NEW_LINE> comment = self.comment <NEW_LINE> category_id = self.df_categories.ID[self.df_categories.category_name == self.combobox_category.get()].iloc[0] <NEW_LINE> currency_id = self.df_currencies.ID[self.df_currencies.currency_name == self.combobox_currency.get()].iloc[0] <NEW_LINE> if isin == "": <NEW_LINE> <INDENT> messagebox.showinfo("Missing ISIN", "Please insert an ISIN!") <NEW_LINE> <DEDENT> elif is_isin_valid(isin): <NEW_LINE> <INDENT> list_isin = DB_Communication.get_all_isin(self.db_connection.cursor()) <NEW_LINE> if isin in list_isin: <NEW_LINE> <INDENT> messagebox.showerror("Duplicated ISIN", "The given ISIN already exists.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dict_share_values = {"isin": isin, "category_id": category_id, "currency_id": currency_id, "comment": comment, "company_id": self.new_company_id} <NEW_LINE> error = DB_Communication.insert_share(self.db_connection, dict_share_values) <NEW_LINE> if error is None: <NEW_LINE> <INDENT> self.update_frame(shares_disabled=True, delete_entries=True) <NEW_LINE> messagebox.showinfo("Success!", "The configured share has been successfully created in the database.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> messagebox.showerror("DB Error", "An error has occurred. Please try again. " "In case the error remains, please restart the application") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif is_isin_valid(isin) is not None: <NEW_LINE> <INDENT> messagebox.showerror("Invalid ISIN", "The entered ISIN is well-formatted but invalid. \n" "Please change it.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> messagebox.showerror("Format Error ISIN", "The entered ISIN does not meet the expected format. \n" "Please try again.")
Collects all inputs required to create a share entry and invokes the corresponding insert method. :return: None
625941b6e5267d203edcdaac
def _setup_target_network_updates(self): <NEW_LINE> <INDENT> init_updates, soft_updates = get_target_updates(tf_util.get_trainable_vars('model/'), tf_util.get_trainable_vars('target/'), self.tau, self.verbose) <NEW_LINE> self.target_init_updates = init_updates <NEW_LINE> self.target_soft_updates = soft_updates
set the target update operations
625941b6a8370b77170526ac
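The soft updates implement Polyak averaging, theta_target <- (1 - tau) * theta_target + tau * theta, while the init updates copy the weights outright (tau = 1). A framework-free numpy sketch of one soft step (names and tau value are illustrative):

import numpy as np

def soft_update(target_params, source_params, tau=0.005):
    # Polyak averaging: move target weights a small step toward the
    # learned weights; tau=1 would copy them outright (the init update).
    return [(1.0 - tau) * t + tau * s
            for t, s in zip(target_params, source_params)]

target = [np.zeros(3)]
source = [np.ones(3)]
target = soft_update(target, source, tau=0.1)
print(target[0])   # [0.1 0.1 0.1]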