code stringlengths 4–4.48k | docstring stringlengths 1–6.45k | _id stringlengths 24–24
def obfn_gvar(self): <NEW_LINE> <INDENT> return self.Y if self.opt['gEvalY'] else np.mean(self.X, axis=-1)
Variable to be evaluated in computing :math:`g(\cdot)`, depending on the ``gEvalY`` option value.
625941b97b180e01f3dc4676
def custom_score_2(game, player): <NEW_LINE> <INDENT> opponent_player = game.get_opponent(player) <NEW_LINE> if game.is_winner(player): <NEW_LINE> <INDENT> return float("inf") <NEW_LINE> <DEDENT> if game.is_winner(opponent_player): <NEW_LINE> <INDENT> return float("-inf") <NEW_LINE> <DEDENT> score = 0.0 <NEW_LINE> total_spaces = game.width * game.height <NEW_LINE> remaining_spaces = len(game.get_blank_spaces()) <NEW_LINE> coefficient = float(total_spaces - remaining_spaces) / float(total_spaces) <NEW_LINE> my_moves = game.get_legal_moves(player) <NEW_LINE> opponent_moves = game.get_legal_moves(opponent_player) <NEW_LINE> for move in my_moves: <NEW_LINE> <INDENT> isNearWall = 1 if (move[0] == 0 or move[0] == game.height - 1 or move[1] == 0 or move[1] == game.width - 1) else 0 <NEW_LINE> score += 1 - coefficient * isNearWall <NEW_LINE> <DEDENT> for move in opponent_moves: <NEW_LINE> <INDENT> isNearWall = 1 if (move[0] == 0 or move[0] == game.height - 1 or move[1] == 0 or move[1] == game.width - 1) else 0 <NEW_LINE> score -= 1 - coefficient * isNearWall <NEW_LINE> <DEDENT> return score
Calculate the heuristic value of a game state from the point of view of the given player. Note: this function should be called from within a Player instance as `self.score()` -- you should not need to call this function directly. Parameters ---------- game : `isolation.Board` An instance of `isolation.Board` encoding the current state of the game (e.g., player locations and blocked cells). player : object A player instance in the current game (i.e., an object corresponding to one of the player objects `game.__player_1__` or `game.__player_2__`.) Returns ------- float The heuristic value of the current game state to the specified player.
625941b944b2445a33931f10
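A worked numeric sketch of the wall penalty used by custom_score_2 above (the 7x7 board size and the count of 10 remaining blanks are hypothetical values):

# Hypothetical mid-game state on a 7x7 Isolation board.
total_spaces = 7 * 7                                             # 49
remaining_spaces = 10
coefficient = (total_spaces - remaining_spaces) / total_spaces   # 39/49 ~= 0.796
interior_contribution = 1 - coefficient * 0                      # 1.0
wall_contribution = 1 - coefficient * 1                          # ~0.204
# Late in the game (high coefficient), wall-adjacent moves are worth far less.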
def __init__(self, temboo_session): <NEW_LINE> <INDENT> super(ListIndustryCodes, self).__init__(temboo_session, '/Library/CorpWatch/Lists/ListIndustryCodes')
Create a new instance of the ListIndustryCodes Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.
625941b992d797404e303ffa
def getclaims(self): <NEW_LINE> <INDENT> raise NotImplementedError("%s needs to implement .getclaims() -> []" % self.__class__.__name__)
Returns a list of arbitrary strings describing things claimed by this helper.
625941b923849d37ff7b2f02
def __init__(self, oElement, Checksum=None): <NEW_LINE> <INDENT> if not ElementTree.iselement(oElement): <NEW_LINE> <INDENT> raise TypeError("Invalid type '%s' passed to constructor." % type(oElement)) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.ROOT = self.MakeNode(oElement) <NEW_LINE> <DEDENT> except _SpecError as e: <NEW_LINE> <INDENT> if Debug: raise <NEW_LINE> e.InsertStack(oElement) <NEW_LINE> raise SpecError(e)
Either pass a valid xml.etree.ElementTree.Element that represents the <Node> tag, or a string containing valid <Node> xml (none other).
625941b9377c676e9127201b
def word_split(text): <NEW_LINE> <INDENT> word_list = [] <NEW_LINE> stop_p = string.punctuation + "~·!@#¥%……&*()——=+-{}【】:;“”‘’《》,。?、|、" <NEW_LINE> seg_list = [i for i in jieba.cut(text, cut_all=False) if (i != ' ' and i not in stop_p)] <NEW_LINE> table = {} <NEW_LINE> for i, c in enumerate(seg_list): <NEW_LINE> <INDENT> if c in table: <NEW_LINE> <INDENT> table[c] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> table.setdefault(c, 1) <NEW_LINE> <DEDENT> value = ([i for i, word in enumerate(seg_list) if word == c][table[c] - 1], c.lower()) <NEW_LINE> word_list.append(value) <NEW_LINE> <DEDENT> return word_list, table
Find the word position in a document. :param text: a document :return: [(position, word) ...]
625941b930bbd722463cbc33
def _parse_parameters(val_type, val): <NEW_LINE> <INDENT> if val_type == "logical": <NEW_LINE> <INDENT> return val == "T" <NEW_LINE> <DEDENT> if val_type == "int": <NEW_LINE> <INDENT> return int(val) <NEW_LINE> <DEDENT> if val_type == "string": <NEW_LINE> <INDENT> return val.strip() <NEW_LINE> <DEDENT> return float(val)
Helper function to convert a Vasprun parameter into the proper type. Boolean, int and float types are converted. Args: val_type: Value type parsed from vasprun.xml. val: Actual string value parsed from vasprun.xml.
625941b9f8510a7c17cf9575
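Example conversions (a sketch, assuming _parse_parameters is in scope):

assert _parse_parameters("logical", "T") is True
assert _parse_parameters("logical", "F") is False
assert _parse_parameters("int", "42") == 42
assert _parse_parameters("string", "  PBE  ") == "PBE"
assert _parse_parameters("float", "1.5e-3") == 0.0015  # any other type falls through to float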
def create_compact_constraint(self): <NEW_LINE> <INDENT> self.delete([self.bg_label]) <NEW_LINE> if self.is_run_clicked: <NEW_LINE> <INDENT> self.insert_constraint(self.last_viewed_schedule)
Creates a more compact graphical schedule representation of the valid schedules
625941b9f9cc0f698b140476
def testLbPersistenceCookieTime(self): <NEW_LINE> <INDENT> pass
Test LbPersistenceCookieTime
625941b945492302aab5e131
def addTwoNumbers(self, l1, l2): <NEW_LINE> <INDENT> c = 0 <NEW_LINE> dummyHead = ListNode(0) <NEW_LINE> curr = dummyHead <NEW_LINE> while l1 != None or l2 != None: <NEW_LINE> <INDENT> x = l1.val if l1 != None else 0 <NEW_LINE> y = l2.val if l2 != None else 0 <NEW_LINE> sum = x + y + c <NEW_LINE> curr.next = ListNode(sum % 10) <NEW_LINE> curr = curr.next <NEW_LINE> if l1 != None: <NEW_LINE> <INDENT> l1 = l1.next <NEW_LINE> <DEDENT> if l2 != None: <NEW_LINE> <INDENT> l2 = l2.next <NEW_LINE> <DEDENT> c = sum // 10 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if c != 0: <NEW_LINE> <INDENT> curr.next = ListNode(c) <NEW_LINE> <DEDENT> <DEDENT> return dummyHead.next
:type l1: ListNode :type l2: ListNode :rtype: ListNode
625941b9d7e4931a7ee9dd8c
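A minimal usage sketch for addTwoNumbers above; the ListNode definition and the Solution wrapper class are assumptions following the usual LeetCode convention:

class ListNode:
    def __init__(self, val=0):
        self.val = val
        self.next = None

def build(digits):
    # Build a linked list from least-significant digit first.
    head = ListNode(digits[0])
    node = head
    for d in digits[1:]:
        node.next = ListNode(d)
        node = node.next
    return head

l1 = build([2, 4, 3])  # represents 342
l2 = build([5, 6, 4])  # represents 465
node = Solution().addTwoNumbers(l1, l2)  # hypothetical Solution class holding the method
while node:
    print(node.val)  # prints 7, then 0, then 8  (342 + 465 = 807)
    node = node.next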
def spheroidalCF2(r, psize, axrat): <NEW_LINE> <INDENT> pelpt = 1.0 * axrat <NEW_LINE> if psize <= 0 or pelpt <= 0: <NEW_LINE> <INDENT> return numpy.zeros_like(r) <NEW_LINE> <DEDENT> v = pelpt <NEW_LINE> d = 1.0 * psize <NEW_LINE> d2 = d*d <NEW_LINE> v2 = v*v <NEW_LINE> if v == 1: <NEW_LINE> <INDENT> return sphericalCF(r, psize) <NEW_LINE> <DEDENT> rx = r <NEW_LINE> if v < 1: <NEW_LINE> <INDENT> r = rx[rx <= v*psize] <NEW_LINE> r2 = r*r <NEW_LINE> f1 = 1 - 3*r/(4*d*v)*(1-r2/(4*d2)*(1+2.0/(3*v2))) - 3*r/(4*d)*(1-r2/(4*d2))*v/sqrt(1-v2)*atanh(sqrt(1-v2)) <NEW_LINE> r = rx[numpy.logical_and(rx > v*psize, rx <= psize)] <NEW_LINE> r2 = r*r <NEW_LINE> f2 = (3*d/(8*r)*(1+r2/(2*d2))*sqrt(1-r2/d2) - 3*r/(4*d)*(1-r2/(4*d2))*atanh(sqrt(1-r2/d2)) ) * v/sqrt(1-v2) <NEW_LINE> r = rx[rx > psize] <NEW_LINE> f3 = numpy.zeros_like(r) <NEW_LINE> f = numpy.concatenate((f1,f2,f3)) <NEW_LINE> <DEDENT> elif v > 1: <NEW_LINE> <INDENT> r = rx[rx <= psize] <NEW_LINE> r2 = r*r <NEW_LINE> f1 = 1 - 3*r/(4*d*v)*(1-r2/(4*d2)*(1+2.0/(3*v2))) - 3*r/(4*d)*(1-r2/(4*d2))*v/sqrt(v2-1)*atan(sqrt(v2-1)) <NEW_LINE> r = rx[numpy.logical_and(rx > psize, rx <= v*psize)] <NEW_LINE> r2 = r*r <NEW_LINE> f2 = 1 - 3*r/(4*d*v)*(1-r2/(4*d2)*(1+2.0/(3*v2))) - 3.0/8*(1+r2/(2*d2))*sqrt(1-d2/r2)*v/sqrt(v2-1) - 3*r/(4*d)*(1-r2/(4*d2))*v/sqrt(v2-1) * (atan(sqrt(v2-1)) - atan(sqrt(r2/d2-1))) <NEW_LINE> r = rx[rx > v*psize] <NEW_LINE> f3 = numpy.zeros_like(r) <NEW_LINE> f = numpy.concatenate((f1,f2,f3)) <NEW_LINE> <DEDENT> return f
Spheroidal nanoparticle characteristic function. Form factor for ellipsoid with radii (psize/2, psize/2, axrat*psize/2) r -- distance of interaction psize -- The equatorial diameter axrat -- The ratio of axis lengths From Lei et al., Phys. Rev. B, 80, 024118 (2009)
625941b926068e7796caeb49
def __str__(self): <NEW_LINE> <INDENT> if isinstance(self._symbol, str): <NEW_LINE> <INDENT> return "%s" % self._symbol <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "%s" % repr(self._symbol)
Return a string representation for this ``Nonterminal``. :rtype: str
625941b9cdde0d52a9e52ea0
def _get_assistants_snippets(path, name): <NEW_LINE> <INDENT> result = [] <NEW_LINE> subdirs = {'assistants': 2, 'snippets': 1} <NEW_LINE> for loc in subdirs: <NEW_LINE> <INDENT> for root, dirs, files in os.walk(os.path.join(path, loc)): <NEW_LINE> <INDENT> for filename in [utils.strip_prefix(os.path.join(root, f), path) for f in files]: <NEW_LINE> <INDENT> stripped = os.path.sep.join(filename.split(os.path.sep)[subdirs[loc]:]) <NEW_LINE> if stripped.startswith(os.path.join(name, '')) or stripped == name + '.yaml': <NEW_LINE> <INDENT> result.append(os.path.join('fakeroot', filename)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return result
Get Assistants and Snippets for a given DAP name on a given path
625941b98c3a87329515822f
def verify_mapping(self, key, object): <NEW_LINE> <INDENT> assert self.verify_key(key), "precondition: key must be well-formed." + " -- " + "key: %s" % hr(key) <NEW_LINE> raise NotImplementedError
@returns true if and only if `object' is a valid result for `key' @precondition key must be well-formed.: self.verify_key(key): "key: %s" % hr(key) @noblock This method may not block, either by waiting for network traffic, by waiting for a lock, or by sleeping.
625941b9460517430c393fff
def player(board): <NEW_LINE> <INDENT> xcount = 0 <NEW_LINE> ocount = 0 <NEW_LINE> for i in range(3): <NEW_LINE> <INDENT> for j in range(3): <NEW_LINE> <INDENT> if board[i][j] == "X": <NEW_LINE> <INDENT> xcount += 1 <NEW_LINE> <DEDENT> if board[i][j] == "O": <NEW_LINE> <INDENT> ocount += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if xcount > ocount: <NEW_LINE> <INDENT> turn = "O" <NEW_LINE> <DEDENT> elif xcount <= ocount: <NEW_LINE> <INDENT> turn = "X" <NEW_LINE> <DEDENT> return turn
Returns player who has the next turn on a board.
625941b90a366e3fb873e688
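Usage sketch for player above, assuming the 3x3 list-of-lists board representation with None for empty cells:

board = [["X", None, None],
         [None, None, None],
         [None, "O", None]]
print(player(board))  # "X" -- counts are tied at 1-1, so X moves next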
def buscaCadenaTuplas(cadena,listaTuplas): <NEW_LINE> <INDENT> busqueda=[] <NEW_LINE> for n in range(len(listaTuplas)): <NEW_LINE> <INDENT> if cadena in listaTuplas[n][0]: <NEW_LINE> <INDENT> busqueda.append(listaTuplas[n]) <NEW_LINE> <DEDENT> <DEDENT> print(busqueda) <NEW_LINE> return busqueda
Write a function that receives a string to search for and a list of tuples (nombre_completo, telefono), and searches the list for all entries whose full name contains the given string (it can match the first name, the last name, or just part of either). It must return a list with all the tuples found.
625941b916aa5153ce3622e9
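Usage sketch with a hypothetical contact list:

agenda = [("Ana Garcia", "555-0101"),
          ("Juan Perez", "555-0102"),
          ("Ana Maria Lopez", "555-0103")]
resultado = buscaCadenaTuplas("Ana", agenda)
# prints and returns [('Ana Garcia', '555-0101'), ('Ana Maria Lopez', '555-0103')]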
def load_shapes(self): <NEW_LINE> <INDENT> zones_shape = self['zones_shape'] <NEW_LINE> layers_group = self.get_layers_group() <NEW_LINE> for layer in layers_group.findLayers(): <NEW_LINE> <INDENT> if layer.layer().source() == zones_shape: <NEW_LINE> <INDENT> self['zones_shape_id'] = layer.layer().id()
@summary: Loads zone shape
625941b98da39b475bd64de8
def save(self): <NEW_LINE> <INDENT> self.client.save_app_data(self.get('uuid'), self.get('app_data'))
Persists the user's app data to the `unicore.hub` server.
625941b930c21e258bdfa30e
def auth(func): <NEW_LINE> <INDENT> def inner(*args,**kwargs): <NEW_LINE> <INDENT> login_count = 0 <NEW_LINE> auth_status = False <NEW_LINE> while login_count < 3: <NEW_LINE> <INDENT> username = input('username:') <NEW_LINE> password = input('password:') <NEW_LINE> if username in d_user: <NEW_LINE> <INDENT> if md5sum(password) == d_user[username][1].strip(): <NEW_LINE> <INDENT> if user.islock(username): <NEW_LINE> <INDENT> print("你的账号处于锁定状态,请联系管理员") <NEW_LINE> exit(0) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Login success, welcome %s' %username) <NEW_LINE> log_o.info("Login success - account:%s" %(username)) <NEW_LINE> auth_status = True <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> login_count += 1 <NEW_LINE> print('password error, please try again') <NEW_LINE> log_o.error("Login failed - account:%s" %(username)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> login_count += 1 <NEW_LINE> print('This account does not exist.') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> user.dolock(username) <NEW_LINE> print('你的账号已被锁定,请联系管理员解锁! ') <NEW_LINE> log_o.error("Account locked! - account:%s" %(username)) <NEW_LINE> exit(0) <NEW_LINE> <DEDENT> if auth_status == True: <NEW_LINE> <INDENT> global user_cur <NEW_LINE> user_cur = username <NEW_LINE> return func(*args, **kwargs) <NEW_LINE> <DEDENT> <DEDENT> return inner
User authentication function. All demo passwords are 123.
625941b9d99f1b3c44c67408
def GetReferenceImage(self): <NEW_LINE> <INDENT> return _itkChangeInformationImageFilterPython.itkChangeInformationImageFilterIF2_GetReferenceImage(self)
GetReferenceImage(self) -> itkImageF2
625941b9e8904600ed9f1d9a
def test_create_token_no_user(self): <NEW_LINE> <INDENT> payload = { 'email': '[email protected]', 'password': 'testpass' } <NEW_LINE> res = self.client.post(TOKEN_URL, payload) <NEW_LINE> self.assertNotIn('token', res.data) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that token is not created if user doesn't exist
625941b9925a0f43d2549ce5
def _write_json_file(self, filename, state): <NEW_LINE> <INDENT> json_file = os.path.join(self._options.config_path, filename) <NEW_LINE> if not os.path.exists(json_file): <NEW_LINE> <INDENT> common.file_util.mkdir_p(self._options.config_path) <NEW_LINE> <DEDENT> with atomic_write_file(json_file) as outfile: <NEW_LINE> <INDENT> json.dump(state, outfile, sort_keys=True, indent=4, separators=(',', ': '))
Write a json file given a filename under the default config path. Creates the file and directory if required.
625941b9baa26c4b54cb0f94
def test_metallicity_outside_range_nomoto(yields_nomoto): <NEW_LINE> <INDENT> yields_nomoto.set_metallicity(0.99) <NEW_LINE> assert yields_nomoto.H_1 == 2.45E-2 <NEW_LINE> assert yields_nomoto.H_2 == 5.34E-16 <NEW_LINE> assert yields_nomoto.O_16 == 6.14E-3 <NEW_LINE> assert yields_nomoto.Al_27 == 6.53E-5 <NEW_LINE> assert yields_nomoto.Fe_58 == 2.15E-6 <NEW_LINE> assert yields_nomoto.Fe_54 == 1.13E-5
Tests what happens when the metallicity is outside the range the models span. I will assert that it should be the same as the yields of the model that is at the extreme.
625941b9167d2b6e31218a0e
def select(self, *fields): <NEW_LINE> <INDENT> stmt = SelectStatement(self, *fields) <NEW_LINE> stmt.stmt_id = self._connection.get_next_statement_id() <NEW_LINE> return stmt
Creates a new :class:`mysqlx.SelectStatement` object. Args: *fields: The fields to be retrieved. Returns: mysqlx.SelectStatement: SelectStatement object
625941b90c0af96317bb805b
def show_progress(last_time, nrec=None): <NEW_LINE> <INDENT> if nrec: <NEW_LINE> <INDENT> msg = "Records processed: %d; time: %s\r" % (nrec, timestamp_to_string(last_time)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> msg = "Processed through: %s\r" % timestamp_to_string(last_time) <NEW_LINE> <DEDENT> print(msg, end='', file=sys.stdout) <NEW_LINE> sys.stdout.flush()
Utility function to show our progress
625941b9d486a94d0b98dfbe
def build_bq_schema(input_convention): <NEW_LINE> <INDENT> with open(input_convention) as tsvfile: <NEW_LINE> <INDENT> reader = csv.DictReader(tsvfile, dialect='excel-tab') <NEW_LINE> schema = [] <NEW_LINE> for row in reader: <NEW_LINE> <INDENT> entry = {} <NEW_LINE> entry['name'] = row['attribute'] <NEW_LINE> entry['type'] = process_row_type(row['type']) <NEW_LINE> if row['array']: <NEW_LINE> <INDENT> entry['mode'] = 'REPEATED' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> entry['mode'] = 'NULLABLE' <NEW_LINE> <DEDENT> schema.append(entry) <NEW_LINE> <DEDENT> <DEDENT> return schema
Build schema as a Python dictionary
625941b931939e2706e4cce1
def producto_cartesiano(self, lista1, lista2): <NEW_LINE> <INDENT> if not lista1: <NEW_LINE> <INDENT> return lista2 <NEW_LINE> <DEDENT> elif not lista2: <NEW_LINE> <INDENT> return lista1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> lista = list() <NEW_LINE> for flags1, flags2 in itertools.product(lista1, lista2): <NEW_LINE> <INDENT> lista.append(dict(flags1, **flags2)) <NEW_LINE> <DEDENT> return lista
Performs the Cartesian product of two lists of dictionaries.
625941b9b57a9660fec336f2
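The core merge step of producto_cartesiano, extracted as a standalone sketch with hypothetical flag dictionaries:

import itertools

lista1 = [{"arch": "x86"}, {"arch": "arm"}]
lista2 = [{"opt": "-O2"}, {"opt": "-O3"}]
merged = [dict(f1, **f2) for f1, f2 in itertools.product(lista1, lista2)]
# [{'arch': 'x86', 'opt': '-O2'}, {'arch': 'x86', 'opt': '-O3'},
#  {'arch': 'arm', 'opt': '-O2'}, {'arch': 'arm', 'opt': '-O3'}]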
def to_indented_string(self, obj): <NEW_LINE> <INDENT> if obj: <NEW_LINE> <INDENT> return str(obj).replace("\n", "\n ") <NEW_LINE> <DEDENT> return ""
Convert the given object to string with each line indented by 4 spaces (except the first line). :param obj: :return:
625941b985dfad0860c3acca
def __init__(self): <NEW_LINE> <INDENT> self.Filters = None <NEW_LINE> self.Limit = None <NEW_LINE> self.Offset = None <NEW_LINE> self.OrderBy = None <NEW_LINE> self.OrderByType = None
:param Filters: Filter condition. Valid values: db-instance-id, db-instance-name, db-project-id, db-pay-mode, db-tag-key. :type Filters: list of Filter :param Limit: Number of entries returned per page. Default value: 10. :type Limit: int :param Offset: Page number, starting from 0. :type Offset: int :param OrderBy: Sorting metric, such as instance name or creation time. Valid values: DBInstanceId, CreateTime, Name, EndTime :type OrderBy: str :param OrderByType: In ascending or descending order :type OrderByType: str
625941b924f1403a926009db
def extract_mags(number_of_quakes, dictionary_obj): <NEW_LINE> <INDENT> magnitude_per_code_dictionary = {} <NEW_LINE> for i in range(number_of_quakes): <NEW_LINE> <INDENT> k = dictionary_obj['features'][i]['id'] <NEW_LINE> v = dictionary_obj['features'][i]['properties']['mag'] <NEW_LINE> magnitude_per_code_dictionary[str(k)] = float(v) <NEW_LINE> <DEDENT> return magnitude_per_code_dictionary
Loops over the report to extract the magnitude per quake for each record and stores all key/value pairs in a dictionary
625941b9507cdc57c6306b45
def error(description, code=500, **keywords): <NEW_LINE> <INDENT> resp = dict(problem=description, **keywords) <NEW_LINE> return Response(dumps(resp), status=code, content_type='application/json')
Produce a response: Args: code (int): HTTP response code description (str): problem description text other arguments are added to the JSON response dictionary. Returns: Response: a Flask Response instance
625941b963b5f9789fde6f57
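Usage sketch for error() above (assumes `from flask import Response` and `from json import dumps` in the defining module):

resp = error("user not found", code=404, user_id=42)
# resp is a Flask Response with status 404, content type application/json,
# and body {"problem": "user not found", "user_id": 42}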
def set_attribute(self, attr: str, val: str) -> None: <NEW_LINE> <INDENT> self.__attrs[attr] = val
Set an attribute to a particular string value on this node. Parameters: attr - A string attribute to set on the node. val - The string value to set the attribute value to.
625941b9a8ecb033257d2f48
def _recursive_copy_to_device(value: Any, non_blocking: bool, device: torch.device) -> Any: <NEW_LINE> <INDENT> if isinstance(value, torch.Tensor): <NEW_LINE> <INDENT> return value.to(device, non_blocking=non_blocking) <NEW_LINE> <DEDENT> if isinstance(value, (list, tuple)): <NEW_LINE> <INDENT> values = [_recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for val in value] <NEW_LINE> return values if isinstance(value, list) else tuple(values) <NEW_LINE> <DEDENT> if isinstance(value, container_abcs.Mapping): <NEW_LINE> <INDENT> return { key: _recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for key, val in value.items() } <NEW_LINE> <DEDENT> return value
Recursively searches lists, tuples, dicts and copies tensors to device if possible. Non-tensor values are passed as-is in the result. .. note: These are all copies, so if there are two objects that reference the same object, then after this call, there will be two different objects referenced on the device.
625941b976d4e153a657e9a2
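Usage sketch (assumes PyTorch and the helper above in scope):

import torch

batch = {"input": torch.ones(2, 3), "meta": ("sample", 7), "targets": [torch.zeros(2)]}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
moved = _recursive_copy_to_device(batch, non_blocking=False, device=device)
# moved["input"] and moved["targets"][0] are on `device`;
# the string and int inside "meta" pass through unchanged.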
def updateDownloadingStatus(self, pkgName, progress, feedback): <NEW_LINE> <INDENT> if pkgName in self.itemDict: <NEW_LINE> <INDENT> appItem = self.itemDict[pkgName] <NEW_LINE> appItem.updateDownloadingStatus(progress, feedback)
Update downloading status.
625941b9293b9510aa2c310a
def input_type_number(): <NEW_LINE> <INDENT> pass
<div class="row"> <div class="form-group ma mb-2"> <label class="sr-only" for="age">Campo para escrever idade</label> <input type="number" class="form-control" id="age" name="age" min="1" max="120" placeholder="sua idade" size="20" required> </div> </div>
625941b90383005118ecf456
def _read_line(original_text=None, terminating_characters=None): <NEW_LINE> <INDENT> if original_text == None: <NEW_LINE> <INDENT> original_text = "" <NEW_LINE> <DEDENT> if not terminating_characters: <NEW_LINE> <INDENT> terminating_characters = "\r" <NEW_LINE> <DEDENT> assert isinstance(original_text, str) <NEW_LINE> assert isinstance(terminating_characters, str) <NEW_LINE> chars_entered = len(original_text) <NEW_LINE> sys.stdout.write(original_text) <NEW_LINE> string = original_text <NEW_LINE> finished = False <NEW_LINE> while not finished: <NEW_LINE> <INDENT> char = _read_char() <NEW_LINE> if char in (_BACKSPACE_CHAR, _DELETE_CHAR): <NEW_LINE> <INDENT> if chars_entered > 0: <NEW_LINE> <INDENT> chars_entered -= 1 <NEW_LINE> string = string[:-1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> elif char in terminating_characters: <NEW_LINE> <INDENT> finished = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> string += char <NEW_LINE> chars_entered += 1 <NEW_LINE> <DEDENT> if char == "\r": <NEW_LINE> <INDENT> char_to_print = "\n" <NEW_LINE> <DEDENT> elif char == _BACKSPACE_CHAR: <NEW_LINE> <INDENT> char_to_print = "%s %s" % (_BACKSPACE_CHAR, _BACKSPACE_CHAR) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> char_to_print = char <NEW_LINE> <DEDENT> sys.stdout.write(char_to_print) <NEW_LINE> <DEDENT> return string
Reads a line of input with the given unicode string of original text, which is editable, and the given unicode string of terminating characters (used to terminate text input). By default, terminating_characters is a string containing the carriage return character ('\r').
625941b90a50d4780f666d01
def enumerate_algo_examples(): <NEW_LINE> <INDENT> exclude = NON_ALGO_EXAMPLES + LONG_RUNNING_EXAMPLES <NEW_LINE> all_examples = EXAMPLES_ROOT_DIR.glob('**/*.py') <NEW_LINE> return [str(e) for e in all_examples if e not in exclude]
Return a list of paths for all algo examples Returns: List[str]: list of path strings
625941b95166f23b2e1a4fcb
def _load_multiple_files(fn, subdirs): <NEW_LINE> <INDENT> return [np.load(str(subdir / fn)).squeeze() for subdir in subdirs]
Load the same filename in the different subdirectories.
625941b9097d151d1a222cce
def pth_nms(dets, thresh): <NEW_LINE> <INDENT> if not dets.is_cuda: <NEW_LINE> <INDENT> x1 = dets[:, 0] <NEW_LINE> y1 = dets[:, 1] <NEW_LINE> x2 = dets[:, 2] <NEW_LINE> y2 = dets[:, 3] <NEW_LINE> scores = dets[:, 4] <NEW_LINE> areas = (x2 - x1) * (y2 - y1) <NEW_LINE> order = scores.sort(0, descending=True)[1] <NEW_LINE> keep = torch.LongTensor(dets.size(0)) <NEW_LINE> num_out = torch.LongTensor(1) <NEW_LINE> nms.cpu_nms(keep, num_out, dets, order, areas, thresh) <NEW_LINE> return keep[:num_out[0]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> x1 = dets[:, 0] <NEW_LINE> y1 = dets[:, 1] <NEW_LINE> x2 = dets[:, 2] <NEW_LINE> y2 = dets[:, 3] <NEW_LINE> scores = dets[:, 4] <NEW_LINE> areas = (x2 - x1) * (y2 - y1) <NEW_LINE> order = scores.sort(0, descending=True)[1] <NEW_LINE> dets = dets[order].contiguous() <NEW_LINE> keep = torch.LongTensor(dets.size(0)) <NEW_LINE> num_out = torch.LongTensor(1) <NEW_LINE> nms.gpu_nms(keep, num_out, dets, thresh) <NEW_LINE> return order[keep[:num_out[0]].cuda()].contiguous()
dets has to be a tensor
625941b9a219f33f346287e6
def testRepresentativeStory(self): <NEW_LINE> <INDENT> inst_req_only = self.make_instance(include_optional=False) <NEW_LINE> inst_req_and_optional = self.make_instance(include_optional=True)
Test RepresentativeStory
625941b9cc0a2c11143dcd0a
def create_channel(device_id, channel): <NEW_LINE> <INDENT> global _LOGGER <NEW_LINE> global DATABASE <NEW_LINE> _LOGGER.info("Device : " + str(device_id) + ", channel: " + str(channel) + ", does not exists") <NEW_LINE> sql = "INSERT INTO channel (device_id, channel) VALUES (%s, %s)" <NEW_LINE> _LOGGER.debug(sql) <NEW_LINE> values = [device_id, channel] <NEW_LINE> _LOGGER.debug(values) <NEW_LINE> return DATABASE.execute_non_update(sql, values)
Creates new channel record
625941b991af0d3eaac9b886
def call_later(self, seconds, target, *args, **kwargs): <NEW_LINE> <INDENT> if '_errback' not in kwargs and hasattr(self, 'handle_error'): <NEW_LINE> <INDENT> kwargs['_errback'] = self.handle_error <NEW_LINE> <DEDENT> callback = self.ioloop.call_later(seconds, target, *args, **kwargs) <NEW_LINE> self._tasks.append(callback) <NEW_LINE> return callback
Same as self.ioloop.call_later but also cancel()s the scheduled function on close().
625941b9711fe17d825421e4
def run(self): <NEW_LINE> <INDENT> super(SIPpTestCase, self).run() <NEW_LINE> self.create_ami_factory() <NEW_LINE> self.ast[0].cli_exec('sip set debug on') <NEW_LINE> self.ast[0].cli_exec('pjsip set logger on')
Override of the run method. Create an AMI factory in case anyone wants it
625941b9090684286d50eb52
def plotMatch(self, **kwargs): <NEW_LINE> <INDENT> kwargs.setdefault('keypoints_color', 'k') <NEW_LINE> kwargs.setdefault('matches_color', 'g') <NEW_LINE> kwargs.setdefault('only_matches', False) <NEW_LINE> fig, ax = plt.subplots(1, 1, figsize=(12, 6)) <NEW_LINE> plot_matches(ax, fun.bgr2rgb(self.pattern), fun.bgr2rgb(self.scene), self.kpts1, self.kpts2, self.matches, **kwargs) <NEW_LINE> plt.title('SIFT: Keypoints Match (# of matched: %d)' % self.matches.shape[0]) <NEW_LINE> ax.axis('off') <NEW_LINE> plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.9, hspace=0.1, wspace=0.05)
Plot matches between pattern image and scene image
625941b997e22403b379ce0a
@charity_signup.route('/authorize') <NEW_LINE> def authorize(): <NEW_LINE> <INDENT> site = 'https://connect.stripe.com' + '/oauth/authorize' <NEW_LINE> params = {'response_type': 'code', 'scope': 'read_write', 'client_id': app.config.get('CLIENT_ID') } <NEW_LINE> url = site + '?' + urllib.parse.urlencode(params) <NEW_LINE> return redirect(url)
Redirects the user to the Stripe signup website, which will return a Stripe key for charging users on their behalf
625941b94d74a7450ccd4034
def build_graph_from_queries(query_sampler_variable_graph_tuples, grakn_transaction, concept_dict_converter=concept_dict_to_graph, infer=True): <NEW_LINE> <INDENT> query_concept_graphs = [] <NEW_LINE> for query, sampler, variable_graph in query_sampler_variable_graph_tuples: <NEW_LINE> <INDENT> concept_maps = sampler(grakn_transaction.query(query, infer=infer)) <NEW_LINE> concept_dicts = [concept_dict_from_concept_map(concept_map, grakn_transaction) for concept_map in concept_maps] <NEW_LINE> answer_concept_graphs = [] <NEW_LINE> for concept_dict in concept_dicts: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> answer_concept_graphs.append(concept_dict_converter(concept_dict, variable_graph)) <NEW_LINE> <DEDENT> except ValueError as e: <NEW_LINE> <INDENT> raise ValueError(str(e) + f'Encountered processing query:\n \"{query}\"') <NEW_LINE> <DEDENT> <DEDENT> if len(answer_concept_graphs) > 1: <NEW_LINE> <INDENT> query_concept_graph = combine_n_graphs(answer_concept_graphs) <NEW_LINE> query_concept_graphs.append(query_concept_graph) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if len(answer_concept_graphs) > 0: <NEW_LINE> <INDENT> query_concept_graphs.append(answer_concept_graphs[0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> warnings.warn(f'There were no results for query: \n\"{query}\"\nand so nothing will be added to the ' f'graph for this query') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if len(query_concept_graphs) == 0: <NEW_LINE> <INDENT> raise RuntimeError(f'The graph from queries: {[query_sampler_variable_graph_tuple[0] for query_sampler_variable_graph_tuple in query_sampler_variable_graph_tuples]}\n' f'could not be created, since none of these queries returned results') <NEW_LINE> <DEDENT> concept_graph = combine_n_graphs(query_concept_graphs) <NEW_LINE> return concept_graph
Builds a graph of Things, interconnected by roles (and *has*), from a set of queries and graphs (variable graphs) representing those queries, over a Grakn transaction. Args: infer: whether to use Grakn's inference engine query_sampler_variable_graph_tuples: A list of tuples, each tuple containing a query, a sampling function, and a variable_graph grakn_transaction: A Grakn transaction concept_dict_converter: The function to use to convert from concept_dicts to a Grakn model. This could be a typical model or a mathematical model Returns: A networkx graph
625941b93539df3088e2e1bd
def cutadapt_worker(fname, regions, primers): <NEW_LINE> <INDENT> sample = basename(fname).split('_R1.fastq')[0] <NEW_LINE> for region, seq in primers.items(): <NEW_LINE> <INDENT> fwd, rev = seq.split('-') <NEW_LINE> if exists('{}_unknown_R1.fastq'.format(sample)): <NEW_LINE> <INDENT> fname = '{}_unknown_R1.fastq'.format(sample) <NEW_LINE> cp_cmd = 'mv -f {} input_{}'.format(fname, fname) <NEW_LINE> subprocess.check_call(cp_cmd, shell=True) <NEW_LINE> cp_cmd = 'mv -f {} input_{}'.format(fname.replace('R1.fastq', 'R2.fastq'), fname.replace('R1.fastq', 'R2.fastq')) <NEW_LINE> subprocess.check_call(cp_cmd, shell=True) <NEW_LINE> <DEDENT> cmd = """cutadapt -g {region}={fwd_primer} -G {region}={rev_primer} --pair-adapters --no-indels -e 0.1 --untrimmed-output {unknown_r1} --untrimmed-paired-output {unknown_r2} --suffix ':region={{name}}' -o {sample}_{{name}}_R1.fastq -p {sample}_{{name}}_R2.fastq {r1} {r2} >> log/{sample}_region_demultiplex.log""" <NEW_LINE> cmd = cmd.format( sample=sample, unknown_r1='{}_unknown_R1.fastq'.format(sample), unknown_r2='{}_unknown_R2.fastq'.format(sample), region=region, fwd_primer=fwd, rev_primer=rev, r1=('input_' + fname) if 'unknown_R1.fastq' in fname else fname, r2=('input_' + fname.replace('R1.fastq', 'R2.fastq')) if 'unknown_R1.fastq' in fname else fname.replace('R1.fastq', 'R2.fastq'), ) <NEW_LINE> subprocess.check_call(cmd, shell=True) <NEW_LINE> <DEDENT> rm_cmd = 'rm input_{}_*fastq'.format(sample) <NEW_LINE> subprocess.check_call(rm_cmd, shell=True)
Split fastq files into regions by matching 5 prime ends with conserved region primer sequences. This function requires cutadapt installed (tested with cutadapt 2.8)
625941b997e22403b379ce0b
@_print_function_call <NEW_LINE> def unhook(handler): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> _hook_handlers.remove(handler) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> _unload_hook_handlers.remove(handler)
Unhooks any hook registered with the hook functions above. :param handler: Handler returned from :func:`hook_print`, :func:`hook_command`, :func:`hook_server` or :func:`hook_timer` As of version 1.0 of the plugin hooks from :func:`hook_print` and :func:`hook_command` can be unhooked by their names.
625941b9a8370b7717052713
def test_create_model_with_log( mocked_vgg_extract_mapping, mocked_filesystem_create_log_path, mocked_filesystem_fetch_images, mocked_core_extract_style_from_path, mocked_core_optimize_model ): <NEW_LINE> <INDENT> mocked_filesystem_fetch_images.return_value = [ "/path/to/image1", "/path/to/image2", "/path/to/image3" ] <NEW_LINE> mocked_vgg_extract_mapping.return_value = "__VGG__" <NEW_LINE> mocked_core_extract_style_from_path.return_value = "__STYLE__" <NEW_LINE> stylish.create_model( "/path/to/training_data", "/path/to/style_image.jpg", "/path/to/output_image", "/path/to/vgg_model.mat", log_path="__LOG__" ) <NEW_LINE> mocked_vgg_extract_mapping.assert_called_once_with("/path/to/vgg_model.mat") <NEW_LINE> mocked_filesystem_fetch_images.assert_called_once_with( "/path/to/training_data", limit=None ) <NEW_LINE> mocked_filesystem_create_log_path.assert_not_called() <NEW_LINE> mocked_core_extract_style_from_path.assert_called_once_with( "/path/to/style_image.jpg", "__VGG__", stylish.vgg.STYLE_LAYERS ) <NEW_LINE> mocked_core_optimize_model.assert_called_once_with( ["/path/to/image1", "/path/to/image2", "/path/to/image3"], "__STYLE__", "__VGG__", "/path/to/output_image", "__LOG__", learning_rate=stylish.core.LEARNING_RATE, batch_size=stylish.core.BATCH_SIZE, batch_shape=stylish.core.BATCH_SHAPE, epoch_number=stylish.core.EPOCHS_NUMBER, content_weight=stylish.core.CONTENT_WEIGHT, style_weight=stylish.core.STYLE_WEIGHT, tv_weight=stylish.core.TV_WEIGHT, content_layer=stylish.vgg.CONTENT_LAYER, style_layer_names=[name for name, _ in stylish.vgg.STYLE_LAYERS] )
Train a style generator model with specific log path.
625941b950812a4eaa59c197
def __init__(self, id=None, name=None, description=None): <NEW_LINE> <INDENT> self._id = None <NEW_LINE> self._name = None <NEW_LINE> self._description = None <NEW_LINE> self.discriminator = None <NEW_LINE> if id is not None: <NEW_LINE> <INDENT> self.id = id <NEW_LINE> <DEDENT> self.name = name <NEW_LINE> if description is not None: <NEW_LINE> <INDENT> self.description = description
JsonAddStatusRequest - a model defined in Swagger
625941b9236d856c2ad44650
def reduce(self, show_noisy=False): <NEW_LINE> <INDENT> if not show_noisy: <NEW_LINE> <INDENT> for log in self.quiet_logs: <NEW_LINE> <INDENT> yield log['raw'].strip() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for log in self.noisy_logs: <NEW_LINE> <INDENT> yield log['raw'].strip()
Yield the reduced log lines :param show_noisy: If this is true, shows the reduced log file. If this is false, it shows the logs that were deleted.
625941b994891a1f4081b91a
def load_module(self,fullname): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return sys.modules[fullname] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> info = self._get_module_info(fullname) <NEW_LINE> code = self.get_code(fullname,info) <NEW_LINE> if code is None: <NEW_LINE> <INDENT> raise ImportError(fullname) <NEW_LINE> <DEDENT> mod = imp.new_module(fullname) <NEW_LINE> mod.__file__ = "<loading>" <NEW_LINE> mod.__loader__ = self <NEW_LINE> sys.modules[fullname] = mod <NEW_LINE> try: <NEW_LINE> <INDENT> exec(code, mod.__dict__) <NEW_LINE> mod.__file__ = self.get_filename(fullname,info) <NEW_LINE> if self.is_package(fullname,info): <NEW_LINE> <INDENT> if self.path is None: <NEW_LINE> <INDENT> mod.__path__ = [] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mod.__path__ = [self.path] <NEW_LINE> <DEDENT> <DEDENT> return mod <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> sys.modules.pop(fullname,None) <NEW_LINE> raise
Load the specified module. This method locates the file for the specified module, loads and executes it and returns the created module object.
625941b9a79ad161976cbfb7
def testJsonDateAttributeImpl(self): <NEW_LINE> <INDENT> pass
Test JsonDateAttributeImpl
625941b96fece00bbac2d5ad
def get_compete_user_obj(self,compete_dict): <NEW_LINE> <INDENT> from apps.models.virtual.monster import Monster <NEW_LINE> obj = Monster.get_compete(compete_dict) <NEW_LINE> return obj
Get the opponent object for this competition.
625941b97047854f462a127f
def package_command(self, command, args=None, pkgs=None): <NEW_LINE> <INDENT> if pkgs is None: <NEW_LINE> <INDENT> pkgs = [] <NEW_LINE> <DEDENT> e = os.environ.copy() <NEW_LINE> e["DEBIAN_FRONTEND"] = "noninteractive" <NEW_LINE> wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER) <NEW_LINE> cmd = _get_wrapper_prefix( wcfg.get("command", APT_GET_WRAPPER["command"]), wcfg.get("enabled", APT_GET_WRAPPER["enabled"]), ) <NEW_LINE> cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND))) <NEW_LINE> if args and isinstance(args, str): <NEW_LINE> <INDENT> cmd.append(args) <NEW_LINE> <DEDENT> elif args and isinstance(args, list): <NEW_LINE> <INDENT> cmd.extend(args) <NEW_LINE> <DEDENT> subcmd = command <NEW_LINE> if command == "upgrade": <NEW_LINE> <INDENT> subcmd = self.get_option( "apt_get_upgrade_subcommand", "dist-upgrade" ) <NEW_LINE> <DEDENT> cmd.append(subcmd) <NEW_LINE> pkglist = util.expand_package_list("%s=%s", pkgs) <NEW_LINE> cmd.extend(pkglist) <NEW_LINE> self._wait_for_apt_command( short_cmd=command, subp_kwargs={"args": cmd, "env": e, "capture": False}, )
Run the given package command. On Debian, this will run apt-get (unless APT_GET_COMMAND is set). command: The command to run, like "upgrade" or "install" args: Arguments passed to apt itself in addition to any specified in APT_GET_COMMAND pkgs: Apt packages that the command will apply to
625941b91b99ca400220a923
def setSecurityLevel(self, level): <NEW_LINE> <INDENT> if level == SecurityLevel.RDP_LEVEL_RDP: <NEW_LINE> <INDENT> self._x224Layer._requestedProtocol = x224.Protocols.PROTOCOL_RDP <NEW_LINE> <DEDENT> elif level == SecurityLevel.RDP_LEVEL_SSL: <NEW_LINE> <INDENT> self._x224Layer._requestedProtocol = x224.Protocols.PROTOCOL_SSL <NEW_LINE> <DEDENT> elif level == SecurityLevel.RDP_LEVEL_NLA: <NEW_LINE> <INDENT> self._x224Layer._requestedProtocol = x224.Protocols.PROTOCOL_SSL | x224.Protocols.PROTOCOL_HYBRID
@summary: Request basic security @param level: {SecurityLevel}
625941b98a43f66fc4b53edb
def uint2int64(value): <NEW_LINE> <INDENT> if value & 0x8000000000000000: <NEW_LINE> <INDENT> return value - 0x10000000000000000 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return value
Convert an unsigned 64 bits integer into a signed 64 bits integer.
625941b9b7558d58953c4d8d
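Worked examples of the two's-complement reinterpretation:

assert uint2int64(0xFFFFFFFFFFFFFFFF) == -1       # all 64 bits set
assert uint2int64(0x8000000000000000) == -2**63   # sign bit only: most negative value
assert uint2int64(42) == 42                       # sign bit clear: unchanged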
def get_vspring(self): <NEW_LINE> <INDENT> if self.nbeads == 1: <NEW_LINE> <INDENT> return 0.0 <NEW_LINE> <DEDENT> if len(self.bosons) == 0: <NEW_LINE> <INDENT> sqnm = dstrip(self.qnm) * dstrip(self.beads.sm3) <NEW_LINE> q2 = (sqnm ** 2).sum(axis=1) <NEW_LINE> vspring = (self.omegak2 * q2).sum() <NEW_LINE> for j in self.open_paths: <NEW_LINE> <INDENT> vspring += ( self.beads.m[j] * (self.o_omegak ** 2 - self.omegak ** 2) * ( self.qnm[:, 3 * j] ** 2 + self.qnm[:, 3 * j + 1] ** 2 + self.qnm[:, 3 * j + 2] ** 2 ) ).sum() <NEW_LINE> <DEDENT> return vspring * 0.5 <NEW_LINE> <DEDENT> elif len(self.bosons) == self.natoms: <NEW_LINE> <INDENT> return self.vspring_and_fspring_B[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> vspring = 0.0 <NEW_LINE> notbosons = list(set(range(self.natoms)) - set(self.bosons)) <NEW_LINE> for j in notbosons: <NEW_LINE> <INDENT> vspring += ( self.beads.m[j] * self.omegak ** 2 * ( self.qnm[:, 3 * j] ** 2 + self.qnm[:, 3 * j + 1] ** 2 + self.qnm[:, 3 * j + 2] ** 2 ) ).sum() <NEW_LINE> <DEDENT> return vspring * 0.5 + self.vspring_and_fspring_B[0]
Returns the spring energy calculated in NM representation for distinguishable particles. For bosons, get the first element of vspring_and_fspring_B[0] For a mixture of both, calculate separately and combine.
625941b930dc7b76659017dc
@tf_export("summary.record_summaries", v1=[]) <NEW_LINE> @tf_contextlib.contextmanager <NEW_LINE> def record_summaries(boolean=True): <NEW_LINE> <INDENT> global _SHOULD_RECORD_SUMMARIES <NEW_LINE> key = ops.get_default_graph()._graph_key <NEW_LINE> old = _SHOULD_RECORD_SUMMARIES.setdefault(key, False) <NEW_LINE> try: <NEW_LINE> <INDENT> _SHOULD_RECORD_SUMMARIES[key] = boolean <NEW_LINE> yield <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> _SHOULD_RECORD_SUMMARIES[key] = old
Sets summary recording on or off per the provided boolean value. The provided value can be a python boolean, a scalar boolean Tensor, or a callable providing such a value; if a callable is passed it will be invoked each time should_record_summaries() is called to determine whether summary writing should be enabled. Args: boolean: can be True, False, a bool Tensor, or a callable providing such. Defaults to True. Yields: Returns a context manager that sets this value on enter and restores the previous value on exit.
625941b9293b9510aa2c310b
def successful_signing_test(self): <NEW_LINE> <INDENT> privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA'] <NEW_LINE> inputs = [ {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0, 'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'}, ] <NEW_LINE> outputs = {convert_btc_address_to_btn('mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'): 0.1} <NEW_LINE> rawTx = self.nodes[0].createrawtransaction(inputs, outputs) <NEW_LINE> rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys) <NEW_LINE> assert 'complete' in rawTxSigned <NEW_LINE> assert_equal(rawTxSigned['complete'], True) <NEW_LINE> assert 'errors' not in rawTxSigned <NEW_LINE> dummyTxInconsistent = self.nodes[0].createrawtransaction([inputs[0]], outputs) <NEW_LINE> rawTxUnsigned = self.nodes[0].signrawtransaction(rawTx + dummyTxInconsistent, inputs) <NEW_LINE> assert 'complete' in rawTxUnsigned <NEW_LINE> assert_equal(rawTxUnsigned['complete'], False) <NEW_LINE> rawTxSigned2 = self.nodes[0].signrawtransaction(rawTxUnsigned["hex"] + dummyTxInconsistent + rawTxSigned["hex"], inputs) <NEW_LINE> assert 'complete' in rawTxSigned2 <NEW_LINE> assert_equal(rawTxSigned2['complete'], True) <NEW_LINE> assert 'errors' not in rawTxSigned2
Creates and signs a valid raw transaction with one input. Expected results: 1) The transaction has a complete set of signatures 2) No script verification error occurred
625941b985dfad0860c3accb
def genere_balise_fin_image(): <NEW_LINE> <INDENT> return "</svg>"
Returns the string corresponding to the closing svg tag. This tag must be placed after all the elements describing the image, just before the end of the file.
625941b9283ffb24f3c5577e
def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> loop = kwargs.pop('loop', None) <NEW_LINE> self.loop = loop or asyncio.get_event_loop() <NEW_LINE> protocol.WebSocketServerFactory.__init__(self, *args, **kwargs)
.. note:: In addition to all arguments to the constructor of :meth:`autobahn.websocket.interfaces.IWebSocketServerChannelFactory`, you can supply a ``loop`` keyword argument to specify the asyncio event loop to be used.
625941b94e696a04525c92c6
def test_write_id_index_overflow(self): <NEW_LINE> <INDENT> res = 0 <NEW_LINE> time_sample = 0 <NEW_LINE> morton = 11 <NEW_LINE> id = 4 <NEW_LINE> version = 0 <NEW_LINE> last_partition_key = 2 <NEW_LINE> rev_id = 224 <NEW_LINE> no_rev_id = None <NEW_LINE> max_capacity = 100 <NEW_LINE> obj_key = AWSObjectStore.generate_object_key( self.resource, res, time_sample, morton) <NEW_LINE> chan_key = self.obj_ind.generate_channel_id_key(self.resource, res, id) <NEW_LINE> key_parts = AWSObjectStore.get_object_key_parts(obj_key) <NEW_LINE> with patch.multiple( self.obj_ind, get_last_partition_key_and_rev_id=DEFAULT, lookup=DEFAULT, write_cuboid=DEFAULT, update_last_partition_key=DEFAULT ) as mocks: <NEW_LINE> <INDENT> mocks['get_last_partition_key_and_rev_id'].return_value = ( last_partition_key, rev_id ) <NEW_LINE> mocks['write_cuboid'].return_value = last_partition_key + 1 <NEW_LINE> mocks['lookup'].return_value = (False, -1) <NEW_LINE> self.obj_ind.write_id_index(max_capacity, obj_key, id, version) <NEW_LINE> mocks['write_cuboid'].assert_called_with( max_capacity, str(morton), chan_key, last_partition_key, rev_id, ANY, version) <NEW_LINE> mocks['update_last_partition_key'].assert_called_with( chan_key, last_partition_key + 1, version)
Case where a new Dynamo key needs to be created because the current key is full. The LAST_PARTITION_KEY should be updated.
625941b907f4c71912b112f9
def calculate_alignments(self, words, features): <NEW_LINE> <INDENT> for feature in features: <NEW_LINE> <INDENT> denom = 0.0 <NEW_LINE> for word in words: <NEW_LINE> <INDENT> denom += self._learned_lexicon.prob(word, feature, self._decay, self._time) <NEW_LINE> <DEDENT> denom += self._beta * self._alpha <NEW_LINE> for word in words: <NEW_LINE> <INDENT> alignment = self._learned_lexicon.prob(word, feature, self._decay, self._time) <NEW_LINE> alignment += self._alpha <NEW_LINE> alignment /= denom <NEW_LINE> alignment *= self._learned_lexicon.novelty(word) if self._novelty else 1 <NEW_LINE> self._learned_lexicon.update_association(word, feature, alignment, self._decay, self._time) <NEW_LINE> <DEDENT> <DEDENT> for word in words: <NEW_LINE> <INDENT> self._learned_lexicon.add_seen_features(word, features)
Update the alignments between words and features. Update the alignments for each combination of word-feature pairs from the list words and the set features: alignment: P(a|u,f) = (p(f|w) + alpha) / (sum(w' in words) p(f|w') + beta * alpha)
625941b9be383301e01b52ff
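A numeric sketch of the smoothed alignment update above for one feature shared by two words (the probabilities and smoothing constants are hypothetical):

alpha, beta = 0.1, 100.0
p = {"ball": 0.30, "dog": 0.05}            # p(f|w) for a single feature f
denom = sum(p.values()) + beta * alpha     # 0.35 + 10.0 = 10.35
align_ball = (p["ball"] + alpha) / denom   # ~0.0386
align_dog = (p["dog"] + alpha) / denom     # ~0.0145
# "ball" receives the larger share of alignment mass for this feature.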
def test_tag_str(self): <NEW_LINE> <INDENT> tag = models.Tag.objects.create( user=sample_user(), name='Home roasting' ) <NEW_LINE> self.assertEqual(str(tag), tag.name)
Test the tag string representation
625941b9851cf427c661a387
def add_dictval_to_list(adict, key, alist): <NEW_LINE> <INDENT> if key in adict: <NEW_LINE> <INDENT> if isinstance(adict[key], six.string_types): <NEW_LINE> <INDENT> alist.append(adict[key]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> alist.extend(adict[key])
Add a value from a dictionary to a list :param adict: dictionary :param key: key of value :param alist: list where value should be added
625941b9b5575c28eb68de70
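Example of the string-versus-list branching:

items = []
add_dictval_to_list({"tags": "red"}, "tags", items)              # string: appended whole
add_dictval_to_list({"tags": ["green", "blue"]}, "tags", items)  # list: extended
add_dictval_to_list({}, "tags", items)                           # missing key: no-op
assert items == ["red", "green", "blue"]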
def load_embed_txt(embed_file): <NEW_LINE> <INDENT> emb_dict = dict() <NEW_LINE> emb_size = None <NEW_LINE> with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, 'rb')) as f: <NEW_LINE> <INDENT> for line in f: <NEW_LINE> <INDENT> tokens = line.strip().split(" ") <NEW_LINE> word = tokens[0] <NEW_LINE> vec = list(map(float, tokens[1:])) <NEW_LINE> if emb_size is None: <NEW_LINE> <INDENT> emb_size = len(vec) <NEW_LINE> <DEDENT> if emb_size != len(vec): <NEW_LINE> <INDENT> print("Unexpected embedding size (%d) for word (%s)" % (len(vec), word)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> emb_dict[word] = vec <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return emb_dict, emb_size
Load embed_file into a python dictionary. Note: the embed_file should be a GloVe-formatted txt file. Assuming embed_size=5, for example: the -0.071549 0.093459 0.023738 -0.090339 0.056123 to 0.57346 0.5417 -0.23477 -0.3624 0.4037 and 0.20327 0.47348 0.050877 0.002103 0.060547 Args: embed_file: file path to the embedding file. Returns: a dictionary that maps word to vector, and the size of embedding dimensions.
625941b98e71fb1e9831d61f
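Usage sketch for load_embed_txt with a tiny GloVe-style file (the filename and contents are hypothetical):

# embeddings.txt contains, e.g.:
#   the -0.07 0.09 0.02
#   to 0.57 0.54 -0.23
emb_dict, emb_size = load_embed_txt("embeddings.txt")
print(emb_size)         # 3
print(emb_dict["the"])  # [-0.07, 0.09, 0.02]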
def contains(self, sphere): <NEW_LINE> <INDENT> for item in self.container: <NEW_LINE> <INDENT> if item == sphere: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> if item.center == sphere.center and item.radius == sphere.radius: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False
True if the grid contains the sphere
625941b92ae34c7f2600cfa4
def set_key(self, keygrip): <NEW_LINE> <INDENT> self.keygrip = keygrip
Set hexadecimal keygrip for next operation.
625941b9711fe17d825421e5
def read_har_file(filename): <NEW_LINE> <INDENT> fhandle = open(filename) <NEW_LINE> try: <NEW_LINE> <INDENT> har = json.loads(fhandle.read(), object_hook=encode_strings) <NEW_LINE> <DEDENT> except Exception as oops: <NEW_LINE> <INDENT> sys.stderr.write("Unable to parse %s\n\n" % filename) <NEW_LINE> sys.stderr.write(str(oops)) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> fhandle.close() <NEW_LINE> <DEDENT> return har2hdrs(har)
Read filename and return the header dictionaries for it.
625941b96e29344779a62487
def is_ip(addr): <NEW_LINE> <INDENT> if '.' not in addr: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> parts = addr.split('.') <NEW_LINE> for part in parts: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> int(part) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True
Determine if a string is really an ip, or a hostname instead. Args: addr (str): The ip address string to check Returns: bool: Whether or not `addr` is a valid ip.
625941b9cc40096d615957c6
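Example checks for is_ip; note the helper only validates that dot-separated parts are integers, not octet count or range:

assert is_ip("192.168.0.1") is True
assert is_ip("example.com") is False  # non-numeric parts
assert is_ip("999.1") is True         # digits plus a dot are enough to pass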
def __init__( self, more_items_remaining=None, total_item_count=None, continuation_token=None, items=None, ): <NEW_LINE> <INDENT> if more_items_remaining is not None: <NEW_LINE> <INDENT> self.more_items_remaining = more_items_remaining <NEW_LINE> <DEDENT> if total_item_count is not None: <NEW_LINE> <INDENT> self.total_item_count = total_item_count <NEW_LINE> <DEDENT> if continuation_token is not None: <NEW_LINE> <INDENT> self.continuation_token = continuation_token <NEW_LINE> <DEDENT> if items is not None: <NEW_LINE> <INDENT> self.items = items
Keyword args: more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved. total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned. continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified). items (list[TargetProtectionGroup]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
625941b957b8e32f52483313
def __init__(self, executable_path, port=0): <NEW_LINE> <INDENT> self.port = port <NEW_LINE> self.path = executable_path <NEW_LINE> if self.port == 0: <NEW_LINE> <INDENT> self.port = utils.free_port()
Creates a new instance of the Service :Args: - executable_path : Path to the OperaDriver - port : Port the service is running on
625941b9ec188e330fd5a618
def has_perm(self, user): <NEW_LINE> <INDENT> for dashboard in self.registry: <NEW_LINE> <INDENT> if dashboard.has_perm(user): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False
True if there is at least one available dashboard.
625941b98e71fb1e9831d620
def _create_attachment(self, cr, uid, data, context=None): <NEW_LINE> <INDENT> attachment_obj = self.pool.get('ir.attachment') <NEW_LINE> vals = { 'name': 'pain001_%s' % time.strftime("%Y-%m-%d_%H:%M:%S", time.gmtime()), 'datas': data['base64_data'], 'datas_fname': 'pain001_%s.xml' % time.strftime( "%Y-%m-%d_%H:%M:%S", time.gmtime()), 'res_model': data['model'], 'res_id': data['id'], } <NEW_LINE> attachment_obj.create(cr, uid, vals, context=context)
Create an attachment using the data provided. Data needed are: - model : type of object to attach to - id : id of object model - base64_data
625941b992d797404e303ffc
def find_centroid(cluster): <NEW_LINE> <INDENT> location_list=[restaurant_location(i) for i in cluster] <NEW_LINE> latitude=mean([i[0] for i in location_list]) <NEW_LINE> longitude=mean([i[1] for i in location_list]) <NEW_LINE> return [latitude,longitude]
Return the centroid of the locations of the restaurants in cluster.
625941b932920d7e50b2803f
def command(cmd, **kwargs): <NEW_LINE> <INDENT> message = create(protobuf.SEND_COMMAND_MESSAGE) <NEW_LINE> send_command = message.inner() <NEW_LINE> send_command.command = cmd <NEW_LINE> for key, value in kwargs.items(): <NEW_LINE> <INDENT> setattr(send_command.options, key, value) <NEW_LINE> <DEDENT> return message
Playback command request.
625941b97d847024c06be133
def add(self, key): <NEW_LINE> <INDENT> if key in self._data: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._data[key] = 1
:type key: int :rtype: None
625941b9377c676e9127201d
def execution_time_and_output(function, n_times=1, *args, **kwargs): <NEW_LINE> <INDENT> start = time.time() <NEW_LINE> retval = function(*args, **kwargs) <NEW_LINE> elapsed = time.time() - start <NEW_LINE> return round(elapsed, 3), retval
Return the execution time of a function in seconds together with its output.
625941b93c8af77a43ae3610
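For example (note that n_times is accepted but not used by the body above):

elapsed, value = execution_time_and_output(sum, 1, range(1_000_000))
# elapsed: wall-clock seconds rounded to 3 decimals; value == 499999500000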
def to_dict(self, target_dict=None): <NEW_LINE> <INDENT> if target_dict is None: <NEW_LINE> <INDENT> target_dict = self.storage <NEW_LINE> <DEDENT> result_dict = dict() <NEW_LINE> def to_inner_dict(actual_value): <NEW_LINE> <INDENT> if hasattr(actual_value, 'to_dict'): <NEW_LINE> <INDENT> return actual_value.to_dict() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return actual_value <NEW_LINE> <DEDENT> <DEDENT> for key, value in target_dict.iteritems(): <NEW_LINE> <INDENT> if value is not None: <NEW_LINE> <INDENT> if isinstance(value, dict): <NEW_LINE> <INDENT> result_dict[key] = self.to_dict(target_dict=value) <NEW_LINE> <DEDENT> elif isinstance(value, list): <NEW_LINE> <INDENT> temp = list() <NEW_LINE> for item in value: <NEW_LINE> <INDENT> temp.append(to_inner_dict(actual_value=item)) <NEW_LINE> <DEDENT> result_dict[key] = temp <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result_dict[key] = to_inner_dict(actual_value=value) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return result_dict
Recursive serialization to dict :param target_dict: :return:
625941b96fece00bbac2d5ae
def BeginInvoke(self, sender, e, callback, object): <NEW_LINE> <INDENT> pass
BeginInvoke(self: PagesChangedEventHandler, sender: object, e: PagesChangedEventArgs, callback: AsyncCallback, object: object) -> IAsyncResult
625941b9796e427e537b0435
def rename_files(path: str): <NEW_LINE> <INDENT> old_content = os.listdir(path) <NEW_LINE> new_content = [re.sub(r".*[sS](\d\d)[eE](\d\d).*\.(.+)$", r"S\1E\2.\3", f) for f in old_content] <NEW_LINE> print("Here are the changes that will be applied: ") <NEW_LINE> for (old, new) in zip(old_content, new_content): <NEW_LINE> <INDENT> print(old, " ===> ", new) <NEW_LINE> <DEDENT> choice = input("Continue? (y/n): ") <NEW_LINE> if choice.lower() == 'y': <NEW_LINE> <INDENT> for (old, new) in zip(old_content, new_content): <NEW_LINE> <INDENT> os.rename(path + old, path + new) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> exit(0)
Rename the files in the path from the long format to the short format (see the description of the program up there).
625941b91f037a2d8b946072
def mount_procfs(newroot, target='/proc'): <NEW_LINE> <INDENT> while target.startswith('/'): <NEW_LINE> <INDENT> target = target[1:] <NEW_LINE> <DEDENT> mnt_flags = [ mount.MS_NODEV, mount.MS_NOEXEC, mount.MS_NOSUID, mount.MS_RELATIME, ] <NEW_LINE> return mount.mount( source='proc', target=os.path.join(newroot, target), fs_type='proc', mnt_flags=mnt_flags, )
Mounts procfs on directory.
625941b93d592f4c4ed1cef2
def createPodActions(self, statements): <NEW_LINE> <INDENT> r = -1 <NEW_LINE> try: <NEW_LINE> <INDENT> if not statements: raise ParsingError(EMPTY_NOTE) <NEW_LINE> main = statements[0] <NEW_LINE> aRes = self.Rex.action.match(main) <NEW_LINE> if not aRes: <NEW_LINE> <INDENT> raise ParsingError(BAD_STATEMENT % main) <NEW_LINE> <DEDENT> statementName, podElem, minus, actionType, subExpr = aRes.groups() <NEW_LINE> if not (podElem in PodElement.POD_ELEMS): <NEW_LINE> <INDENT> raise ParsingError(BAD_ELEMENT % podElem) <NEW_LINE> <DEDENT> if minus and (not podElem in PodElement.MINUS_ELEMS): <NEW_LINE> <INDENT> raise ParsingError(BAD_MINUS % (podElem,PodElement.MINUS_ELEMS)) <NEW_LINE> <DEDENT> i = self.getIndex(podElem) <NEW_LINE> if i == -1: <NEW_LINE> <INDENT> raise ParsingError(ELEMENT_NOT_FOUND % (podElem, str([ e.__class__.__name__.lower() for e in self.elements.values()]))) <NEW_LINE> <DEDENT> podElem = self.elements[i] <NEW_LINE> self.action = self.createPodAction(actionType, statements, statementName, subExpr, podElem, minus) <NEW_LINE> fromClause = last = None <NEW_LINE> for statement in statements[1:]: <NEW_LINE> <INDENT> if statement.startswith('from') or statement.startswith('from+'): <NEW_LINE> <INDENT> fromInfo = self.Rex.from_.match(statement) <NEW_LINE> if not fromInfo: <NEW_LINE> <INDENT> raise ParsingError(BAD_FROM_CLAUSE % fromClause) <NEW_LINE> <DEDENT> fromClause = fromInfo.groups() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> info = self.Rex.subAction.match(statement) <NEW_LINE> if not info: <NEW_LINE> <INDENT> raise ParsingError(BAD_SUB_STATEMENT % statement) <NEW_LINE> <DEDENT> actionType, subExpr = info.groups() <NEW_LINE> last = self.createPodAction(actionType, statements, '', subExpr, podElem, None, main=False) <NEW_LINE> self.action.addSubAction(last) <NEW_LINE> <DEDENT> <DEDENT> if fromClause: <NEW_LINE> <INDENT> target = last or self.action <NEW_LINE> target.setFrom(*fromClause) <NEW_LINE> <DEDENT> success, msg = self.action.check() <NEW_LINE> if not success: raise ParsingError(msg) <NEW_LINE> r = i <NEW_LINE> <DEDENT> except ParsingError as ppe: <NEW_LINE> <INDENT> PodError.dump(self, ppe, removeFirstLine=True) <NEW_LINE> <DEDENT> return r
Tries to create action(s) based on p_statements. If the statement is not correct, r_ is -1. Else, r_ is the index of the element within the buffer that is the object of the action(s).
625941b90a366e3fb873e68a
def _make_texts_(self, tick_list, text_list, f, g, dx_units, dy_units, angles, text_distance, text_size, manual_texts=[]): <NEW_LINE> <INDENT> for idx, u in enumerate(tick_list): <NEW_LINE> <INDENT> if dy_units[idx] < 0: <NEW_LINE> <INDENT> text_attr = [pyx.text.valign.middle, pyx.text.halign.right, text_size, pyx.trafo.rotate(angles[idx])] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> text_attr = [pyx.text.valign.middle, pyx.text.halign.left, text_size, pyx.trafo.rotate(angles[idx])] <NEW_LINE> <DEDENT> if self.axis_appear['full_angle'] == True: <NEW_LINE> <INDENT> if self.axis_appear['angle_tick_direction'] == 'outer': <NEW_LINE> <INDENT> text_attr = [pyx.text.valign.middle, pyx.text.halign.left, text_size, pyx.trafo.rotate(angles[idx])] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> text_attr = [pyx.text.valign.middle, pyx.text.halign.left, text_size, pyx.trafo.rotate(angles[idx])] <NEW_LINE> <DEDENT> <DEDENT> if self.axis_appear['text_horizontal_align_center'] == True: <NEW_LINE> <INDENT> text_attr = [pyx.text.valign.top, pyx.text.halign.center, text_size, pyx.trafo.rotate(angles[idx])] <NEW_LINE> <DEDENT> if len(manual_texts) > 0: <NEW_LINE> <INDENT> text_list.append((manual_texts[idx], f(u) + text_distance * dy_units[idx], g(u) - text_distance * dx_units[idx], text_attr)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> text_list.append((self._put_text_(u), f(u) + text_distance * dy_units[idx], g(u) - text_distance * dx_units[idx], text_attr))
Makes the list of text definitions for the given ticks: for each tick, a (text, x, y, attribute list) tuple is appended to text_list; entries from manual_texts replace the generated labels when provided.
625941b994891a1f4081b91b
def declareAdapterForProtocol(protocol, adapter, proto, depth=1): <NEW_LINE> <INDENT> adapt(protocol, IOpenProtocol) <NEW_LINE> adapt(proto, IOpenProtocol).addImpliedProtocol(protocol, bindAdapter(adapter, protocol), depth)
Declare that 'adapter' adapts objects providing 'proto' to 'protocol'.
625941b915baa723493c3de5
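A hypothetical sketch of a call site; `ITarget` and `ISource` stand in for real PyProtocols protocol objects, and `wrap` for an adapter callable taking the object to adapt (none of these names come from the source):

    def wrap(obj):
        # Illustrative adapter: return something that provides ITarget.
        return TargetWrapper(obj)   # TargetWrapper is hypothetical

    declareAdapterForProtocol(ITarget, wrap, ISource)
    # Afterwards, adapt(obj, ITarget) may succeed for ISource objects via wrap.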
@manager.command <NEW_LINE> def adduser(username, password): <NEW_LINE> <INDENT> _adduser(username, password)
Add a user with the given username and password.
625941b930dc7b76659017dd
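A minimal sketch of the Flask-Script wiring this command assumes; `app` and `_adduser` are hypothetical stand-ins for the project's own objects:

    # manage.py (hypothetical): Flask-Script turns the decorated function
    # into a CLI subcommand named after it.
    from flask import Flask
    from flask_script import Manager

    app = Flask(__name__)
    manager = Manager(app)

    def _adduser(username, password):
        print('would create user %s' % username)   # placeholder for the real helper

    @manager.command
    def adduser(username, password):
        """Add a user with the given username and password."""
        _adduser(username, password)

    if __name__ == '__main__':
        manager.run()   # e.g.: python manage.py adduser alice s3cret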
def import_data(self, input, password): <NEW_LINE> <INDENT> entrystore = data.EntryStore() <NEW_LINE> folders = {} <NEW_LINE> for line in input.splitlines()[1:]: <NEW_LINE> <INDENT> f_csv = csv.reader([line.decode()]) <NEW_LINE> for row in f_csv: <NEW_LINE> <INDENT> if len(row) != 6 and len(row) != 10: <NEW_LINE> <INDENT> raise base.FormatError <NEW_LINE> <DEDENT> if row[4]: <NEW_LINE> <INDENT> new_entry = entry.WebEntry() <NEW_LINE> new_entry[entry.URLField] = row[4] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_entry = entry.GenericEntry() <NEW_LINE> <DEDENT> new_entry.name = row[1] <NEW_LINE> new_entry[entry.UsernameField] = row[2] <NEW_LINE> new_entry[entry.PasswordField] = row[3] <NEW_LINE> new_entry.notes = row[5] <NEW_LINE> new_entry.updated = time.time() <NEW_LINE> if row[0] in folders: <NEW_LINE> <INDENT> parent = folders[row[0]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> folder = entry.FolderEntry() <NEW_LINE> folder.name = row[0] <NEW_LINE> parent = entrystore.add_entry(folder) <NEW_LINE> folders[row[0]] = parent <NEW_LINE> <DEDENT> entrystore.add_entry(new_entry, parent) <NEW_LINE> <DEDENT> <DEDENT> return entrystore
Import CSV data from input into an entry store. The first line (header) is skipped.
625941b94428ac0f6e5ba665
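A hedged round-trip for the importer above; `CSVImporter` is a hypothetical name for the class this method belongs to, and revelation's `entry` and `data` modules must be importable:

    # The header line is skipped by import_data; the columns it reads are
    # folder, name, username, password, url, notes.
    csv_bytes = (b"folder,name,username,password,url,notes\n"
                 b"Web,Example,alice,s3cret,https://example.com,first entry\n")
    store = CSVImporter().import_data(csv_bytes, password=None)
    # 'Web' becomes a FolderEntry; the row becomes a WebEntry under it,
    # because column 5 (the URL) is non-empty.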
def convert_prefixed_properties(self, doc, filename): <NEW_LINE> <INDENT> converted_properties = [] <NEW_LINE> inline_styles = doc.findAll(style=re.compile('.*')) <NEW_LINE> style_tags = doc.findAll('style') <NEW_LINE> all_styles = inline_styles + style_tags <NEW_LINE> for tag in all_styles: <NEW_LINE> <INDENT> style_text = '' <NEW_LINE> if tag.name == 'style': <NEW_LINE> <INDENT> if not tag.contents: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> style_text = tag.contents[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> style_text = tag['style'] <NEW_LINE> <DEDENT> updated_style_text = self.add_webkit_prefix_to_unprefixed_properties(style_text, filename) <NEW_LINE> if updated_style_text[0]: <NEW_LINE> <INDENT> converted_properties.extend(updated_style_text[0]) <NEW_LINE> new_tag = Tag(doc, tag.name, tag.attrs) <NEW_LINE> new_tag.insert(0, updated_style_text[1]) <NEW_LINE> self.replace_tag(tag, new_tag) <NEW_LINE> <DEDENT> <DEDENT> return (converted_properties, doc.prettify())
Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them. Returns the list of converted properties and the modified document as a string
625941b95510c4643540f26a
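A hedged call sketch; `converter` stands for an instance of the owning class (the name is not in the source), and the `Tag(doc, ...)` call above implies the old BeautifulSoup 3 API:

    from BeautifulSoup import BeautifulSoup   # BS3, matching the Tag(...) usage above
    doc = BeautifulSoup('<style>div { box-shadow: none; }</style>')
    props, html = converter.convert_prefixed_properties(doc, 'sample.html')
    # props lists every property that gained a -webkit- prefix; html is the
    # prettified document after conversion.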
def update_grid(self, *args): <NEW_LINE> <INDENT> grid = self.panel.gridExp <NEW_LINE> num_rows = len(model.instrument.inst.positions) <NEW_LINE> if grid.GetNumberRows() > num_rows: <NEW_LINE> <INDENT> grid.DeleteRows(0, grid.GetNumberRows()-num_rows) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> old_num_rows = grid.GetNumberRows() <NEW_LINE> grid.AppendRows(num_rows-grid.GetNumberRows()) <NEW_LINE> choices = model.experiment.get_stopping_criteria_names() <NEW_LINE> for row in xrange(old_num_rows, num_rows): <NEW_LINE> <INDENT> grid.SetCellEditor(row, self.criterion_col, wx.grid.GridCellChoiceEditor(choices)) <NEW_LINE> <DEDENT> <DEDENT> angle_font = wx.Font(10, 76, wx.NORMAL, wx.NORMAL, False, u'Monospace') <NEW_LINE> for (i, poscov) in enumerate(model.instrument.inst.positions): <NEW_LINE> <INDENT> row = i <NEW_LINE> grid.SetCellAlignment(row, 0, wx.ALIGN_CENTRE, wx.ALIGN_CENTRE ) <NEW_LINE> grid.SetReadOnly(row, 0, True) <NEW_LINE> for (j, angleinfo) in enumerate(model.instrument.inst.angles): <NEW_LINE> <INDENT> x = poscov.angles[j] <NEW_LINE> col = j+1 <NEW_LINE> grid.SetCellValue(row, col, u"%8.2f" % angleinfo.internal_to_friendly(x)) <NEW_LINE> grid.SetReadOnly(row, col, True) <NEW_LINE> grid.SetCellAlignment(row, col, wx.ALIGN_CENTRE, wx.ALIGN_CENTRE ) <NEW_LINE> grid.SetCellFont(row, col, angle_font) <NEW_LINE> <DEDENT> grid.SetCellValue(row, self.criterion_col, model.experiment.get_stopping_criterion_friendly_name(poscov.criterion)) <NEW_LINE> grid.SetCellValue(row, self.criterion_col+1, str(poscov.criterion_value)) <NEW_LINE> grid.SetCellValue(row, self.criterion_col+2, str(poscov.comment)) <NEW_LINE> <DEDENT> self.update_selection()
Fill the grid rows with data, and set the right editors.
625941b9627d3e7fe0d68cc2
def get_licenses_as_dict(self): <NEW_LINE> <INDENT> return json.load(self.data_source)
Get licenses as a Python dict from stdin or from a file. :return: licenses dict. Called by: get_email_and_company()
625941b9e8904600ed9f1d9c
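A usage sketch; `LicenseReader` is a hypothetical owner class whose `data_source` attribute is any file-like object containing JSON:

    import io
    reader = LicenseReader(data_source=io.StringIO('{"alice": "MIT"}'))
    reader.get_licenses_as_dict()   # -> {'alice': 'MIT'}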
def getAvailableSubtitleStreams(self): <NEW_LINE> <INDENT> return list()
Get subtitle stream names. :returns: List of subtitle stream names. :rtype: list
625941b9b57a9660fec336f3
def test_switch_class_value_off(): <NEW_LINE> <INDENT> test_name = "light" <NEW_LINE> test_value = "test" <NEW_LINE> switch = Switch(test_name, off=test_value) <NEW_LINE> assert len(switch.options) == 2 <NEW_LINE> assert switch.options[0] == test_value <NEW_LINE> assert switch.options[1] == "" <NEW_LINE> assert switch.name == test_name
Check that the Switch class returns the specified value for "off", defaults to the required value for "on", and stores the correct value for name.
625941b921bff66bcd6847c8
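What the test above pins down, as a sketch assuming a `Switch(name, on=..., off=...)` constructor:

    switch = Switch("light", off="test")
    # "off" lands in options[0]; "on" was not given, so options[1] defaults to "".
    assert list(switch.options) == ["test", ""]
    assert switch.name == "light"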
def vs(youngs=None, vp=None, rho=None, mu=None, lam=None, bulk=None, pr=None, pmod=None): <NEW_LINE> <INDENT> if (mu is not None) and (rho is not None): <NEW_LINE> <INDENT> return np.sqrt(mu / rho) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None
Computes Vs given bulk density and shear modulus. SI units only. Only mu and rho are used; the other arguments are accepted but ignored. Args: mu: shear modulus. rho: bulk density. Returns: Vs in m/s, or None if mu or rho is missing.
625941b950485f2cf553cc0c
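A runnable check of the mu/rho branch; the numbers are illustrative, not from the source (roughly quartz-like values):

    import numpy as np
    rho = 2650.0   # bulk density, kg/m^3
    mu = 44e9      # shear modulus, Pa
    print(np.sqrt(mu / rho))   # ~4075 m/s, a plausible Vs
    # vs(rho=rho, mu=mu) returns the same value; with either missing, it returns None.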
def get_args(): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser(description='Run CNN training on prostate segmentation.') <NEW_LINE> parser.add_argument('--run_id', type=int, help='Training sessions ID') <NEW_LINE> parser.add_argument('--dir_in', type=str, help='Path to input file') <NEW_LINE> parser.add_argument('--dir_out', type=str, help='Path to save training session') <NEW_LINE> parser.add_argument('--dir_retrieved_file', type=str, help='Specify this in case that we want to reuse parameters from other training sessions, ' 'Path to the training instance') <NEW_LINE> parser.add_argument('--retrieved_params', type=str, help='Name of parameters we want to retrieve, e.g. "base_lr, val_amount"') <NEW_LINE> parser.add_argument('--split', type=int, help='Split index') <NEW_LINE> parser.add_argument('--resumed_epoch', type=int, help='Epoch wished to resume') <NEW_LINE> parser.add_argument('--train_batch_size', type=int, help='Size of each mini-batch in training') <NEW_LINE> parser.add_argument('--val_batch_size', type=int, help='Size of each mini-batch in validation') <NEW_LINE> parser.add_argument('--train_amount', type=int, help='Size of training set') <NEW_LINE> parser.add_argument('--val_amount', type=int, help='Size of validation set') <NEW_LINE> parser.add_argument('--steps', type=int, help='Total training steps, default: 10000') <NEW_LINE> parser.add_argument('--gpu_id', type=int, help='GPUs used for training, e.g. 0, ' 'leave empty to use CPUs, default: 0') <NEW_LINE> parser.add_argument('--log_file', type=str, help='Name of the log file, default "log.txt"') <NEW_LINE> parser.add_argument('--training_data_name', type=str, help='Name of the training image file (include both training and validation set') <NEW_LINE> parser.add_argument('--training_gt_name', type=str, help='Name of the training ground truth file (include both training and validation set') <NEW_LINE> parser.add_argument('--to_review_network', type=bool, help='Get some information about the network, ' 'including detailed architecture, number of parameters') <NEW_LINE> parser.add_argument('--optimizer', type=str, help='Name of the optimizer for optimizing network, default: "adam"') <NEW_LINE> parser.add_argument('--loss_term', type=str, help='Name of the loss term we want to minimize, default: CrossEntropy') <NEW_LINE> parser.add_argument('--base_lr', type=float, help='Initial learning rate, default: 1e-3 or 0.001') <NEW_LINE> parser.add_argument('--wd', type=float, help='Weight decay, default: 5e-4 or 0.0005') <NEW_LINE> parser.add_argument('--seed', type=int, help='A number specifying which fixed seed is used for MXNet, Numpy and Python') <NEW_LINE> parser.add_argument('--log_interval', type=int, help='Logging interval, default: 5') <NEW_LINE> parser.add_argument('--save_interval', type=int, help='Number of epochs between each checkpoints, default: 1') <NEW_LINE> parser.add_argument('--val_interval', type=int, help='Number of epochs between each validation, default: 1') <NEW_LINE> parser.add_argument('--prefix', type=str, help='Checkpoint name prefix, default: "dmnet"') <NEW_LINE> args = parser.parse_args() <NEW_LINE> return args
Parse and return the command-line parameters for a CNN training session on prostate segmentation.
625941b9d486a94d0b98dfc0
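Typical use, assuming the function above sits in a training script (the script name and flag values are illustrative):

    # python train.py --run_id 1 --dir_in ./data --dir_out ./runs --gpu_id 0
    args = get_args()
    print(args.run_id, args.base_lr)   # options left unset come back as None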
def test_all_example_files_against_strict_schema(self): <NEW_LINE> <INDENT> self.folder_should_pass( schema=self.sportsjs_strict_schema, folder_name=EXAMPLE_FILES_FOLDER )
Run all files in EXAMPLE_FILES_FOLDER against the strict schema. They should all pass (i.e., they are all valid against the schema). We use "subTest" so we can see which file failed in the test output.
625941b923e79379d52ee3db
def keys(self): <NEW_LINE> <INDENT> return self.redis.hkeys('adhoc')
Return a list of session ID values.
625941b967a9b606de4a7d30
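A sketch of the assumed backing store: session IDs are the field names of a redis hash named 'adhoc'. This requires a reachable redis server and the redis-py client:

    import redis
    r = redis.Redis()
    r.hset('adhoc', 'session-123', 'payload')
    r.hkeys('adhoc')   # -> [b'session-123']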
def dns_hostname(uid): <NEW_LINE> <INDENT> return common.format_hostname('dns', uid)
Formats the hostname for a Docker container hosting DNS. NOTE: Hostnames are also used as Docker names!
625941b9fbf16365ca6f6030
def detect_language(text, languages): <NEW_LINE> <INDENT> text = text.split() <NEW_LINE> lang_count = {} <NEW_LINE> for lang in languages: <NEW_LINE> <INDENT> counter = 0 <NEW_LINE> for word in text: <NEW_LINE> <INDENT> if word in lang['common_words']: <NEW_LINE> <INDENT> counter += 1 <NEW_LINE> <DEDENT> <DEDENT> lang_count[lang['name']] = counter <NEW_LINE> print(lang['name'] + ' : ' + str(counter)) <NEW_LINE> <DEDENT> lang_max_count = 0 <NEW_LINE> lang_max_name = '' <NEW_LINE> for lang in lang_count: <NEW_LINE> <INDENT> if lang_max_count < lang_count[lang]: <NEW_LINE> <INDENT> lang_max_count = lang_count[lang] <NEW_LINE> lang_max_name = lang <NEW_LINE> <DEDENT> <DEDENT> return lang_max_name
Returns the detected language of the given text.
625941b9d18da76e23532345
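A usage sketch with a toy corpus; `detect_language` is the function above (note that it also prints per-language counts as a side effect):

    languages = [
        {'name': 'English', 'common_words': ['the', 'and', 'of']},
        {'name': 'Spanish', 'common_words': ['el', 'la', 'de']},
    ]
    detect_language('the cat and the dog', languages)   # -> 'English' (count 3)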
def end(self): <NEW_LINE> <INDENT> if not self.playing or self.ended: <NEW_LINE> <INDENT> log.warn('[%s] Invalid Track.end() call (playing: %s, ended: %s)', self.uri, self.playing, self.ended) <NEW_LINE> return <NEW_LINE> <DEDENT> log.debug('[%s] Sending "track_end" event (position: %s)', self.uri, self.position) <NEW_LINE> self.metadata.track_end(self.info['lid'], self.position) <NEW_LINE> self.ended = True
Send track end/completion events
625941b971ff763f4b549502