Columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
5,200
def _toggle_filming(self):
    if self._filming:
        self.log("Stopping operation")
        self._filming = False
        self.timer.stop()
    else:
        self.log("Starting operation")
        self._filming = True
        self.timer.start()
Toggles the camera system recording state
5,201
def reload(self):
    self.load(self.api.get(self.objName, self.key))
Reload: sync the full object.
5,202
def move_tab(self, index_from, index_to):
    client = self.clients.pop(index_from)
    self.clients.insert(index_to, client)
Move tab.
5,203
def verify_registration(request): user = process_verify_registration_data(request.data) extra_data = None if registration_settings.REGISTER_VERIFICATION_AUTO_LOGIN: extra_data = perform_login(request, user) return get_ok_response(, extra_data=extra_data)
Verify registration via signature.
5,204
def with_setup(setup=None, teardown=None):
    def decorate(func, setup=setup, teardown=teardown):
        if setup:
            if hasattr(func, 'setup'):
                _old_s = func.setup
                def _s():
                    setup()
                    _old_s()
                func.setup = _s
            else:
                func.setup = setup
        if teardown:
            if hasattr(func, 'teardown'):
                _old_t = func.teardown
                def _t():
                    _old_t()
                    teardown()
                func.teardown = _t
            else:
                func.teardown = teardown
        return func
    return decorate
Decorator to add setup and/or teardown methods to a test function:: @with_setup(setup, teardown) def test_something(): " ... " Note that `with_setup` is useful *only* for test functions, not for test methods or inside of TestCase subclasses.
5,205
def _check_success(self):
    cube_height = self.sim.data.body_xpos[self.cube_body_id][2]
    table_height = self.table_full_size[2]
    return cube_height > table_height + 0.10
Returns True if task is successfully completed
5,206
def write_str(data, sidx, pnames): start = time.time() tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name)) with h5py.File(tmparrs, ) as io5: snparr = io5["snparr"] bisarr = io5["bisarr"] bend = np.where(np.all(bisarr[:] == "", axis=0))[0] if np.any(bend): bend = bend.min() else: bend = bisarr.shape[1] send = np.where(np.all(snparr[:] == "", axis=0))[0] if np.any(send): send = send.min() else: send = snparr.shape[1] out1 = open(data.outfiles.str, ) out2 = open(data.outfiles.ustr, ) numdict = {: , : , : , : , : , : } if data.paramsdict["max_alleles_consens"] > 1: for idx, name in enumerate(pnames): out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]]))) out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][1]] for i in snparr[idx, :send]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][1]] for i in bisarr[idx, :bend]]))) else: for idx, name in enumerate(pnames): out1.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]]))) out2.write("{}\t\t\t\t\t{}\n"\ .format(name, "\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]]))) out1.close() out2.close() LOGGER.debug("finished writing str in: %s", time.time() - start)
Write STRUCTURE format for all SNPs and unlinked SNPs
5,207
def init(options, use_sigterm_handler=True): global _AUTH, _OPTIONS if isinstance(options, dict): _OPTIONS = DEFAULT_OPTIONS.copy() _OPTIONS.update(options) else: for optname, optvalue in DEFAULT_OPTIONS.iteritems(): if hasattr(options, optname): _OPTIONS[optname] = getattr(options, optname) else: _OPTIONS[optname] = optvalue if _OPTIONS[]: def fortytwo(request): "test GET method" return 42 def ping(request): "test POST method" return request.payload_params() register(fortytwo, ) register(ping, ) if _OPTIONS[]: _AUTH = HttpAuthentication(_OPTIONS[], realm = _OPTIONS[]).parse_file() for name, cmd in _COMMANDS.iteritems(): if cmd.safe_init: LOG.info("safe_init: %r", name) cmd.safe_init(_OPTIONS) if use_sigterm_handler: signal.signal(signal.SIGTERM, sigterm_handler) signal.signal(signal.SIGINT, sigterm_handler)
Must be called just after registration, before anything else
5,208
def matches_filters(self, node): visible = self.visible if self.options["text"]: if isregex(self.options["text"]): regex = self.options["text"] elif self.exact_text is True: regex = re.compile(r"\A{}\Z".format(re.escape(self.options["text"]))) else: regex = toregex(self.options["text"]) text = normalize_text( node.all_text if visible == "all" else node.visible_text) if not regex.search(text): return False if isinstance(self.exact_text, (bytes_, str_)): regex = re.compile(r"\A{}\Z".format(re.escape(self.exact_text))) text = normalize_text( node.all_text if visible == "all" else node.visible_text) if not regex.search(text): return False if visible == "visible": if not node.visible: return False elif visible == "hidden": if node.visible: return False for name, node_filter in iter(self._node_filters.items()): if name in self.filter_options: if not node_filter.matches(node, self.filter_options[name]): return False elif node_filter.has_default: if not node_filter.matches(node, node_filter.default): return False if self.options["filter"] and not self.options["filter"](node): return False return True
Returns whether the given node matches all filters. Args: node (Element): The node to evaluate. Returns: bool: Whether the given node matches.
5,209
def create_LM_hashed_password_v1(passwd): if re.match(r, passwd): return binascii.unhexlify(passwd.split()[0]) passwd = passwd.upper() lm_pw = passwd + * (14 - len(passwd)) lm_pw = passwd[0:14] magic_str = b"KGS!@ res = b dobj = des.DES(lm_pw[0:7]) res = res + dobj.encrypt(magic_str) dobj = des.DES(lm_pw[7:14]) res = res + dobj.encrypt(magic_str) return res
create LanManager hashed password
5,210
def get_pages_for_display(self): all_pages = Page.objects.none() if self.max_levels == 1: return all_pages for item in self.top_level_items: if item.link_page_id: page_depth = item.link_page.depth if( item.allow_subnav and page_depth >= settings.SECTION_ROOT_DEPTH ): all_pages = all_pages | Page.objects.filter( depth__gt=page_depth, depth__lt=page_depth + self.max_levels, path__startswith=item.link_page.path) all_pages = all_pages & self.get_base_page_queryset() if self.use_specific == constants.USE_SPECIFIC_ALWAYS: return all_pages.specific() return all_pages
Return all pages needed for rendering all sub-levels for the current menu
5,211
def dot(self, other_tf): if other_tf.to_frame != self.from_frame: raise ValueError(.format(other_tf.to_frame, self.from_frame)) if not isinstance(other_tf, RigidTransform): raise ValueError() other_scale = 1.0 if isinstance(other_tf, SimilarityTransform): other_scale = other_tf.scale rotation = self.rotation.dot(other_tf.rotation) translation = self.translation + self.scale * self.rotation.dot(other_tf.translation) scale = self.scale * other_scale return SimilarityTransform(rotation, translation, scale, from_frame=other_tf.from_frame, to_frame=self.to_frame)
Compose this similarity transform with another. This transform is on the left-hand side of the composition. Parameters ---------- other_tf : :obj:`SimilarityTransform` The other SimilarityTransform to compose with this one. Returns ------- :obj:`SimilarityTransform` A SimilarityTransform that represents the composition. Raises ------ ValueError If the to_frame of other_tf is not identical to this transform's from_frame.
5,212
def event_text_key(self, event):
    char = event.char
    if not char or char not in string.ascii_letters:
        return
    converted_char = invert_shift(char)
    log.debug("convert keycode %s - char %s to %s",
              event.keycode, repr(char), converted_char)
    self.text.insert(tkinter.INSERT, converted_char)
    return "break"
Do an "invert shift" for user input: convert all lowercase letters to uppercase and vice versa.
5,213
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'key_as_string') and self.key_as_string is not None:
        _dict['key_as_string'] = datetime_to_string(self.key_as_string)
    if hasattr(self, 'key') and self.key is not None:
        _dict['key'] = self.key
    if hasattr(self, 'matching_results') and self.matching_results is not None:
        _dict['matching_results'] = self.matching_results
    if hasattr(self, 'event_rate') and self.event_rate is not None:
        _dict['event_rate'] = self.event_rate
    return _dict
Return a json dictionary representing this model.
5,214
def descend(self, include_me=True):
    if include_me:
        yield self
    for child in self.child_list:
        yield child
        # descend into grandchildren without re-yielding the child itself
        yield from child.descend(include_me=False)
Descend depth first into all child nodes
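A minimal usage sketch for a generator like descend above; the Node class and its fields are hypothetical (only child_list mirrors the row), assuming descend skips re-yielding each child in the recursive call:

    class Node:
        def __init__(self, name, children=None):
            self.name = name
            self.child_list = children or []

        def descend(self, include_me=True):
            if include_me:
                yield self
            for child in self.child_list:
                yield child
                yield from child.descend(include_me=False)

    root = Node("root", [Node("a", [Node("a1")]), Node("b")])
    print([n.name for n in root.descend()])  # ['root', 'a', 'a1', 'b']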
5,215
def anderson(*args, dist='norm'):
    # 'norm' default and boolean dtype reconstructed from the docstring; treat as assumptions
    from scipy.stats import anderson as ads
    k = len(args)
    from_dist = np.zeros(k, 'bool')
    sig_level = np.zeros(k)
    for j in range(k):
        st, cr, sig = ads(args[j], dist=dist)
        from_dist[j] = True if (st > cr).any() else False
        sig_level[j] = sig[np.argmin(np.abs(st - cr))]
    if k == 1:
        from_dist = bool(from_dist)
        sig_level = float(sig_level)
    return from_dist, sig_level
Anderson-Darling test of distribution. Parameters ---------- sample1, sample2,... : array_like Array of sample data. May be different lengths. dist : string Distribution ('norm', 'expon', 'logistic', 'gumbel') Returns ------- from_dist : boolean True if data comes from this distribution. sig_level : float The significance levels for the corresponding critical values in %. (See :py:func:`scipy.stats.anderson` for more details) Examples -------- 1. Test that an array comes from a normal distribution >>> from pingouin import anderson >>> x = [2.3, 5.1, 4.3, 2.6, 7.8, 9.2, 1.4] >>> anderson(x, dist='norm') (False, 15.0) 2. Test that two arrays comes from an exponential distribution >>> y = [2.8, 12.4, 28.3, 3.2, 16.3, 14.2] >>> anderson(x, y, dist='expon') (array([False, False]), array([15., 15.]))
5,216
def request(self, method, api_url, params={}, **kwargs): LOG.debug("axapi_http: full url = %s", self.url_base + api_url) LOG.debug("axapi_http: %s url = %s", method, api_url) LOG.debug("axapi_http: params = %s", json.dumps(logutils.clean(params), indent=4)) if params: extra_params = kwargs.get(, {}) params_copy = merge_dicts(params, extra_params) LOG.debug("axapi_http: params_all = %s", logutils.clean(params_copy)) payload = json.dumps(params_copy) else: try: payload = kwargs.pop(, None) self.headers = dict(self.HEADERS, **kwargs.pop(, {})) LOG.debug("axapi_http: headers_all = %s", logutils.clean(self.headers)) except KeyError: payload = None max_retries = kwargs.get(, self.max_retries) timeout = kwargs.get(, self.timeout) session = Session() if self.port == 443: session.mount(, SSLAdapter(max_retries=max_retries)) else: session.mount(, HTTPAdapter(max_retries=max_retries)) session_request = getattr(session, method.lower()) try: device_response = session_request( self.url_base + api_url, verify=False, data=payload, headers=self.HEADERS, timeout=timeout ) except (Exception) as e: LOG.error("acos_client failing with error %s after %s retries", e.__class__.__name__, max_retries) raise e finally: session.close() if device_response in broken_replies: device_response = broken_replies[device_response] LOG.debug("axapi_http: broken reply, new response: %s", logutils.clean(device_response)) try: json_response = device_response.json() LOG.debug("axapi_http: data = %s", json.dumps(logutils.clean(json_response), indent=4)) except ValueError as e: LOG.debug("axapi_http: json = %s", e) return device_response if in json_response and in json_response[]: if json_response[][] == : acos_responses.raise_axapi_ex(json_response, action=extract_method(api_url)) return json_response
Generate the API call to the device.
5,217
def remove_port_channel(self, **kwargs): port_int = kwargs.pop() callback = kwargs.pop(, self._callback) if re.search(, port_int) is None: raise ValueError( % repr(port_int)) port_channel = getattr(self._interface, ) port_channel_args = dict(name=port_int) config = port_channel(**port_channel_args) delete_channel = config.find() delete_channel.set(, ) return callback(config)
Remove a port channel interface. Args: port_int (str): port-channel number (1, 2, 3, etc). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `port_int` is not passed. ValueError: if `port_int` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.channel_group(name='225/0/20', ... int_type='tengigabitethernet', ... port_int='1', channel_type='standard', mode='active') ... output = dev.interface.remove_port_channel( ... port_int='1')
5,218
def add_segmented_colorbar(da, colors, direction):
    nbreak = len(colors)
    if direction == 'vertical':  # branch label assumed; the height-based layout implies vertical
        linewidth = da.height / nbreak
        verts = [None] * nbreak
        x1, x2 = 0, da.width
        for i, color in enumerate(colors):
            y1 = i * linewidth
            y2 = y1 + linewidth
            verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
    else:
        linewidth = da.width / nbreak
        verts = [None] * nbreak
        y1, y2 = 0, da.height
        for i, color in enumerate(colors):
            x1 = i * linewidth
            x2 = x1 + linewidth
            verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
    coll = mcoll.PolyCollection(verts, facecolors=colors,
                                linewidth=0, antialiased=False)
    da.add_artist(coll)
Add 'non-rastered' colorbar to DrawingArea
5,219
def unpublish(self, daap_server):
    if daap_server not in self.daap_servers:
        return
    self.zeroconf.unregister_service(self.daap_servers[daap_server])
    del self.daap_servers[daap_server]
Unpublish a given server. If the server was not published, this method will not do anything. :param DAAPServer daap_server: DAAP Server instance to publish.
5,220
def wc_dict2lha(wc, skip_redundant=True, skip_zero=True): d = OrderedDict() for name, (block, i) in WC_dict_0f.items(): if block not in d: d[block] = defaultdict(list) if wc[name] != 0: d[block][].append([i, wc[name].real]) for name in definitions.WC_keys_2f: reblock = +name.upper() imblock = +name.upper() if reblock not in d: d[reblock] = defaultdict(list) if imblock not in d: d[imblock] = defaultdict(list) for i in range(3): for j in range(3): if (i, j) in definitions.redundant_elements[name] and skip_redundant: continue if wc[name][i, j].real != 0 or not skip_zero: d[reblock][].append([i+1, j+1, float(wc[name][i, j].real)]) if wc[name][i, j].imag != 0 or not skip_zero: if (i, j) not in definitions.vanishing_im_parts[name]: d[imblock][].append([i+1, j+1, float(wc[name][i, j].imag)]) for name in definitions.WC_keys_4f: reblock = +name.upper() imblock = +name.upper() if reblock not in d: d[reblock] = defaultdict(list) if imblock not in d: d[imblock] = defaultdict(list) for i in range(3): for j in range(3): for k in range(3): for l in range(3): if (i, j, k, l) in definitions.redundant_elements[name] and skip_redundant: continue if wc[name][i, j, k, l].real != 0 or not skip_zero: d[reblock][].append([i+1, j+1, k+1, l+1, float(wc[name][i, j, k, l].real)]) if wc[name][i, j, k, l].imag != 0 or not skip_zero: if (i, j, k, l) not in definitions.vanishing_im_parts[name]: d[imblock][].append([i+1, j+1, k+1, l+1, float(wc[name][i, j, k, l].imag)]) empty = [] for block in d: if d[block] == {}: empty.append(block) for block in empty: del d[block] return {: d}
Convert a dictionary of Wilson coefficients into a dictionary that pylha can convert into a DSixTools WC output file.
5,221
def dimension(self): if self.dim > -1: return self.dim d = None if self.dim != -1 and not self._estimated: d = self.dim elif self._estimated: dim = len(self.eigenvalues) if self.var_cutoff < 1.0: dim = min(dim, np.searchsorted(self.cumvar, self.var_cutoff) + 1) d = dim elif self.var_cutoff == 1.0: d = self.data_producer.dimension() else: raise RuntimeError( ) return d
output dimension
5,222
def getInspectorActionById(self, identifier):
    for action in self.inspectorActionGroup.actions():
        if action.data() == identifier:
            return action
    raise KeyError("No action found with ID: {!r}".format(identifier))
Sets the inspector and draw the contents Triggers the corresponding action so that it is checked in the menus.
5,223
def suspend(self, instance_id):
    nt_ks = self.compute_conn
    response = nt_ks.servers.suspend(instance_id)
    return True
Suspend a server
5,224
def createIndex(self, table, fields, where = , whereValues = []) : versioTest = sq.sqlite_version_info[0] >= 3 and sq.sqlite_version_info[1] >= 8 if len(where) > 0 and not versioTest : sys.stderr.write("WARNING: IGNORING THE \"WHERE\" CLAUSE in INDEX. Partial indexes where only implemented in sqlite 3.8.0+, your version is: %s. Sorry about that.\n" % sq.sqlite_version) indexTable = self.makeIndexTableName(table, fields) else : indexTable = self.makeIndexTableName(table, fields, where, whereValues) if type(fields) is types.ListType : sql = "CREATE INDEX IF NOT EXISTS %s on %s(%s)" %(indexTable, table, .join(fields)) else : sql = "CREATE INDEX IF NOT EXISTS %s on %s(%s)" %(indexTable, table, fields) if len(where) > 0 and versioTest: sql = "%s WHERE %s;" % (sql, where) self.execute(sql, whereValues) else : self.execute(sql)
Creates indexes for Raba Class fields, resulting in significantly faster SELECTs but potentially slower UPDATES/INSERTS and a bigger DB. Fields can be a list of fields for multi-column indices, or simply the name of one single field. With the where clause you can create a partial index by adding conditions ----- only for sqlite 3.8.0+ where : optional ex: name = ? AND hair_color = ? whereValues : optional, ex: ["britney", 'black']
5,225
def run(self, dag): coupling_map = self._coupling_map ordered_virtual_gates = list(dag.serial_layers()) if self.initial_layout is None: if self.property_set["layout"]: self.initial_layout = self.property_set["layout"] else: self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values()) if len(dag.qubits()) != len(self.initial_layout): raise TranspilerError() if len(self._coupling_map.physical_qubits) != len(self.initial_layout): raise TranspilerError( "Mappers require to have the layout to be the same size as the coupling map") mapped_gates = [] layout = self.initial_layout.copy() gates_remaining = ordered_virtual_gates.copy() while gates_remaining: best_step = _search_forward_n_swaps(layout, gates_remaining, coupling_map) layout = best_step[] gates_mapped = best_step[] gates_remaining = best_step[] mapped_gates.extend(gates_mapped) mapped_dag = _copy_circuit_metadata(dag, coupling_map) for node in mapped_gates: mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs) return mapped_dag
Run one pass of the lookahead mapper on the provided DAG. Args: dag (DAGCircuit): the directed acyclic graph to be mapped Returns: DAGCircuit: A dag mapped to be compatible with the coupling_map in the property_set. Raises: TranspilerError: if the coupling map or the layout are not compatible with the DAG
5,226
def configure(default=None, dev=None): cache_loc = openaccess_epub.utils.cache_location() config_loc = openaccess_epub.utils.config_location() openaccess_epub.utils.mkdir_p(cache_loc) defaults = {: time.asctime(), : openaccess_epub.__version__, : unix_path_coercion(cache_loc), : , : , : os.path.join(cache_loc, ), : , : , : , : , : os.path.join(cache_loc, , )} if default or dev: if dev: defaults[] = defaults[] = list_opts(defaults[]) defaults[] = boolean(defaults[]) defaults[] = absolute_path(defaults[]) defaults[] = boolean(defaults[]) defaults[] = boolean(defaults[]) defaults[] = nonempty(defaults[]) defaults[] = nonempty(defaults[]) defaults[] = absolute_path(defaults[]) config = config_formatter(CONFIG_TEXT, defaults) with open(config_loc, ) as conf_out: conf_out.write(bytes(config, )) print(.format(config_loc)) return config_dict = {: time.asctime(), : openaccess_epub.__version__, : unix_path_coercion(cache_loc)} print() print() print(.format(cache_loc)) input() print(ll configure some values for each of these, and you\) print() user_prompt(config_dict, , , default=defaults[], validator=list_opts) print() user_prompt(config_dict, , , default=defaults[], validator=boolean) print() user_prompt(config_dict, , , default=defaults[], validator=absolute_path) print() user_prompt(config_dict, , , default=defaults[], validator=boolean) print() user_prompt(config_dict, , , default=defaults[], validator=boolean) print() user_prompt(config_dict, , , default=defaults[], validator=nonempty) print() user_prompt(config_dict, , , default=defaults[], validator=nonempty) print() user_prompt(config_dict, , , default=defaults[], validator=absolute_path) config = config_formatter(CONFIG_TEXT, config_dict) with open(config_loc, ) as conf_out: conf_out.write(bytes(config, )) print()
The inner control loops for user interaction during quickstart configuration.
5,227
def _is_path(s):
    if isinstance(s, string_types):
        try:
            return op.exists(s)
        except (OSError, ValueError):
            return False
    else:
        return False
Return whether an object is a path.
5,228
def delete_message(self, chat_id, message_id):
    return apihelper.delete_message(self.token, chat_id, message_id)
Use this method to delete a message. Returns True on success. :param chat_id: in which chat to delete :param message_id: which message to delete :return: API reply.
5,229
def _start(self):
    if self.whoami is None:
        me = self.get_me()
        # 'ok' and 'result' are assumed here from the Telegram getMe response shape
        if me.get('ok', False):
            self.whoami = me['result']
        else:
            raise ValueError()
Requests bot information based on current api_key, and sets self.whoami to dictionary with username, first_name, and id of the configured bot.
5,230
def xross_listener(http_method=None, **xross_attrs):
    # the frame-local key was stripped in extraction; 'request' is assumed here
    handler = currentframe().f_back.f_locals['request']._xross_handler
    handler.set_attrs(**xross_attrs)
    if http_method is not None:
        handler.http_method = http_method
    handler.dispatch()
Instructs xross to handle AJAX calls right from the moment it is called. This should be placed in a view decorated with `@xross_view()`. :param str http_method: GET or POST. To be used as a source of data for xross. :param dict xross_attrs: xross handler attributes. Those attributes will be available in operation functions in `xross` keyword argument.
5,231
def pvremove(devices, override=True): if isinstance(devices, six.string_types): devices = devices.split() cmd = [, ] for device in devices: if pvdisplay(device): cmd.append(device) elif not override: raise CommandExecutionError(.format(device)) if not cmd[2:]: return True out = __salt__[](cmd, python_shell=False) if out.get(): raise CommandExecutionError(out.get()) for device in devices: if pvdisplay(device, quiet=True): raise CommandExecutionError(.format(device)) return True
Remove a physical device being used as an LVM physical volume override Skip devices, if they are already not used as LVM physical volumes CLI Examples: .. code-block:: bash salt mymachine lvm.pvremove /dev/sdb1,/dev/sdb2
5,232
def set_tags(name=None, tags=None, call=None, location=None, instance_id=None, resource_id=None, kwargs=None): Other stuff if kwargs is None: kwargs = {} if location is None: location = get_location() if instance_id is None: if in kwargs: resource_id = kwargs[] del kwargs[] if in kwargs: instance_id = kwargs[] del kwargs[] if resource_id is None: if instance_id is None: instance_id = _get_node(name=name, instance_id=None, location=location)[] else: instance_id = resource_id if instance_id is None: return { : } params = {: , : instance_id} log.debug(, name, tags) if kwargs and not tags: tags = kwargs for idx, (tag_k, tag_v) in enumerate(six.iteritems(tags)): params[.format(idx)] = tag_k params[.format(idx)] = tag_v attempts = 0 while attempts < aws.AWS_MAX_RETRIES: aws.query(params, setname=, location=location, provider=get_provider(), opts=__opts__, sigver=) settags = get_tags( instance_id=instance_id, call=, location=location ) log.debug(, settags) failed_to_set_tags = False for tag in settags: if tag[] not in tags: continue if tag.get() is None and tags.get(tag[]) == : continue if six.text_type(tags.get(tag[])) != six.text_type(tag[]): log.debug( , tag[], tags.get(tag[]), tag[] ) failed_to_set_tags = True break if failed_to_set_tags: log.warning(, attempts) attempts += 1 aws.sleep_exponential_backoff(attempts) continue return settags raise SaltCloudSystemExit( .format(name) )
Set tags for a resource. Normally a VM name or instance_id is passed in, but a resource_id may be passed instead. If both are passed in, the instance_id will be used. CLI Examples: .. code-block:: bash salt-cloud -a set_tags mymachine tag1=somestuff tag2='Other stuff' salt-cloud -a set_tags resource_id=vol-3267ab32 tag=somestuff
5,233
def AddRoute(self, short_name, long_name, route_type, route_id=None):
    if route_id is None:
        route_id = util.FindUniqueId(self.routes)
    route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name,
                                     route_type=route_type, route_id=route_id)
    route.agency_id = self.GetDefaultAgency().agency_id
    self.AddRouteObject(route)
    return route
Add a route to this schedule. Args: short_name: Short name of the route, such as "71L" long_name: Full name of the route, such as "NW 21st Ave/St Helens Rd" route_type: A type such as "Tram", "Subway" or "Bus" route_id: id of the route or None, in which case a unique id is picked Returns: A new Route object
5,234
def get_lifecycle(self, policy=None, params=None):
    return self.transport.perform_request(
        "GET", _make_path("_ilm", "policy", policy), params=params
    )
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html>`_ :arg policy: The name of the index lifecycle policy
5,235
def calc_size_and_sha265(content: io.IOBase, chunk_size: int):
    size = 0
    sha256 = hashlib.sha256()
    content.seek(0, io.SEEK_SET)
    while True:
        buf = content.read(chunk_size)
        length = len(buf)
        size += length
        sha256.update(buf)
        if length != chunk_size:
            break
    return size, sha256.hexdigest()
Calculates the size and the SHA-256 digest of the content.
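A small usage sketch of the helper above, assuming calc_size_and_sha265 is in scope; the in-memory buffer and chunk size are illustrative only:

    import hashlib
    import io

    buf = io.BytesIO(b"hello world" * 1000)
    size, digest = calc_size_and_sha265(buf, chunk_size=4096)
    assert size == 11000
    assert digest == hashlib.sha256(b"hello world" * 1000).hexdigest()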
5,236
def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None): LOG.debug(, vpn_path) if not path_rts: LOG.info(, vpn_path) return interested_tables = set() if vpn_path.route_family == RF_IPv4_VPN: route_family = RF_IPv4_UC elif vpn_path.route_family == RF_IPv6_VPN: route_family = RF_IPv6_UC elif vpn_path.route_family == RF_L2_EVPN: route_family = RF_L2_EVPN elif vpn_path.route_family == RF_VPNv4_FLOWSPEC: route_family = RF_IPv4_FLOWSPEC elif vpn_path.route_family == RF_VPNv6_FLOWSPEC: route_family = RF_IPv6_FLOWSPEC elif vpn_path.route_family == RF_L2VPN_FLOWSPEC: route_family = RF_L2VPN_FLOWSPEC else: raise ValueError( % vpn_path.route_family) for rt in path_rts: rt_rf_id = rt + + str(route_family) vrf_rt_tables = self._tables_for_rt.get(rt_rf_id) if vrf_rt_tables: interested_tables.update(vrf_rt_tables) if interested_tables: route_dist = vpn_path.nlri.route_dist for vrf_table in interested_tables: if (vpn_path.source is not None or route_dist != vrf_table.vrf_conf.route_dist): update_vrf_dest = vrf_table.import_vpn_path(vpn_path) if update_vrf_dest is not None: self._signal_bus.\ dest_changed(update_vrf_dest) else: LOG.debug(, path_rts)
Imports *vpn_path* into qualifying VRF tables. The import RTs of each VRF table are matched against the RTs of *vpn_path*, and if any RTs are in common the path is imported into that VRF.
5,237
def check_for_required_columns(problems, table, df): r = cs.PROTOFEED_REF req_columns = r.loc[(r[] == table) & r[], ].values for col in req_columns: if col not in df.columns: problems.append([, .format(col), table, []]) return problems
Check that the given ProtoFeed table has the required columns. Parameters ---------- problems : list A four-tuple containing 1. A problem type (string) equal to ``'error'`` or ``'warning'``; ``'error'`` means the ProtoFeed is violated; ``'warning'`` means there is a problem but it is not a ProtoFeed violation 2. A message (string) that describes the problem 3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem occurs 4. A list of rows (integers) of the table's DataFrame where the problem occurs table : string Name of a ProtoFeed table df : DataFrame The ProtoFeed table corresponding to ``table`` Returns ------- list The ``problems`` list extended as follows. Check that the DataFrame contains the colums required by the ProtoFeed spec and append to the problems list one error for each column missing.
5,238
def zoom_in(self, action=None, channel=0): ret = self.command( .format(action, channel) ) return ret.content.decode()
Params: action - start or stop; channel - channel number. The magic of zooming 1x, 2x, etc. is the delay between the 'start' and 'stop' commands; a suggested interval between start and stop is 0.5 sec.
5,239
def weighted_std(values, weights):
    average = np.average(values, weights=weights)
    variance = np.average((values - average)**2, weights=weights)
    return np.sqrt(variance)
Calculate standard deviation weighted by errors
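A quick numeric check of weighted_std, assuming numpy is imported as np as in the row above:

    import numpy as np

    values = np.array([1.0, 2.0, 3.0])
    weights = np.array([1.0, 1.0, 2.0])
    # weighted mean = (1 + 2 + 6) / 4 = 2.25
    # weighted variance = (1*(1-2.25)**2 + 1*(2-2.25)**2 + 2*(3-2.25)**2) / 4 = 0.6875
    print(weighted_std(values, weights))  # ~0.8292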
5,240
def cache_result(func):
    def cache_set(key, value):
        cache.set(key, value, AVATAR_CACHE_TIMEOUT)
        return value

    def cached_func(user, size):
        prefix = func.__name__
        cached_funcs.add(prefix)
        key = get_cache_key(user, size, prefix=prefix)
        return cache.get(key) or cache_set(key, func(user, size))
    return cached_func
Decorator to cache the result of functions that take a ``user`` and a ``size`` value.
5,241
def _convert_to_cwl_json(data, fnargs, input_files): out = {} for outvar in _get_output_cwl_keys(fnargs): keys = [] for key in outvar.split("__"): try: key = int(key) except ValueError: pass keys.append(key) if isinstance(data, dict): out[outvar] = _to_cwl(tz.get_in(keys, data), input_files) else: out[outvar] = [_to_cwl(tz.get_in(keys, x), input_files) for x in data] return out
Convert world data object (or list of data objects) into outputs for CWL ingestion.
5,242
def zoning_defined_configuration_cfg_cfg_name(self, **kwargs):
    config = ET.Element("config")
    zoning = ET.SubElement(config, "zoning", xmlns="urn:brocade.com:mgmt:brocade-zone")
    defined_configuration = ET.SubElement(zoning, "defined-configuration")
    cfg = ET.SubElement(defined_configuration, "cfg")
    cfg_name = ET.SubElement(cfg, "cfg-name")
    cfg_name.text = kwargs.pop('cfg_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
5,243
def get_humidity(self):
    self._init_humidity()
    humidity = 0
    data = self._humidity.humidityRead()
    if (data[0]):
        humidity = data[1]
    return humidity
Returns the percentage of relative humidity
5,244
def add_local_option(self, *args, **kw): try: group = self.local_option_group except AttributeError: group = SConsOptionGroup(self, ) group = self.add_option_group(group) self.local_option_group = group result = group.add_option(*args, **kw) if result: setattr(self.values.__defaults__, result.dest, result.default) self.reparse_local_options() return result
Adds a local option to the parser. This is initiated by a SetOption() call to add a user-defined command-line option. We add the option to a separate option group for the local options, creating the group if necessary.
5,245
def total_variation(domain, grad=None): if grad is None: grad = odl.Gradient(domain, method=, pad_mode=) grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2)) else: grad = grad f = odl.solvers.GroupL1Norm(grad.range, exponent=2) return f * grad
Total variation functional. Parameters ---------- domain : odlspace domain of TV functional grad : gradient operator, optional Gradient operator of the total variation functional. This may be any linear operator and thereby generalizing TV. default=forward differences with Neumann boundary conditions Examples -------- Check that the total variation of a constant is zero >>> import odl.contrib.spdhg as spdhg, odl >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3]) >>> tv = spdhg.total_variation(space) >>> x = space.one() >>> tv(x) < 1e-10
5,246
def cached_property(getter):
    def decorator(self):
        key = "_cached_property_" + getter.__name__
        if not hasattr(self, key):
            setattr(self, key, getter(self))
        return getattr(self, key)
    decorator.__name__ = getter.__name__
    decorator.__module__ = getter.__module__
    decorator.__doc__ = getter.__doc__
    return property(decorator)
Decorator that converts a method into memoized property. The decorator works as expected only for classes with attribute '__dict__' and immutable properties.
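A brief usage sketch of the cached_property decorator above; the Circle class is illustrative only:

    import math

    class Circle:
        def __init__(self, radius):
            self.radius = radius

        @cached_property
        def area(self):
            print("computing")  # runs only on the first access
            return math.pi * self.radius ** 2

    c = Circle(2.0)
    print(c.area)  # prints "computing", stores the value in c._cached_property_area
    print(c.area)  # served from the cache, no recomputation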
5,247
def is_compliant(self, path):
    log("Auditing contents of file '%s'" % (path), level=DEBUG)
    with open(path, 'r') as fd:
        contents = fd.read()
    matches = 0
    for pattern in self.pass_cases:
        key = re.compile(pattern, flags=re.MULTILINE)
        results = re.search(key, contents)
        if results:
            matches += 1
        else:
            log("Pattern '%s' was expected to pass but instead it failed"
                % (pattern), level=WARNING)
    for pattern in self.fail_cases:
        key = re.compile(pattern, flags=re.MULTILINE)
        results = re.search(key, contents)
        if not results:
            matches += 1
        else:
            log("Pattern '%s' was expected to fail but instead it passed"
                % (pattern), level=WARNING)
    total = len(self.pass_cases) + len(self.fail_cases)
    log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
    return matches == total
Given a set of content matching cases i.e. tuple(regex, bool) where bool value denotes whether or not regex is expected to match, check that all cases match as expected with the contents of the file. Cases can be expected to pass or fail. :param path: Path of file to check. :returns: Boolean value representing whether or not all cases are found to be compliant.
5,248
def pauseMovie(self):
    if self.state == self.PLAYING:
        self.sendRtspRequest(self.PAUSE)
Pause button handler.
5,249
def _fault_to_exception(f):
    e = _fault_to_exception_map.get(f.faultCode)
    if e is None:
        e = NipapError
    return e(f.faultString)
Converts XML-RPC Fault objects to Pynipap-exceptions. TODO: Is this one necessary? Can be done inline...
5,250
def override_cluster_spec(self, srd): merged_cluster_spec = copy.deepcopy(self.entrypoints) merged_cluster_spec = dict([(k, v) for k, v in merged_cluster_spec.items() if v.get("clusterSpec") is not None]) merged_cluster_spec = dict([(k, v) for k, v in merged_cluster_spec.items() if \ k in srd.entrypoints or "*" in srd.entrypoints]) for entry_pt, req in merged_cluster_spec.items(): merged_cluster_spec[entry_pt]["clusterSpec"].update( srd.entrypoints.get(entry_pt, srd.entrypoints.get("*"))["clusterSpec"]) for entry_pt, req in srd.entrypoints.items(): if entry_pt not in merged_cluster_spec and "*" in self.entrypoints and "clusterSpec" in self.entrypoints["*"]: merged_cluster_spec[entry_pt] = {"clusterSpec": copy.deepcopy(self.entrypoints["*"]["clusterSpec"])} merged_cluster_spec[entry_pt]["clusterSpec"].update(req["clusterSpec"]) return SystemRequirementsDict(merged_cluster_spec)
Returns SystemRequirementsDict can be passed in a "systemRequirements" input to app-xxx/run, e.g. {'fn': {'clusterSpec': {initialInstanceCount: 3, version: "2.4.0", ..}}} Since full clusterSpec must be passed to the API server, we need to retrieve the cluster spec defined in app doc's systemRequirements and overwrite the field initialInstanceCount with the value the user passed to dx run for each entrypoint. initialInstanceCount is currently the only clusterSpec's field the user is allowed to change at runtime. A few scenarios when requesting instance count for different entrypoints with dx run and the resulting merged systemRequirements (merged_cluster_spec). The bootstapScript field here is only one of many (version, ports, etc) that should be copied from app spec to merged_cluster_spec: Requested: {"*": 5} App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"}, "other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}} Merged: {"main": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "x.sh"}, "other": "clusterSpec": {"initialInstanceCount": 5, bootstrapScript: "y.sh"}} Requested: {"*": 15} App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"}, "other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}, "*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "y.sh"}} Merged: {"main": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "x.sh"}, "other": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"}, "*": "clusterSpec": {"initialInstanceCount": 15, bootstrapScript: "y.sh"}} Requested: {"main": 12} App doc: {"main": "clusterSpec": {"initialInstanceCount": 7, bootstrapScript: "x.sh"}, "other": "clusterSpec": {"initialInstanceCount": 9, bootstrapScript: "y.sh"}} Merged: {"main": "clusterSpec": {"initialInstanceCount": 12, bootstrapScript: "x.sh"}} Requested: {"main": 33} App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "z.sh"}} Merged: {"main": "clusterSpec": {"initialInstanceCount": 33, bootstrapScript: "z.sh"}} Requested: {"main": 22, "*": 11} App doc: {"*": "clusterSpec": {"initialInstanceCount": 2, bootstrapScript: "t.sh"}} Merged: {"main": "clusterSpec": {"initialInstanceCount": 22, bootstrapScript: "t.sh"}, "*": "clusterSpec": {"initialInstanceCount": 11, bootstrapScript: "t.sh"}}
5,251
def install_theme(path_to_theme):
    pref_init()
    filename = basename(path_to_theme)
    dest = join(THEMES_DIR, filename)
    copy(path_to_theme, dest)
    zf = zipfile.ZipFile(dest)
    zf.extractall(THEMES_DIR)
    unlink(dest)
Pass a path to a theme file which will be extracted to the themes directory.
5,252
def marketShortInterest(date=None, token=, version=): if date: date = _strOrDate(date) return _getJson( + date, token, version) return _getJson(, token, version)
The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report. The report data will be published daily at 4:00pm ET. https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev Args: date (datetime); Effective Datetime token (string); Access token version (string); API version Returns: dict: result
5,253
def add_to_parser(self, parser, group):
    return parser.add_argument_group(*self.args, **self.kwds)
Add this object's information to the parser.
5,254
def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind=, na_position=, sort_remaining=True): inplace = validate_bool_kwarg(inplace, ) axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) labels = self._get_axis(axis) if level is not None: raise NotImplementedError("level is not implemented") if inplace: raise NotImplementedError("inplace is not implemented") sort_index = labels.argsort() if not ascending: sort_index = sort_index[::-1] new_axis = labels.take(sort_index) return self.reindex(**{axis_name: new_axis})
Sort object by labels (along an axis). Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis along which to sort. The value 0 identifies the rows, and 1 identifies the columns. level : int or level name or list of ints or list of level names If not None, sort on values in specified index level(s). ascending : bool, default True Sort ascending vs. descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted index if inplace=False, None otherwise.
5,255
def _Open(self, path_spec=None, mode=): if not self._file_object_set_in_init and not path_spec: raise ValueError() if self._file_object_set_in_init: return self._file_object = self._OpenFileObject(path_spec) if not self._file_object: raise IOError()
Opens the file-like object defined by path specification. Args: path_spec (Optional[PathSpec]): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
5,256
def batch_scan(points, xdist=20, ydist=20, N=5):
    chr_pair_points = group_hits(points)
    clusters = []
    for chr_pair in sorted(chr_pair_points.keys()):
        points = chr_pair_points[chr_pair]
        clusters.extend(synteny_scan(points, xdist, ydist, N))
    return clusters
runs synteny_scan() per chromosome pair
5,257
def get_install_requires(): install_requires = get_requirements() if not in sys.argv: if sys.version_info[0] == 2: install_requires.append() if sys.version_info[2:] == (2, 6): install_requires.append() elif (2, 6) < sys.version_info[:2] < (3, 0): install_requires.append() return sorted(install_requires)
Add conditional dependencies (when creating source distributions).
5,258
def _show(self, pk): pages = get_page_args() page_sizes = get_page_size_args() orders = get_order_args() item = self.datamodel.get(pk, self._base_filters) if not item: abort(404) widgets = self._get_show_widget(pk, item) self.update_redirect() return self._get_related_views_widgets( item, orders=orders, pages=pages, page_sizes=page_sizes, widgets=widgets )
Show view logic; override to implement different logic. Returns the show widget and the related-list widgets.
5,259
def close(self, code=None, reason=): self.send_close_frame(code, reason) frame = self.sock.recv() if frame.opcode != OPCODE_CLOSE: raise ValueError( % frame) self.handle_control_frame(frame)
Close the socket by sending a CLOSE frame and waiting for a response close message, unless such a message has already been received earlier (prior to calling this function, for example). The onclose() handler is called after the response has been received, but before the socket is actually closed.
5,260
def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None, max_window=3000, variant_type=): if variant_type == : nr_variants = case_obj[] else: nr_variants = case_obj[] nr_inserted = 0 case_id = case_obj[] if skip_case_id: case_id = None with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar: variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar) if variant_type == : for sv_variant in variants: if not sv_variant: continue adapter.add_structural_variant(variant=sv_variant, max_window=max_window) nr_inserted += 1 if variant_type == : nr_inserted = adapter.add_variants(variants) LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type) return nr_inserted
Load variants for a family into the database. Args: adapter (loqusdb.plugins.Adapter): initialized plugin case_obj(Case): dict with case information nr_variants(int) skip_case_id (bool): whether to include the case id on variant level or not gq_treshold(int) max_window(int): Specify the max size for sv windows variant_type(str): 'sv' or 'snv' Returns: nr_inserted(int)
5,261
def render(self, code): if self._callbacks[] is not None: self._callbacks[](code) ypos = 1.0 for line in code: xpos = self.quiet_zone for mod in line: if mod == : color = self.background else: color = self.foreground self._callbacks[](xpos, ypos, self.module_width, color) xpos += self.module_width self._callbacks[](xpos, ypos, self.quiet_zone, self.background) ypos += self.module_height if self.text and self._callbacks[] is not None: ypos += self.text_distance if self.center_text: xpos = xpos / 2.0 else: xpos = self.quiet_zone + 4.0 self._callbacks[](xpos, ypos) return self._callbacks[]()
Renders the barcode to whatever the inheriting writer provides, using the registered callbacks. :parameters: code : List List of strings matching the writer spec (only contain 0 or 1).
5,262
def expand_as_args(args):
    return (isinstance(args, collections.Sequence) and
            not _is_namedtuple(args) and not _force_leaf(args))
Returns `True` if `args` should be expanded as `*args`.
5,263
def warn_import_error(type_of_obj_support: str, caught: ImportError): msg = StringIO() msg.writelines( + type_of_obj_support + ) traceback.print_tb(caught.__traceback__, file=msg) msg.writelines(str(caught.__class__.__name__) + + str(caught) + ) warn(msg.getvalue())
Utility method to print a warning message about failed import of some modules :param type_of_obj_support: :param caught: :return:
5,264
def interface_by_name(self, name):
    if name in self._devinfo:
        return self._devinfo[name]
    raise KeyError("No device named {}".format(name))
Given a device name, return the corresponding interface object
5,265
def substitute_harmonic(progression, substitute_index, ignore_suffix=False):
    # substitution pairs and the ''/'7' suffix checks reconstructed from the docstring table
    simple_substitutions = [('I', 'III'), ('I', 'VI'), ('IV', 'II'),
                            ('IV', 'VI'), ('V', 'VII')]
    res = []
    (roman, acc, suff) = parse_string(progression[substitute_index])
    if suff == '' or suff == '7' or ignore_suffix:
        for subs in simple_substitutions:
            r = subs[1] if roman == subs[0] else None
            if r == None:
                r = subs[0] if roman == subs[1] else None
            if r != None:
                suff = suff if suff == '7' else ''
                res.append(tuple_to_string((r, acc, suff)))
    return res
Do simple harmonic substitutions. Return a list of possible substitutions for progression[substitute_index]. If ignore_suffix is set to True the suffix of the chord being substituted will be ignored. Otherwise only progressions without a suffix, or with suffix '7' will be substituted. The following table is used to convert progressions: || I || III || || I || VI || || IV || II || || IV || VI || || V || VII ||
5,266
def free_size(self, units="MiB"):
    self.open()
    size = lvm_vg_get_free_size(self.handle)
    self.close()
    return size_convert(size, units)
Returns the volume group free size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
5,267
def length(self, vertices):
    length = ((np.diff(self.discrete(vertices), axis=0)**2).sum(axis=1)**.5).sum()
    return length
Return the total length of the entity. Returns --------- length: float, total length of entity
5,268
def create_vm(self, userid, cpu, memory, disk_list, profile, max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam): rd = ( % {: userid, : memory, : const.ZVM_USER_DEFAULT_PRIVILEGE, : cpu, : profile, : max_cpu, : max_mem}) if CONF.zvm.default_admin_userid: rd += ( % CONF.zvm.default_admin_userid) if (disk_list and in disk_list[0] and disk_list[0][]): rd += ( % self._get_ipl_param(ipl_from)) if ipl_param: rd += % ipl_param if ipl_loadparam: rd += % ipl_loadparam action = "create userid " % userid try: self._request(rd) except exception.SDKSMTRequestFailed as err: if ((err.results[] == 436) and (err.results[] == 4)): result = "Profile " % profile raise exception.SDKObjectNotExistError(obj_desc=result, modID=) else: msg = if action is not None: msg = "Failed to %s. " % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) action = "add guest to database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.add_guest(userid) if disk_list: return self.add_mdisks(userid, disk_list)
Create VM and add disks if specified.
5,269
def copy_script(self, filename, id_=-1):
    for repo in self._children:
        repo.copy_script(filename, id_)
Copy a script to all repositories. Takes into account whether a JSS has been migrated. See the individual DistributionPoint types for more information. Args: filename: String path to the local file to copy. id_: Integer ID you wish to associate script with for a JDS or CDP only. Default is -1, which is used for creating a new script object in the database.
5,270
def _parse_reflectivity(line, lines):
    split_line = line.split()
    energy = float(split_line[0])
    reflect_xx = float(split_line[1])
    reflect_zz = float(split_line[2])
    return {"energy": energy, "reflect_xx": reflect_xx, "reflect_zz": reflect_zz}
Parse Energy [eV] reflect_xx reflect_zz
5,271
def load_yaml(file):
    if hasattr(yaml, "full_load"):
        return yaml.full_load(file)
    else:
        return yaml.load(file)
If pyyaml > 5.1 use full_load to avoid warning
5,272
def _tokenize(cls, sentence):
    while True:
        match = cls._regex_tag.search(sentence)
        if not match:
            yield from cls._split(sentence)
            return
        chunk = sentence[:match.start()]
        yield from cls._split(chunk)
        tag = match.group(0)
        yield tag
        sentence = sentence[(len(chunk) + len(tag)):]
Split a sentence while preserving tags.
5,273
def get_items(self): from .layers import Layer results = [] for url in self.items: if in url: r = self._client.request(, url) results.append(self._client.get_manager(Layer).create_from_result(r.json())) else: raise NotImplementedError("No support for %s" % url) return results
Return the item models associated with this Publish group.
5,274
def load_from_ini(ini, default_section=_DEFAULT_SECTION):
    global _CONFIG_CACHE
    if ini not in _CONFIG_CACHE:
        if six.PY3:
            logger.debug("PY3........")
            _CONFIG_CACHE[ini] = _load_from_ini_py3(ini, default_section)
        else:
            _CONFIG_CACHE[ini] = _load_from_ini_py2(ini)
    logger.debug(_CONFIG_CACHE[ini])
    return _CONFIG_CACHE[ini]
Read configuration from a single ini file. :param ini: :param default_section: :return:
5,275
def walk(prev, inital_path, *args, **kw):
    for dir_path, dir_names, filenames in os.walk(inital_path):
        for filename in filenames:
            yield os.path.join(dir_path, filename)
This pipe wraps os.walk and yields absolute paths one by one. :param prev: The previous iterator of pipe. :type prev: Pipe :param args: The end-of-line symbol for each output. :type args: list of string. :param kw: The end-of-line symbol for each output. :type kw: dictionary of options. Add 'endl' in kw to specify end-of-line symbol. :returns: generator
5,276
def get_signature(self, signature):
    resp = self.request_list()
    if resp and (len(resp) > 0):
        for sig_dict in resp:
            sig = zobjects.Signature.from_dict(sig_dict)
            if hasattr(signature, 'id'):
                its_this_one = (sig.id == signature.id)
            elif hasattr(signature, 'name'):
                its_this_one = (sig.name.upper() == signature.name.upper())
            else:
                raise ValueError()
            if its_this_one:
                return sig
    else:
        return None
Retrieve one signature, discriminated by name or id. Note that signature name is not case sensitive. :param: a zobjects.Signature describing the signature like "Signature(name='my-sig')" :returns: a zobjects.Signature object, filled with the signature if no signature is matching, returns None.
5,277
def get_cookbook_dirs(self, base_dir=None): if base_dir is None: base_dir = self.env_root cookbook_dirs = [] dirs_to_skip = set([]) for root, dirs, files in os.walk(base_dir): dirs[:] = [d for d in dirs if d not in dirs_to_skip] for name in files: if name == : if in os.path.basename(os.path.dirname(root)): cookbook_dirs.append(root) return cookbook_dirs
Find cookbook directories.
5,278
def get_last_fingerprint(fullpath):
    record = model.FileFingerprint.get(file_path=fullpath)
    if record:
        return record.fingerprint
    return None
Get the last known modification time for a file
5,279
def p_lpartselect_lpointer(self, p):
    p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
lpartselect : pointer LBRACKET expression COLON expression RBRACKET
5,280
def file_digest(source):
    hash_sha256 = hashlib.sha256()
    should_close = False
    if isinstance(source, six.string_types):
        should_close = True
        source = open(source, 'rb')
    for chunk in iter(lambda: source.read(_BUFFER_SIZE), b''):
        hash_sha256.update(chunk)
    if should_close:
        source.close()
    return hash_sha256.hexdigest()
Calculates SHA256 digest of a file. Args: source: either a file-like object or a path to file
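A hedged usage sketch: file_digest accepts either a path or a file-like object, so both calls below should return the same hex digest. It relies on the row's module context (six, _BUFFER_SIZE); the temp-file setup is illustrative:

    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"some payload")
        path = tmp.name

    assert file_digest(path) == file_digest(open(path, "rb"))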
5,281
def load_featured(data):
    from invenio_communities.models import FeaturedCommunity
    obj = FeaturedCommunity(id=data['id'],
                            id_community=data['id_community'],
                            start_date=iso2dt(data['start_date']))
    db.session.add(obj)
    db.session.commit()
Load community featuring from data dump. :param data: Dictionary containing community featuring data. :type data: dict
5,282
def printImports(self):
    for module in self.listModules():
        print("%s:" % module.label)
        if self.external_dependencies:
            imports = list(module.imports)
        else:
            imports = [modname for modname in module.imports
                       if modname in self.modules]
        imports.sort()
        print(" %s" % "\n ".join(imports))
Produce a report of dependencies.
5,283
def _parse_datapoints(self, parsed_duration, parsed_resolution, limit):
    return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit)
Parse the number of datapoints of a query. This can be calculated from the given duration and resolution of the query. E.g. if the query has a duration of 2*60*60 = 7200 seconds and a resolution of 10 seconds then the number of datapoints would be 7200/10 => 720 datapoints. :param parsed_duration: :param parsed_resolution: :param limit: :return:
5,284
def dump_sensor_memory(self, cb_compress=False, custom_compress=False, custom_compress_file=None, auto_collect_result=False): print("~ dumping contents of memory on {}".format(self.sensor.computer_name)) local_file = remote_file = "{}.memdmp".format(self.sensor.computer_name) if not self.lr_session: self.go_live() try: if cb_compress and auto_collect_result: logging.info("CB compression and auto-collection set") self.lr_session.memdump(remote_filename=remote_file, compress=cb_compress) return True dump_object = self.lr_session.start_memdump(remote_filename=remote_file, compress=cb_compress) dump_object.wait() if cb_compress: print("+ Memory dump compressed at -> C:\windows\carbonblack\{}.zip".format(remote_file)) if auto_collect_result: self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}.zip".format(remote_file)) return True print("+ Memory dump complete on host -> C:\windows\carbonblack\{}".format(remote_file)) except LiveResponseError as e: raise Exception("LiveResponseError: {}".format(e)) if custom_compress: if not os.path.exists(custom_compress_file): logging.debug("{} not found.".format(custom_compress_file)) HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__),,)) custom_compress_file = os.path.join(HOME_DIR, , ) if not os.path.exists(custom_compress_file): logging.error("{} not found.".format(custom_compress_file)) return False logging.info("Using {}".format(custom_compress_file)) bat_filename = custom_compress_file[custom_compress_file.rfind()+1:] filedata = None with open(custom_compress_file, ) as f: filedata = f.read() try: self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename) except LiveResponseError as e: if not in str(e): logging.error("Error puting compress_file.bat") return False else: self.lr_session.delete_file("C:\\Windows\\CarbonBlack\\" + bat_filename) self.lr_session.put_file(filedata, "C:\\Windows\\CarbonBlack\\" + bat_filename) print("~ Launching "+ bat_filename +" to create C:\\windows\\carbonblack\\_memdump.zip") compress_cmd = "C:\\Windows\\CarbonBlack\\" + bat_filename + " " + remote_file self.lr_session.create_process(compress_cmd, wait_for_output=False, wait_for_completion=False) if auto_collect_result: print("~ waiting for {} to complete.".format(bat_filename)) self.wait_for_process_to_finish(bat_filename) self.getFile_with_timeout("C:\\windows\\carbonblack\\_memdump.zip") print("[!] If compression successful, _memdump.zip will exist, and {} should be deleted.".format(remote_file)) if auto_collect_result: self.getFile_with_timeout("C:\\Windows\\CarbonBlack\\{}".format(remote_file)) return True
Customized function for dumping sensor memory. :arguments cb_compress: If True, use CarbonBlack's built-in compression. :arguments custom_compress_file: Supply path to lr_tools/compress_file.bat to fork powershell compression :collect_mem_file: If True, wait for memdump + and compression to complete, then use cbapi to collect
5,285
def validate_row(self, row):
    clean_row = {}
    if isinstance(row, (tuple, list)):
        assert self.header_order, "No attribute order specified."
        assert len(row) == len(self.header_order), \
            "Row length does not match header length."
        itr = zip(self.header_order, row)
    else:
        assert isinstance(row, dict)
        itr = iteritems(row)
    for el_name, el_value in itr:
        if self.header_types[el_name] == ATTR_TYPE_DISCRETE:
            clean_row[el_name] = int(el_value)
        elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS:
            clean_row[el_name] = float(el_value)
        else:
            clean_row[el_name] = el_value
    return clean_row
Ensure each element in the row matches the schema.
5,286
def _process_inbox_message(self, message: praw.models.Message):
    self._func_message(message, *self._func_message_args)
Process a reddit inbox message. Calls `func_message(message, *func_message_args)`. :param message: Item to process
5,287
def query_pop(query, prefix, sep='.'):
    terms = query.split(sep)
    if terms[0] == prefix:
        terms = terms[1:]
    return sep.join(terms)
Pop a prefix from a query string. Parameters ---------- query : str The query string prefix : str The prefix string to pop, if it exists sep : str The string to separate fields Returns ------- popped : str `query` with a `prefix` removed from the front (if found) or `query` if the prefix was not found Examples -------- >>> query_pop('Annotation.namespace', 'Annotation') 'namespace' >>> query_pop('namespace', 'Annotation') 'namespace'
5,288
def wait_for_stable_cluster(
    hosts,
    jolokia_port,
    jolokia_prefix,
    check_interval,
    check_count,
    unhealthy_time_limit,
):
    stable_counter = 0
    max_checks = int(math.ceil(unhealthy_time_limit / check_interval))
    for i in itertools.count():
        partitions, brokers = read_cluster_status(
            hosts,
            jolokia_port,
            jolokia_prefix,
        )
        if partitions or brokers:
            stable_counter = 0
        else:
            stable_counter += 1
        print(
            "Under replicated partitions: {p_count}, missing brokers: {b_count} ({stable}/{limit})".format(
                p_count=partitions,
                b_count=brokers,
                stable=stable_counter,
                limit=check_count,
            ))
        if stable_counter >= check_count:
            print("The cluster is stable")
            return
        if i >= max_checks:
            raise WaitTimeoutException()
        time.sleep(check_interval)
Block the caller until the cluster can be considered stable. :param hosts: list of brokers ip addresses :type hosts: list of strings :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer
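A hypothetical invocation; the broker addresses and thresholds are made-up values, and the helpers this relies on (`read_cluster_status`, `WaitTimeoutException`) are assumed to come from the same module.

# Hypothetical invocation with placeholder values: poll every 10 seconds and require
# 12 consecutive clean checks (~2 minutes of stability), giving up after 10 minutes.
wait_for_stable_cluster(
    hosts=['10.0.0.1', '10.0.0.2', '10.0.0.3'],
    jolokia_port=8778,
    jolokia_prefix='jolokia/',
    check_interval=10,
    check_count=12,
    unhealthy_time_limit=600,
)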
5,289
def _ensure_counter(self):
    if not isinstance(self.sync_counter, self._SynchronizationManager):
        self.sync_counter = self._SynchronizationManager()
Ensure the sync counter is a valid non-dummy object.
5,290
def add(self, source_id, auth, validate=True):
    # NOTE: the dict keys and endpoint path were stripped in extraction; the values below
    # follow the documented parameter names and are best-effort reconstructions.
    params = {'id': source_id, 'auth': auth, 'validate': validate}
    return self.request.post('auth/add', params)
Add one or more sets of authorization credentials to a Managed Source Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourceauthadd :param source_id: target Source ID :type source_id: str :param auth: An array of the source-specific authorization credential sets that you're adding. :type auth: array of strings :param validate: Allows you to suppress the validation of the authorization credentials, defaults to true. :type validate: bool :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
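A hedged usage sketch; the client attribute path, credential shape, and identifiers below are assumptions rather than documented values.

# Hypothetical usage: `client` is assumed to be an authenticated DataSift client whose
# managed-sources resource exposes this `add` method; the auth payload shape varies by source type.
auth = [{"parameters": {"value": "SOURCE_SPECIFIC_ACCESS_TOKEN"}}]
response = client.managed_sources.auth.add("SOURCE_ID", auth, validate=True)
print(response)   # DictResponse with REST API output and headers attached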
5,291
def filter(self, collection, data, **kwargs):
    ops = self.parse(data)
    collection = self.apply(collection, ops, **kwargs)
    return ops, collection
Filter given collection.
5,292
def generate_cache_key(value):
    if is_bytes(value):
        return hashlib.md5(value).hexdigest()
    elif is_text(value):
        return generate_cache_key(to_bytes(text=value))
    elif is_boolean(value) or is_null(value) or is_number(value):
        return generate_cache_key(repr(value))
    elif is_dict(value):
        return generate_cache_key((
            (key, value[key])
            for key in sorted(value.keys())
        ))
    elif is_list_like(value) or isinstance(value, collections.abc.Generator):
        return generate_cache_key("".join((
            generate_cache_key(item)
            for item in value
        )))
    else:
        raise TypeError("Cannot generate cache key for value {0} of type {1}".format(
            value,
            type(value),
        ))
Generates a deterministic cache key for the given value (bytes, text, numbers, dicts, and list-like values).
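A self-contained sketch of the same idea using plain `isinstance` checks in place of the library's `is_*` predicates, showing that equivalent dicts hash to the same key regardless of insertion order.

# Standalone approximation of the recursive keying scheme above (plain-Python type
# checks stand in for the library's is_bytes/is_text/... helpers).
import hashlib

def cache_key(value):
    if isinstance(value, bytes):
        return hashlib.md5(value).hexdigest()
    if isinstance(value, str):
        return cache_key(value.encode('utf-8'))
    if isinstance(value, (bool, int, float)) or value is None:
        return cache_key(repr(value))
    if isinstance(value, dict):
        return cache_key(tuple((key, value[key]) for key in sorted(value)))
    if isinstance(value, (list, tuple)):
        return cache_key("".join(cache_key(item) for item in value))
    raise TypeError("Cannot generate cache key for {!r}".format(value))

assert cache_key({"a": 1, "b": 2}) == cache_key({"b": 2, "a": 1})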
5,293
def expire(key, seconds, host=None, port=None, db=None, password=None):
    server = _connect(host, port, db, password)
    return server.expire(key, seconds)
Set a keys time to live in seconds CLI Example: .. code-block:: bash salt '*' redis.expire foo 300
5,294
def download(model, direct=False, *pip_args):
    # NOTE: the egg fragment and the warning strings below were reconstructed after the
    # original string literals were stripped; treat them as close approximations.
    dl_tpl = "{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}"
    if direct:
        components = model.split("-")
        model_name = "".join(components[:-1])
        version = components[-1]
        dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args)
    else:
        shortcuts = get_json(about.__shortcuts__, "available shortcuts")
        model_name = shortcuts.get(model, model)
        compatibility = get_compatibility()
        version = get_version(model_name, compatibility)
        dl = download_model(dl_tpl.format(m=model_name, v=version), pip_args)
        if dl != 0:
            msg.warn(
                "Download successful but linking failed",
                "Creating a shortcut link for '{}' didn't work (maybe you "
                "don't have admin permissions?), but you can still load "
                "the model via its full package name: "
                "nlp = spacy.load('{}')".format(model, model_name),
            )
Download compatible model from default download path using pip. Model can be shortcut, model name or, if --direct flag is set, full model name with version. For direct downloads, the compatibility check will be skipped.
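Typical invocations, assuming network access; the model name and version shown are illustrative examples, not pinned requirements.

# Illustrative calls against spaCy's CLI entry point:
from spacy.cli import download

download("en_core_web_sm")                     # resolve name against the compatibility table
download("en_core_web_sm-2.1.0", direct=True)  # exact package-version string, skip the compatibility check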
5,295
def shuffled_batches(self, batch_size):
    if batch_size >= self.num_envs * self.num_steps:
        yield self
    else:
        rollouts_in_batch = batch_size // self.num_steps
        batch_splits = math_util.divide_ceiling(self.num_envs, rollouts_in_batch)
        indices = list(range(self.num_envs))
        np.random.shuffle(indices)
        for sub_indices in np.array_split(indices, batch_splits):
            yield Trajectories(
                num_steps=self.num_steps,
                num_envs=len(sub_indices),
                environment_information=None,
                transition_tensors={k: x[:, sub_indices] for k, x in self.transition_tensors.items()},
                rollout_tensors={k: x[sub_indices] for k, x in self.rollout_tensors.items()},
            )
Generate randomized batches of data - only sample whole trajectories
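A hypothetical consumption loop; `rollout` is assumed to be a populated `Trajectories` instance and `compute_loss` is a stand-in for whatever update step the training code performs.

# Hypothetical training fragment: each yielded batch contains only whole trajectories,
# so transition tensors keep their [num_steps, num_envs_subset, ...] layout.
for batch in rollout.shuffled_batches(batch_size=256):
    loss = compute_loss(batch.transition_tensors)   # placeholder for the real update step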
5,296
def upload(self, filename, directory=None):
    filename = eval_path(filename)
    if directory is None:
        directory = self.downloads_directory
    res1 = self._req_upload(filename, directory)
    # NOTE: the response keys below were stripped in extraction and are best-effort
    # reconstructions of the upload response layout.
    data1 = res1['data']
    file_id = data1['file_id']
    res2 = self._req_file(file_id)
    data2 = res2['data'][0]
    data2.update(**data1)
    return _instantiate_uploaded_file(self, data2)
Upload a file ``filename`` to ``directory``

:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to :attr:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File`
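A hedged usage sketch; `api` stands for an authenticated instance of the surrounding API class, and the exact client setup is an assumption.

# Hypothetical usage: upload to the default downloads directory, or to an explicit one.
uploaded = api.upload('/tmp/report.pdf')                    # lands in api.downloads_directory
docs_dir = api.downloads_directory                          # or any Directory instance you already hold
uploaded_2 = api.upload('/tmp/report.pdf', directory=docs_dir)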
5,297
def make_systemrestoreitem_originalfilename(original_filename, condition='is', negate=False,
                                            preserve_case=False):
    # NOTE: the literal values below were stripped in extraction; they follow the
    # SystemRestoreItem/OriginalFileName term named in the docstring.
    document = 'SystemRestoreItem'
    search = 'SystemRestoreItem/OriginalFileName'
    content_type = 'string'
    content = original_filename
    ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
                                              negate=negate, preserve_case=preserve_case)
    return ii_node
Create a node for SystemRestoreItem/OriginalFileName :return: A IndicatorItem represented as an Element node
5,298
def get_collection(self, event_collection):
    url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url, self.api_version,
                                                   self.project_id, event_collection)
    headers = utilities.headers(self.read_key)
    response = self.fulfill(HTTPMethods.GET, url, headers=headers, timeout=self.get_timeout)
    self._error_handling(response)
    return response.json()
Extracts info about a collection using the Keen IO API. A master key must be set first. :param event_collection: the name of the collection to retrieve info for
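A hedged usage sketch; the constructor arguments and key names below are assumptions about the surrounding Keen client class, and the credentials are placeholders.

# Hypothetical usage: ask the Keen IO API for info about the "purchases" event collection.
client = KeenApi(project_id="PROJECT_ID", read_key="READ_KEY")   # assumed constructor signature
info = client.get_collection("purchases")
print(info)   # dict describing the collection's properties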
5,299
def generate_contentinfo_from_folder(self, csvwriter, rel_path, filenames):
    # NOTE: the debug message text was stripped in extraction; the wording here is approximate.
    LOGGER.debug('generate_contentinfo_from_folder: rel_path=' + str(rel_path) + ' filenames=' + str(filenames))
    from ricecooker.utils.linecook import filter_filenames, filter_thumbnail_files, chan_path_from_rel_path

    # topic node row for the folder itself
    topicrow = self.channeldir_node_to_row(rel_path.split(os.path.sep))
    csvwriter.writerow(topicrow)

    # content node rows for the files in this folder
    chan_path = chan_path_from_rel_path(rel_path, self.channeldir)
    filenames_cleaned = filter_filenames(filenames)
    for filename in filenames_cleaned:
        path_tuple = rel_path.split(os.path.sep)
        path_tuple.append(filename)
        filerow = self.channeldir_node_to_row(path_tuple)
        csvwriter.writerow(filerow)
Create a topic node row in Content.csv for the folder at `rel_path` and add content node rows for all the files in the `rel_path` folder.
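A hypothetical driver loop; `chef` is assumed to be the instance providing this method, and the relative-path computation is a guess at how the caller derives `rel_path`.

# Hypothetical driver: walk the channel directory and emit one topic row per folder
# plus content rows for its files (the rel_path derivation is an assumption).
import csv
import os

with open('Content.csv', 'w', newline='') as f:
    csvwriter = csv.writer(f)
    for abs_path, dirnames, filenames in os.walk(chef.channeldir):
        rel_path = os.path.relpath(abs_path, os.path.dirname(chef.channeldir))
        chef.generate_contentinfo_from_folder(csvwriter, rel_path, filenames)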