def reftrack_rtype_data(rt, role):
    """Return the data for the releasetype that is loaded by the reftrack

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the releasetype
    :rtype: depending on role
    :raises: None
    """
    tfi = rt.get_taskfileinfo()
    if not tfi:
        return
    return filesysitemdata.taskfileinfo_rtype_data(tfi, role)
def is_binarized(self):
    """Return True if the pianoroll is already binarized. Otherwise, return
    False.

    Returns
    -------
    is_binarized : bool
        True if the pianoroll is already binarized; otherwise, False.
    """
    is_binarized = np.issubdtype(self.pianoroll.dtype, np.bool_)
    return is_binarized
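For context, the dtype test used above can be exercised directly with plain NumPy arrays, independent of any pianoroll object:

import numpy as np

np.issubdtype(np.zeros(4, dtype=bool).dtype, np.bool_)      # True
np.issubdtype(np.zeros(4, dtype=np.uint8).dtype, np.bool_)  # False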
def parse(self, obj):
    """Parse the object's properties according to its default types."""
    for k, default in obj.__class__.defaults.items():
        typ = type(default)
        if typ is str:
            continue
        v = getattr(obj, k)
        if typ is int:
            setattr(obj, k, int(v or default))
        elif typ is float:
            setattr(obj, k, float(v or default))
        elif typ is bool:
            setattr(obj, k, bool(int(v or default)))
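A minimal sketch of how this parser might be driven; the Config class and its string-valued attributes are hypothetical stand-ins for whatever object carries a `defaults` mapping. Since `self` is unused in the body, the function can be called unbound:

# hypothetical carrier class with string values, as might come from a config file
class Config:
    defaults = {'port': 8080, 'ratio': 1.5, 'debug': False, 'name': 'demo'}

cfg = Config()
cfg.port, cfg.ratio, cfg.debug, cfg.name = '9090', '2.5', '1', 'demo'

parse(None, cfg)  # self is unused, so None works here
cfg.port, cfg.ratio, cfg.debug  # -> 9090, 2.5, True (strings left untouched)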
def run_with_graph_transformation(self) -> Iterable[BELGraph]:
    """Calculate scores for all leaves until there are none, remove edges
    until leaves appear again, and repeat until all nodes have been scored.
    Yields the current graph at every step so you can make a cool animation
    of how the graph changes throughout the course of the algorithm.

    :return: An iterable of BEL graphs
    """
    yield self.get_remaining_graph()
    while not self.done_chomping():
        while not list(self.iter_leaves()):
            self.remove_random_edge()
            yield self.get_remaining_graph()
        self.score_leaves()
        yield self.get_remaining_graph()
def zrem(self, key, *members):
    """Removes the specified members from the sorted set stored at key.
    Non-existing members are ignored. An error is returned when key exists
    and does not hold a sorted set.

    .. note:: **Time complexity**: ``O(M*log(N))`` with ``N`` being the
       number of elements in the sorted set and ``M`` the number of elements
       to be removed.

    :param key: The key of the sorted set
    :type key: :class:`str`, :class:`bytes`
    :param members: One or more member values to remove
    :type members: :class:`str`, :class:`bytes`
    :rtype: int
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    return self._execute([b'ZREM', key] + list(members))
def resolve_movie(self, title, year=None):
    """Tries to find a movie with a given title and year"""
    r = self.search_movie(title)
    return self._match_results(r, title, year)
def tararchive_opener(path, pattern='', verbose=False):
    """Opener that opens files from tar archive.

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    """
    if is_url(path):
        tararchive = tarfile.open(fileobj=io.BytesIO(urlopen(path).read()))
    else:
        tararchive = tarfile.open(path)
    with tararchive:
        for tarinfo in tararchive:
            if tarinfo.isfile():
                source = os.path.join(path, tarinfo.name)
                if pattern and not re.match(pattern, tarinfo.name):
                    logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(
                        os.path.abspath(tarinfo.name), pattern))
                    continue
                logger.verbose('Processing file: {}'.format(source))
                filehandle = tararchive.extractfile(tarinfo)
                yield filehandle
def ReadFile(self, definitions_registry, path):
    """Reads data type definitions from a file into the registry.

    Args:
      definitions_registry (DataTypeDefinitionsRegistry): data type
          definitions registry.
      path (str): path of the file to read from.
    """
    with open(path, 'r') as file_object:
        self.ReadFileObject(definitions_registry, file_object)
def encrypt(privkey, passphrase):
    """BIP0038 non-ec-multiply encryption. Returns BIP0038 encrypted privkey.

    :param privkey: Private key
    :type privkey: Base58
    :param str passphrase: UTF-8 encoded passphrase for encryption
    :return: BIP0038 non-ec-multiply encrypted wif key
    :rtype: Base58
    """
    if isinstance(privkey, str):
        privkey = PrivateKey(privkey)
    else:
        privkey = PrivateKey(repr(privkey))

    privkeyhex = repr(privkey)  # hex
    addr = format(privkey.bitcoin.address, "BTC")
    a = _bytes(addr)
    salt = hashlib.sha256(hashlib.sha256(a).digest()).digest()[0:4]

    if SCRYPT_MODULE == "scrypt":  # pragma: no cover
        key = scrypt.hash(passphrase, salt, 16384, 8, 8)
    elif SCRYPT_MODULE == "pylibscrypt":  # pragma: no cover
        key = scrypt.scrypt(bytes(passphrase, "utf-8"), salt, 16384, 8, 8)
    else:  # pragma: no cover
        raise ValueError("No scrypt module loaded")

    (derived_half1, derived_half2) = (key[:32], key[32:])
    aes = AES.new(derived_half2, AES.MODE_ECB)
    encrypted_half1 = _encrypt_xor(privkeyhex[:32], derived_half1[:16], aes)
    encrypted_half2 = _encrypt_xor(privkeyhex[32:], derived_half1[16:], aes)

    # flag byte is forced 0xc0 because Graphene only uses compressed keys
    payload = b"\x01" + b"\x42" + b"\xc0" + salt + encrypted_half1 + encrypted_half2

    # checksum
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    privatekey = hexlify(payload + checksum).decode("ascii")
    return Base58(privatekey)
def get_items(*indexes):
    """Return a callable that fetches the given indexes of an object

    Always return a tuple even when len(indexes) == 1.

    Similar to `operator.itemgetter`, but will insert `None` when the object
    does not have the desired index (instead of raising IndexError).
    """
    return lambda obj: tuple(
        obj[index] if len(obj) > index else None
        for index in indexes
    )
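A quick usage sketch, runnable as-is given the function above, showing the None-padding behaviour that distinguishes it from `operator.itemgetter`:

get_first_and_third = get_items(0, 2)
get_first_and_third(['a', 'b', 'c'])  # ('a', 'c')
get_first_and_third(['a'])            # ('a', None) -- short object, no IndexError
get_items(1)(['a', 'b'])              # ('b',) -- always a tuple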
def _is_region_extremely_sparse(self, start, end, base_state=None):
    """Check whether the given memory region is extremely sparse, i.e., all
    bytes are the same value.

    :param int start: The beginning of the region.
    :param int end: The end of the region.
    :param base_state: The base state (optional).
    :return: True if the region is extremely sparse, False otherwise.
    :rtype: bool
    """
    all_bytes = None

    if base_state is not None:
        all_bytes = base_state.memory.load(start, end - start + 1)
        try:
            all_bytes = base_state.solver.eval(all_bytes, cast_to=bytes)
        except SimError:
            all_bytes = None

    size = end - start + 1

    if all_bytes is None:
        # load from the binary
        all_bytes = self._fast_memory_load_bytes(start, size)

    if all_bytes is None:
        return True

    if len(all_bytes) < size:
        l.warning("_is_region_extremely_sparse: The given region %#x-%#x is not a "
                  "continuous memory region in the memory space. Only the first %d "
                  "bytes (%#x-%#x) are processed.", start, end, len(all_bytes),
                  start, start + len(all_bytes) - 1)

    the_byte_value = None
    for b in all_bytes:
        if the_byte_value is None:
            the_byte_value = b
        else:
            if the_byte_value != b:
                return False

    return True
def joint_torques(self):
    '''Get a list of all current joint torques in the skeleton.'''
    return as_flat_array(getattr(j, 'amotor', j).feedback[-1][:j.ADOF]
                         for j in self.joints)
def deep_reload_hook(m):
    """Replacement for reload()."""
    if not isinstance(m, ModuleType):
        raise TypeError("reload() argument must be module")

    name = m.__name__

    if name not in sys.modules:
        raise ImportError("reload(): module %.200s not in sys.modules" % name)

    global modules_reloading
    try:
        return modules_reloading[name]
    except:
        modules_reloading[name] = m

    dot = name.rfind('.')
    if dot < 0:
        subname = name
        path = None
    else:
        try:
            parent = sys.modules[name[:dot]]
        except KeyError:
            modules_reloading.clear()
            raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
        subname = name[dot+1:]
        path = getattr(parent, "__path__", None)

    try:
        # This appears to be necessary on Python 3, because imp.find_module()
        # tries to import standard libraries (like io) itself, and we don't
        # want them to be processed by our deep_import_hook.
        with replace_import_hook(original_import):
            fp, filename, stuff = imp.find_module(subname, path)
    finally:
        modules_reloading.clear()

    try:
        newm = imp.load_module(name, fp, filename, stuff)
    except:
        # load_module probably removed name from modules because of
        # the error. Put back the original module object.
        sys.modules[name] = m
        raise
    finally:
        if fp:
            fp.close()

    modules_reloading.clear()
    return newm
def zlist(columns, items, print_columns=None, text="", title="",
          width=DEFAULT_WIDTH, height=ZLIST_HEIGHT, timeout=None):
    """Display a list of values

    :param columns: a list of column names
    :type columns: list of strings
    :param items: a list of values
    :type items: list of strings
    :param print_columns: index of a column (return just the values from
        this column)
    :type print_columns: int (None if all the columns)
    :param text: text inside the window
    :type text: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    :return: A row of values from the table
    :rtype: list
    """
    dialog = ZList(columns, items, print_columns, text, title,
                   width, height, timeout)
    dialog.run()
    return dialog.response
def img2wav(path, min_x, max_x, min_y, max_y, window_size=3):
    """Generate 1-D data ``y=f(x)`` from a black/white image.

    Suppose we have an image like that:

    .. image:: images/waveform.png
        :align: center

    Put some codes::

        >>> from weatherlab.math.img2waveform import img2wav
        >>> import matplotlib.pyplot as plt
        >>> x, y = img2wav(r"testdata\img2waveform\waveform.png",
        ...                min_x=0.0, max_x=288,
        ...                min_y=15.0, max_y=35.0,
        ...                window_size=15)
        >>> plt.plot(x, y)
        >>> plt.show()

    Then you got nicely sampled data:

    .. image:: images\waveform_pyplot.png
        :align: center

    :param path: the image file path
    :type path: string
    :param min_x: minimum value of x axis
    :type min_x: number
    :param max_x: maximum value of x axis
    :type max_x: number
    :param min_y: minimum value of y axis
    :type min_y: number
    :param max_y: maximum value of y axis
    :type max_y: number
    :param window_size: the slide window
    :type window_size: int

    Note: in Python, a numpy array that represents an image runs from left
    to right and top to bottom, but in coordinates the y axis runs from
    bottom to top, so ``[::-1]`` is used to reverse the rows.
    """
    image = Image.open(path).convert("L")
    matrix = np.array(image)[::-1]
    # you can customize the gray scale fix behavior to fit color image
    matrix[np.where(matrix >= 128)] = 255
    matrix[np.where(matrix < 128)] = 0

    tick_x = (max_x - min_x) / matrix.shape[1]
    tick_y = (max_y - min_y) / matrix.shape[0]

    x, y = list(), list()
    for i in range(matrix.shape[1]):
        window = expand_window(  # slide margin window
            i, window_size, matrix.shape[1])
        margin_dots_y_indices = np.where(matrix[:, window] == 0)[0]
        # if found at least one dot in margin
        if len(margin_dots_y_indices) > 0:
            x.append(min_x + (i + 1) * tick_x)
            y.append(min_y + margin_dots_y_indices.mean() * tick_y)

    return np.array(x), np.array(y)
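The snippet relies on an expand_window helper that is not shown. A minimal sketch of what such a helper might look like, assuming it should return the column indices of a window centered at i, clipped to the image width — this is an assumption for illustration, not the library's actual implementation:

def expand_window(i, window_size, width):
    # hypothetical helper: indices of a window centered at column i,
    # clipped to the valid range [0, width)
    half = window_size // 2
    return list(range(max(0, i - half), min(width, i + half + 1)))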
def checkversion(version):
    """Checks foliadocserve version, returns 1 if the document is newer than
    the library, -1 if it is older, 0 if it is equal"""
    try:
        for refversion, responseversion in zip(
                [int(x) for x in REQUIREFOLIADOCSERVE.split('.')],
                [int(x) for x in version.split('.')]):
            if responseversion > refversion:
                return 1  # response is newer than library
            elif responseversion < refversion:
                return -1  # response is older than library
        return 0  # versions are equal
    except ValueError:
        raise ValueError("Unable to parse version, invalid syntax")
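A quick illustration of the comparison, assuming for the example that the module defines REQUIREFOLIADOCSERVE = '0.7.0' — the actual required version lives elsewhere in the library:

REQUIREFOLIADOCSERVE = '0.7.0'  # assumed value, for illustration only

checkversion('0.7.0')  # 0  (equal)
checkversion('0.8.1')  # 1  (server response is newer)
checkversion('0.6.9')  # -1 (server response is older)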
def getTradeHistory(pair, connection=None, info=None, count=None):
    """Retrieve the trade history for the given pair.  Returns a list of
    Trade instances.  If count is not None, it should be an integer, and
    specifies the number of items from the trade history that will be
    processed and returned."""

    if info is not None:
        info.validate_pair(pair)

    if connection is None:
        connection = common.BTCEConnection()

    response = connection.makeJSONRequest("/api/3/trades/%s" % pair)
    if type(response) is not dict:
        raise TypeError("The response is not a dict.")

    history = response.get(pair)
    if type(history) is not list:
        raise TypeError("The response is a %r, not a list." % type(history))

    result = []

    # Limit the number of items returned if requested.
    if count is not None:
        history = history[:count]

    for h in history:
        h["pair"] = pair
        t = Trade(**h)
        result.append(t)
    return result
def tags(self, id, service='facebook'):
    """Get the existing analysis for a given hash

    :param id: The hash to get tag analysis for
    :type id: str
    :param service: The service for this API call (facebook, etc)
    :type service: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.get(service + '/tags', params=dict(id=id))
def _extract_level(self, topic_str):
    """Turn 'engine.0.INFO.extra' into (logging.INFO, 'engine.0.extra')"""
    topics = topic_str.split('.')
    for idx, t in enumerate(topics):
        level = getattr(logging, t, None)
        if level is not None:
            break

    if level is None:
        level = logging.INFO
    else:
        topics.pop(idx)

    return level, '.'.join(topics)
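The parsing trick works standalone: getattr on the logging module turns a level name embedded in the topic into its numeric value, and anything else yields None:

import logging

getattr(logging, 'INFO', None)   # 20 -- a recognised level name
getattr(logging, 'extra', None)  # None -- not a level name

# so 'engine.0.INFO.extra' yields (20, 'engine.0.extra')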
def validate(anchors, duplicate_tags, opts):
    """Client facing validate function. Runs _validate() and returns True if
    anchors and duplicate_tags pass all validations. Handles exceptions
    automatically if _validate() throws any and exits the program.

    :param anchors: Dictionary mapping string file path keys to dictionary
        values. The inner dictionaries map string AnchorHub tags to generated
        anchor values
    :param duplicate_tags: Dictionary mapping string file path keys to a list
        of tuples. The tuples contain the following information, in order:

        1. The string AnchorHub tag that was repeated
        2. The line in the file that the duplicate was found, as a number
        3. The string generated anchor that first used the repeated tag

    :param opts: Namespace containing AnchorHub options, usually created by
        command line arguments
    :return: True if the anchors pass all validation tests
    """
    try:
        return _validate(anchors, duplicate_tags, opts)
    except ValidationException as e:
        if str(e) == "Duplicate tags found":
            messages.print_duplicate_anchor_information(duplicate_tags)
        else:
            print(e)
        sys.exit(0)
def mode(name, mode, quotatype):
    '''
    Set the quota for the system

    name
        The filesystem to set the quota mode on

    mode
        Whether the quota system is on or off

    quotatype
        Must be ``user`` or ``group``
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    fun = 'off'
    if mode is True:
        fun = 'on'
    if __salt__['quota.get_mode'](name)[name][quotatype] == fun:
        ret['result'] = True
        ret['comment'] = 'Quota for {0} already set to {1}'.format(name, fun)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Quota for {0} needs to be set to {1}'.format(name, fun)
        return ret
    if __salt__['quota.{0}'.format(fun)](name):
        ret['changes'] = {'quota': name}
        ret['result'] = True
        ret['comment'] = 'Set quota for {0} to {1}'.format(name, fun)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set quota for {0} to {1}'.format(name, fun)
        return ret
def status(self, verbose=False):
    """Checks the status of your CyREST server."""
    try:
        response = api(url=self.__url, method="GET", verbose=verbose)
    except Exception as e:
        print('Could not get status from CyREST:\n\n' + str(e))
    else:
        print('CyREST online!')
def scs2e(sc, sclkch):
    """Convert a spacecraft clock string to ephemeris seconds past J2000 (ET).

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scs2e_c.html

    :param sc: NAIF integer code for a spacecraft.
    :type sc: int
    :param sclkch: An SCLK string.
    :type sclkch: str
    :return: Ephemeris time, seconds past J2000.
    :rtype: float
    """
    sc = ctypes.c_int(sc)
    sclkch = stypes.stringToCharP(sclkch)
    et = ctypes.c_double()
    libspice.scs2e_c(sc, sclkch, ctypes.byref(et))
    return et.value
def create_package_file(root, master_package, subroot, py_files, opts, subs):
    """Build the text of the file and write the file."""
    text = format_heading(1, '%s' % makename(master_package, subroot))

    if opts.modulefirst:
        text += format_directive(subroot, master_package)
        text += '\n'

    # build a list of directories that are subpackages (contain an INITPY file)
    subs = [sub for sub in subs if path.isfile(path.join(root, sub, INITPY))]
    # if there are some package directories, add a TOC for these subpackages
    if subs:
        # text += format_heading(2, 'Subpackages')
        text += '.. toctree::\n\n'
        for sub in subs:
            text += '    %s.%s\n' % (makename(master_package, subroot), sub)
        text += '\n'

    submods = [path.splitext(sub)[0] for sub in py_files
               if not shall_skip(path.join(root, sub), opts) and sub != INITPY]
    if submods:
        # text += format_heading(2, 'Submodules')
        if opts.separatemodules:
            text += '.. toctree::\n\n'
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                text += '   %s\n' % modfile

                # generate separate file for this module
                if not opts.noheadings:
                    filetext = format_heading(1, '%s module' % modfile)
                else:
                    filetext = ''
                filetext += format_directive(makename(subroot, submod),
                                             master_package)
                write_file(modfile, filetext, opts)
        else:
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                if not opts.noheadings:
                    text += format_heading(2, '%s module' % modfile)
                text += format_directive(makename(subroot, submod),
                                         master_package)
                text += '\n'
        text += '\n'

    if not opts.modulefirst:
        text += format_heading(2, 'Module contents')
        text += format_directive(subroot, master_package)

    write_file(makename(master_package, subroot), text, opts)
def main():
    """Sends an API AT command to read the lower-order address bits from an
    XBee Series 1 and looks for a response."""
    try:
        # Open serial port
        ser = serial.Serial('/dev/ttyUSB0', 9600)

        # Create XBee Series 1 object
        xbee = XBee(ser)

        # Send AT packet
        xbee.send('at', frame_id='A', command='DH')
        # Wait for response
        response = xbee.wait_read_frame()
        print(response)

        # Send AT packet
        xbee.send('at', frame_id='B', command='DL')
        # Wait for response
        response = xbee.wait_read_frame()
        print(response)

        # Send AT packet
        xbee.send('at', frame_id='C', command='MY')
        # Wait for response
        response = xbee.wait_read_frame()
        print(response)

        # Send AT packet
        xbee.send('at', frame_id='D', command='CE')
        # Wait for response
        response = xbee.wait_read_frame()
        print(response)
    except KeyboardInterrupt:
        pass
    finally:
        ser.close()
def write(self, value):
    # type: (int) -> None
    """Write a raw byte to the LCD."""

    # Get current position
    row, col = self._cursor_pos

    # Write byte if changed
    try:
        if self._content[row][col] != value:
            self._send_data(value)
            self._content[row][col] = value  # Update content cache
            unchanged = False
        else:
            unchanged = True
    except IndexError as e:
        # Position out of range
        if self.auto_linebreaks is True:
            raise e
        self._send_data(value)
        unchanged = False

    # Update cursor position.
    if self.text_align_mode == 'left':
        if self.auto_linebreaks is False or col < self.lcd.cols - 1:
            # No newline, update internal pointer
            newpos = (row, col + 1)
            if unchanged:
                self.cursor_pos = newpos
            else:
                self._cursor_pos = newpos
            self.recent_auto_linebreak = False
        else:
            # Newline, reset pointer
            if row < self.lcd.rows - 1:
                self.cursor_pos = (row + 1, 0)
            else:
                self.cursor_pos = (0, 0)
            self.recent_auto_linebreak = True
    else:
        if self.auto_linebreaks is False or col > 0:
            # No newline, update internal pointer
            newpos = (row, col - 1)
            if unchanged:
                self.cursor_pos = newpos
            else:
                self._cursor_pos = newpos
            self.recent_auto_linebreak = False
        else:
            # Newline, reset pointer
            if row < self.lcd.rows - 1:
                self.cursor_pos = (row + 1, self.lcd.cols - 1)
            else:
                self.cursor_pos = (0, self.lcd.cols - 1)
            self.recent_auto_linebreak = True
def _get_repr(obj, pretty=False, indent=1):
    """Get string representation of an object

    :param obj: object
    :type obj: object
    :param pretty: use pretty formatting
    :type pretty: bool
    :param indent: indentation for pretty formatting
    :type indent: int
    :return: string representation
    :rtype: str
    """
    if pretty:
        repr_value = pformat(obj, indent)
    else:
        repr_value = repr(obj)
    if sys.version_info[0] == 2:
        # Try to convert Unicode string to human-readable form
        try:
            repr_value = repr_value.decode('raw_unicode_escape')
        except UnicodeError:
            repr_value = repr_value.decode('utf-8', 'replace')
    return repr_value
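The pretty branch uses the standard library's pprint.pformat; the difference from plain repr only shows on data that exceeds the line width:

from pprint import pformat

repr({'a': list(range(30))})     # one long line
pformat({'a': list(range(30))})  # wrapped across lines at the default width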
def _add_data_to_general_stats(self, data):
    """Add data for the general stats in a Picard-module specific manner"""
    headers = _get_general_stats_headers()
    self.general_stats_headers.update(headers)

    header_names = ('ERROR_count', 'WARNING_count', 'file_validation_status')
    general_data = dict()
    for sample in data:
        general_data[sample] = {column: data[sample][column]
                                for column in header_names}
        if sample not in self.general_stats_data:
            self.general_stats_data[sample] = dict()
        if data[sample]['file_validation_status'] != 'pass':
            headers['file_validation_status']['hidden'] = False
        self.general_stats_data[sample].update(general_data[sample])
def GetParsers(cls, parser_filter_expression=None):
    """Retrieves the registered parsers and plugins.

    Retrieves a dictionary of all registered parsers and associated plugins
    from a parser filter string. The filter string can contain direct names
    of parsers, presets or plugins. The filter string can also negate
    selection if prepended with an exclamation point, e.g.: "foo,!foo/bar"
    would include parser foo but not include plugin bar. A list of specific
    included and excluded plugins is also passed to each parser's class.

    The three types of entries in the filter string:

    * name of a parser: this would be the exact name of a single parser to
      include (or exclude), e.g. foo;
    * name of a preset, e.g. win7: the presets are defined in
      plaso/parsers/presets.py;
    * name of a plugin: if a plugin name is included the parent parser will
      be included in the list of registered parsers;

    Args:
      parser_filter_expression (Optional[str]): parser filter expression,
          where None represents all parsers and plugins.

    Yields:
      tuple: containing:

      * str: name of the parser:
      * type: parser class (subclass of BaseParser).
    """
    includes, excludes = cls._GetParserFilters(parser_filter_expression)

    for parser_name, parser_class in iter(cls._parser_classes.items()):
        # If there are no includes all parsers are included by default.
        if not includes and parser_name in excludes:
            continue

        if includes and parser_name not in includes:
            continue

        yield parser_name, parser_class
def open_slots(self, session):
    """Returns the number of slots open at the moment"""
    from airflow.models.taskinstance import \
        TaskInstance as TI  # Avoid circular import

    used_slots = session.query(func.count()).filter(TI.pool == self.pool).filter(
        TI.state.in_([State.RUNNING, State.QUEUED])).scalar()
    return self.slots - used_slots
def _update(self, sock_info, criteria, document, upsert=False,
            check_keys=True, multi=False, manipulate=False,
            write_concern=None, op_id=None, ordered=True,
            bypass_doc_val=False, collation=None, array_filters=None,
            session=None, retryable_write=False):
    """Internal update / replace helper."""
    common.validate_boolean("upsert", upsert)
    if manipulate:
        document = self.__database._fix_incoming(document, self)
    collation = validate_collation_or_none(collation)
    write_concern = write_concern or self.write_concern
    acknowledged = write_concern.acknowledged
    update_doc = SON([('q', criteria),
                      ('u', document),
                      ('multi', multi),
                      ('upsert', upsert)])
    if collation is not None:
        if sock_info.max_wire_version < 5:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use collations.')
        elif not acknowledged:
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        else:
            update_doc['collation'] = collation
    if array_filters is not None:
        if sock_info.max_wire_version < 6:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use array_filters.')
        elif not acknowledged:
            raise ConfigurationError(
                'arrayFilters is unsupported for unacknowledged writes.')
        else:
            update_doc['arrayFilters'] = array_filters
    command = SON([('update', self.name),
                   ('ordered', ordered),
                   ('updates', [update_doc])])
    if not write_concern.is_server_default:
        command['writeConcern'] = write_concern.document

    if not sock_info.op_msg_enabled and not acknowledged:
        # Legacy OP_UPDATE.
        return self._legacy_write(
            sock_info, 'update', command, op_id,
            bypass_doc_val, message.update, self.__full_name, upsert,
            multi, criteria, document, False, write_concern.document,
            check_keys, self.__write_response_codec_options)

    # Update command.
    if bypass_doc_val and sock_info.max_wire_version >= 4:
        command['bypassDocumentValidation'] = True

    # The command result has to be published for APM unmodified
    # so we make a shallow copy here before adding updatedExisting.
    result = sock_info.command(
        self.__database.name, command,
        write_concern=write_concern,
        codec_options=self.__write_response_codec_options,
        session=session, client=self.__database.client,
        retryable_write=retryable_write).copy()
    _check_write_command_response(result)
    # Add the updatedExisting field for compatibility.
    if result.get('n') and 'upserted' not in result:
        result['updatedExisting'] = True
    else:
        result['updatedExisting'] = False
        # MongoDB >= 2.6.0 returns the upsert _id in an array
        # element. Break it out for backward compatibility.
        if 'upserted' in result:
            result['upserted'] = result['upserted'][0]['_id']

    if not acknowledged:
        return None
    return result
def generate_repo_files(self, release):
    """Dynamically generate our yum repo configuration"""
    repo_tmpl = pkg_resources.resource_string(__name__, 'templates/repo.mako')
    repo_file = os.path.join(release['git_dir'], '%s.repo' % release['repo'])

    with open(repo_file, 'w') as repo:
        repo_out = Template(repo_tmpl).render(**release)
        self.log.debug('Writing repo file %s:\n%s', repo_file, repo_out)
        repo.write(repo_out)

    self.log.info('Wrote repo configuration to %s', repo_file)
def angact_ho(x, omega):
    """Calculate angle and action variable in sho potential with parameter
    omega"""
    action = (x[3:]**2 + (omega * x[:3])**2) / (2. * omega)
    angle = np.array([np.arctan(-x[3 + i] / omega[i] / x[i])
                      if x[i] != 0. else -np.sign(x[3 + i]) * np.pi / 2.
                      for i in range(3)])
    for i in range(3):
        if x[i] < 0:
            angle[i] += np.pi
    return np.concatenate((action, angle % (2. * np.pi)))
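A small usage sketch. From the indexing above, x appears to pack three positions followed by three velocities; that layout is an inference, not documented behaviour:

import numpy as np

omega = np.array([1.0, 2.0, 0.5])
x = np.array([1.0, 0.5, -0.3,   # positions q_i (assumed layout)
              0.0, 0.2, 0.1])   # velocities v_i (assumed layout)
aa = angact_ho(x, omega)
actions, angles = aa[:3], aa[3:]  # J_i = (v_i**2 + (w_i*q_i)**2) / (2*w_i)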
def nvmlDeviceSetPowerManagementLimit(handle, limit):
    r"""
    /**
     * Set new power limit of this device.
     *
     * For Kepler &tm; or newer fully supported devices.
     * Requires root/admin permissions.
     *
     * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values.
     *
     * \note Limit is not persistent across reboots or driver unloads.
     * Enable persistent mode to prevent driver from unloading when no application is using the device.
     *
     * @param device                               The identifier of the target device
     * @param limit                                Power management limit in milliwatts to set
     *
     * @return
     *         - \ref NVML_SUCCESS                 if \a limit has been set
     *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
     *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a defaultLimit is out of range
     *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
     *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
     *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
     *
     * @see nvmlDeviceGetPowerManagementLimitConstraints
     * @see nvmlDeviceGetPowerManagementDefaultLimit
     */
    nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceSetPowerManagementLimit")
    ret = fn(handle, c_uint(limit))
    _nvmlCheckReturn(ret)
    return None
def get_assessment_ids(self):
    """Gets the Ids of any assessments associated with this activity.

    return: (osid.id.IdList) - list of assessment Ids
    raise:  IllegalState - is_assessment_based_activity() is false
    compliance: mandatory - This method must be implemented.
    """
    if not self.is_assessment_based_activity():
        raise IllegalState()
    else:
        return [Id(a) for a in self._my_map['assessmentIds']]
def add_layout(self, obj, place='center'):
    '''Adds an object to the plot in a specified place.

    Args:
        obj (Renderer) : the object to add to the Plot
        place (str, optional) : where to add the object (default: 'center')
            Valid places are: 'left', 'right', 'above', 'below', 'center'.

    Returns:
        None
    '''
    valid_places = ['left', 'right', 'above', 'below', 'center']
    if place not in valid_places:
        raise ValueError(
            "Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
        )

    getattr(self, place).append(obj)
def citedReferences(self, uid, count=100, offset=1, retrieveParameters=None):
    """The citedReferences operation returns references cited by an article
    identified by a unique identifier. You may specify only one identifier
    per request.

    :uid: Thomson Reuters unique record identifier

    :count: Number of records to display in the result. Cannot be less than
        0 and cannot be greater than 100. If count is 0 then only the summary
        information will be returned.

    :offset: First record in results to return. Must be greater than zero

    :retrieveParameters: Retrieve parameters. If omitted the result of
        make_retrieveParameters(offset, count, 'RS', 'D') is used.
    """
    return self._search.service.citedReferences(
        databaseId='WOS',
        uid=uid,
        queryLanguage='en',
        retrieveParameters=(retrieveParameters or
                            self.make_retrieveParameters(offset, count))
    )
def get_default_frame(self):
    '''default frame for waypoints'''
    if self.settings.terrainalt == 'Auto':
        if self.get_mav_param('TERRAIN_FOLLOW', 0) == 1:
            return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
        return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
    if self.settings.terrainalt == 'True':
        return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
    return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
def base64_b64encode(instr):
    '''
    Encode a string as base64 using the "modern" Python interface.

    Among other possible differences, the "modern" encoder does not include
    newline ('\\n') characters in the encoded output.
    '''
    return salt.utils.stringutils.to_unicode(
        base64.b64encode(salt.utils.stringutils.to_bytes(instr)),
        encoding='utf8' if salt.utils.platform.is_windows() else None
    )
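Underneath, this wraps the standard library's modern interface, which takes bytes and returns bytes with no trailing newline:

import base64

base64.b64encode(b'salt')           # b'c2FsdA==' (no trailing newline)
base64.b64encode(b'salt').decode()  # 'c2FsdA=='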
def retrieve_file_from_RCSB(http_connection, resource, silent=True):
    '''Retrieve a file from the RCSB.'''
    if not silent:
        colortext.printf("Retrieving %s from RCSB" % os.path.split(resource)[1], color="aqua")
    return http_connection.get(resource)
async def upload_sticker_file(self, user_id: base.Integer,
                              png_sticker: base.InputFile) -> types.File:
    """Use this method to upload a .png file with a sticker for later use in
    createNewStickerSet and addStickerToSet methods (can be used multiple
    times).

    Source: https://core.telegram.org/bots/api#uploadstickerfile

    :param user_id: User identifier of sticker file owner
    :type user_id: :obj:`base.Integer`
    :param png_sticker: Png image with the sticker, must be up to 512
        kilobytes in size, dimensions must not exceed 512px, and either
        width or height must be exactly 512px.
    :type png_sticker: :obj:`base.InputFile`
    :return: Returns the uploaded File on success
    :rtype: :obj:`types.File`
    """
    payload = generate_payload(**locals(), exclude=['png_sticker'])
    files = {}
    prepare_file(payload, files, 'png_sticker', png_sticker)

    result = await self.request(api.Methods.UPLOAD_STICKER_FILE, payload, files)
    return types.File(**result)
def fromtext(source=None, encoding=None, errors='strict', strip=None,
             header=('lines',)):
    """Extract a table from lines in the given text file. E.g.::

        >>> import petl as etl
        >>> # setup example file
        ... text = 'a,1\\nb,2\\nc,2\\n'
        >>> with open('example.txt', 'w') as f:
        ...     f.write(text)
        ...
        12
        >>> table1 = etl.fromtext('example.txt')
        >>> table1
        +-------+
        | lines |
        +=======+
        | 'a,1' |
        +-------+
        | 'b,2' |
        +-------+
        | 'c,2' |
        +-------+

        >>> # post-process, e.g., with capture()
        ... table2 = table1.capture('lines', '(.*),(.*)$', ['foo', 'bar'])
        >>> table2
        +-----+-----+
        | foo | bar |
        +=====+=====+
        | 'a' | '1' |
        +-----+-----+
        | 'b' | '2' |
        +-----+-----+
        | 'c' | '2' |
        +-----+-----+

    Note that the strip() function is called on each line, which by default
    will remove leading and trailing whitespace, including the end-of-line
    character - use the `strip` keyword argument to specify alternative
    characters to strip. Set the `strip` argument to `False` to disable this
    behaviour and leave line endings in place.
    """
    source = read_source_from_arg(source)
    return TextView(source, header=header, encoding=encoding,
                    errors=errors, strip=strip)
def normalized_nodes_on_bdry(nodes_on_bdry, length):
    """Return a list of 2-tuples of bool from the input parameter.

    This function is intended to normalize a ``nodes_on_bdry`` parameter
    that can be given as a single boolean (global) or as a sequence (per
    axis). Each entry of the sequence can either be a single boolean
    (global for the axis) or a boolean sequence of length 2.

    Parameters
    ----------
    nodes_on_bdry : bool or sequence
        Input parameter to be normalized according to the above scheme.
    length : positive int
        Desired length of the returned list.

    Returns
    -------
    normalized : list of 2-tuples of bool
        Normalized list with ``length`` entries, each of which is a
        2-tuple of boolean values.

    Examples
    --------
    Global for all axes:

    >>> normalized_nodes_on_bdry(True, length=2)
    [(True, True), (True, True)]

    Global per axis:

    >>> normalized_nodes_on_bdry([True, False], length=2)
    [(True, True), (False, False)]

    Mixing global and explicit per axis:

    >>> normalized_nodes_on_bdry([[True, False], False, True], length=3)
    [(True, False), (False, False), (True, True)]
    """
    shape = np.shape(nodes_on_bdry)
    if shape == ():
        out_list = [(bool(nodes_on_bdry), bool(nodes_on_bdry))] * length
    elif length == 1 and shape == (2,):
        out_list = [(bool(nodes_on_bdry[0]), bool(nodes_on_bdry[1]))]
    elif len(nodes_on_bdry) == length:
        out_list = []

        for i, on_bdry in enumerate(nodes_on_bdry):
            shape_i = np.shape(on_bdry)
            if shape_i == ():
                out_list.append((bool(on_bdry), bool(on_bdry)))
            elif shape_i == (2,):
                out_list.append((bool(on_bdry[0]), bool(on_bdry[1])))
            else:
                raise ValueError('in axis {}: `nodes_on_bdry` has shape {}, '
                                 'expected (2,)'.format(i, shape_i))
    else:
        raise ValueError('`nodes_on_bdry` has shape {}, expected ({},)'
                         ''.format(shape, length))

    return out_list
def get(key, default=-1):
    """Backport support for original codes."""
    if isinstance(key, int):
        return QS_Function(key)
    if key not in QS_Function._member_map_:
        extend_enum(QS_Function, key, default)
    return QS_Function[key]
def parse_host(host):
    """Parses host name and port number from a string."""
    if re.match(r'^(\d+)$', host) is not None:
        return ("0.0.0.0", int(host))

    if re.match(r'^(\w+)://', host) is None:
        host = "//" + host

    o = parse.urlparse(host)
    hostname = o.hostname or "0.0.0.0"
    port = o.port or 0
    return (hostname, port)
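A few expected results, assuming the module-level imports the snippet relies on (re and urllib.parse imported as parse):

import re
from urllib import parse

parse_host('8080')                   # ('0.0.0.0', 8080) -- bare port
parse_host('example.com:9000')       # ('example.com', 9000)
parse_host('http://localhost:5000')  # ('localhost', 5000)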
def filesize(num_bytes):
    """Return a string containing an approximate representation of
    *num_bytes* using a small number and decimal SI prefix."""
    for prefix in '-KMGTPEZY':  # SI prefixes in increasing order of magnitude
        if num_bytes < 999.9:
            break
        num_bytes /= 1000.0
    if prefix == '-':
        return '{} B'.format(num_bytes)
    return '{:.3n} {}B'.format(num_bytes, prefix)
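A few sample outputs (note the original had the peta/exa prefixes transposed, fixed above to the standard SI order K, M, G, T, P, E, Z, Y):

filesize(512)     # '512 B'
filesize(2500)    # '2.5 KB'
filesize(3.2e9)   # '3.2 GB'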
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
    """Take a sample of attributes of a ceph pool, returning ceph
    pool name, object count and disk space used for the specified
    pool ID number.

    :param sentry_unit: Pointer to amulet sentry instance (juju unit)
    :param pool_id: Ceph pool ID
    :returns: List of pool name, object count, kb disk space used
    """
    df = self.get_ceph_df(sentry_unit)
    for pool in df['pools']:
        if pool['id'] == pool_id:
            pool_name = pool['name']
            obj_count = pool['stats']['objects']
            kb_used = pool['stats']['kb_used']

    self.log.debug('Ceph {} pool (ID {}): {} objects, '
                   '{} kb used'.format(pool_name, pool_id,
                                       obj_count, kb_used))
    return pool_name, obj_count, kb_used
def unregister_service(self, name):
    """Implementation of :meth:`twitcher.api.IRegistry.unregister_service`."""
    try:
        self.store.delete_service(name=name)
    except Exception:
        LOGGER.exception('unregister failed')
        return False
    else:
        return True
def setup_logging(
        config='logging.yaml',
        default_level=logging.INFO,
        env_key='LOG_CFG'
):
    """Setup logging configuration"""
    path = config
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):  # path is a string, so use os.path.exists
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
    else:
        print('cannot read: ' + str(path))
        logging.basicConfig(level=default_level)
def ajIrreguliers(self):
    """Load the irregular forms from the data/irregs.la file."""
    lignes = lignesFichier(self.path("irregs.la"))
    for lin in lignes:
        try:
            irr = self.parse_irreg(lin)
            self.lemmatiseur._irregs[deramise(irr.gr())].append(irr)
        except Exception as E:
            warnings.warn("Erreur au chargement de l'irrégulier\n" + lin + "\n" + str(E))
            raise E

    for irr in flatten(self.lemmatiseur._irregs.values()):
        irr.lemme().ajIrreg(irr)
def is_primitive(value):
    """Checks if value has primitive type.

    Primitive types are: numbers, strings, booleans, date and time.
    Complex (non-primitive) types are: objects, maps and arrays.

    :param value: a value to check
    :return: true if the value has primitive type and false if value type
        is complex.
    """
    typeCode = TypeConverter.to_type_code(value)
    return typeCode == TypeCode.String or typeCode == TypeCode.Enum \
        or typeCode == TypeCode.Boolean or typeCode == TypeCode.Integer \
        or typeCode == TypeCode.Long or typeCode == TypeCode.Float \
        or typeCode == TypeCode.Double or typeCode == TypeCode.DateTime \
        or typeCode == TypeCode.Duration
def absence_count(self):
    """Return the user's absence count.

    If the user has no absences or is not a signup user, returns 0.
    """
    # FIXME: remove recursive dep
    from ..eighth.models import EighthSignup

    return EighthSignup.objects.filter(
        user=self,
        was_absent=True,
        scheduled_activity__attendance_taken=True).count()
def create_unique_autosave_filename(self, filename, autosave_dir):
    """Create unique autosave file name for specified file name.

    Args:
        filename (str): original file name
        autosave_dir (str): directory in which autosave files are stored
    """
    basename = osp.basename(filename)
    autosave_filename = osp.join(autosave_dir, basename)
    if autosave_filename in self.name_mapping.values():
        counter = 0
        root, ext = osp.splitext(basename)
        while autosave_filename in self.name_mapping.values():
            counter += 1
            autosave_basename = '{}-{}{}'.format(root, counter, ext)
            autosave_filename = osp.join(autosave_dir, autosave_basename)
    return autosave_filename
def get_next_input(self):
    """Returns the next line of input

    :return: string of input
    """
    # TODO: could override input if we get input coming in at the same time
    all_input = Deployment.objects.get(pk=self.id).input or ''
    lines = all_input.splitlines()
    first_line = lines[0] if len(lines) else None
    lines = lines[1:] if len(lines) > 1 else []
    Deployment.objects.filter(pk=self.id).update(input='\n'.join(lines))
    return first_line
def _fw_delete(self, drvr_name, data):
    """Firewall Delete routine.

    This function calls routines to remove FW from fabric and device.
    It also updates its local cache.
    """
    fw_id = data.get('firewall_id')
    tenant_id = self.tenant_db.get_fw_tenant(fw_id)
    if tenant_id not in self.fwid_attr:
        LOG.error("Invalid tenant id for FW delete %s", tenant_id)
        return
    tenant_obj = self.fwid_attr[tenant_id]
    ret = self._check_delete_fw(tenant_id, drvr_name)
    if ret:
        tenant_obj.delete_fw(fw_id)
        self.tenant_db.del_fw_tenant(fw_id)
def delete_invalid_tickets(self):
    """Delete consumed or expired ``Ticket``s that are not referenced by
    other ``Ticket``s.

    Invalid tickets are no longer valid for authentication and can be
    safely deleted.

    A custom management command is provided that executes this method on
    all applicable models by running ``manage.py cleanupcas``.
    """
    for ticket in self.filter(Q(consumed__isnull=False) |
                              Q(expires__lte=now())).order_by('-expires'):
        try:
            ticket.delete()
        except models.ProtectedError:
            pass
def rpc_put_zonefiles( self, zonefile_datas, **con_info ): """ Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles """ conf = get_blockstack_opts() if not is_atlas_enabled(conf): return {'error': 'No data', 'http_status': 400} if 'zonefiles' not in conf: return {'error': 'No zonefiles directory (likely a configuration error)', 'http_status': 400} if type(zonefile_datas) != list: return {'error': 'Invalid data', 'http_status': 400} if len(zonefile_datas) > 5: return {'error': 'Too many zonefiles', 'http_status': 400} for zfd in zonefile_datas: if not check_string(zfd, max_length=((4 * RPC_MAX_ZONEFILE_LEN) / 3) + 3, pattern=OP_BASE64_EMPTY_PATTERN): return {'error': 'Invalid zone file payload (exceeds {} bytes and/or not base64-encoded)'.format(RPC_MAX_ZONEFILE_LEN)} zonefile_dir = conf.get("zonefiles", None) saved = [] for zonefile_data in zonefile_datas: # decode try: zonefile_data = base64.b64decode( zonefile_data ) except: log.debug("Invalid base64 zonefile") saved.append(0) continue if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN: log.debug("Zonefile too long") saved.append(0) continue # is this zone file already discovered? zonefile_hash = get_zonefile_data_hash(str(zonefile_data)) zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, path=conf['atlasdb_path']) if not zfinfos: # nope log.debug("Unknown zonefile hash {}".format(zonefile_hash)) saved.append(0) continue # keep this zone file rc = store_atlas_zonefile_data( str(zonefile_data), zonefile_dir ) if not rc: log.error("Failed to store zonefile {}".format(zonefile_hash)) saved.append(0) continue # mark this zone file as present, so we don't ask anyone else for it was_present = atlasdb_set_zonefile_present(zonefile_hash, True, path=conf['atlasdb_path']) if was_present: # we already got this zone file # only process it if it's outside our recovery range recovery_start, recovery_end = get_recovery_range(self.working_dir) current_block = virtualchain_hooks.get_last_block(self.working_dir) if recovery_start is not None and recovery_end is not None and recovery_end < current_block: # no need to process log.debug("Already have zonefile {}".format(zonefile_hash)) saved.append(1) continue if self.subdomain_index: # got new zonefile # let the subdomain indexer know, along with giving it the minimum block height min_block_height = min([zfi['block_height'] for zfi in zfinfos]) log.debug("Enqueue {} from {} for subdomain processing".format(zonefile_hash, min_block_height)) self.subdomain_index.enqueue_zonefile(zonefile_hash, min_block_height) log.debug("Stored new zonefile {}".format(zonefile_hash)) saved.append(1) log.debug("Saved {} zonefile(s)".format(sum(saved))) log.debug("Reply: {}".format({'saved': saved})) return self.success_response( {'saved': saved} )
Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles
async def await_reply(self, correlation_id, timeout=None): """Wait for a reply to a given correlation id. If a timeout is provided, it will raise a asyncio.TimeoutError. """ try: result = await asyncio.wait_for( self._futures[correlation_id], timeout=timeout) return result finally: del self._futures[correlation_id]
Wait for a reply to a given correlation id. If a timeout is provided, it will raise a asyncio.TimeoutError.
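A sketch of the calling side, assuming `client` is an instance of the class defining `await_reply` and that the future for the id was registered in `client._futures` before the request went out:

import asyncio

async def fetch_reply(client, correlation_id):
    # Wait up to 5 seconds; await_reply raises asyncio.TimeoutError
    # when the timeout elapses.
    try:
        return await client.await_reply(correlation_id, timeout=5.0)
    except asyncio.TimeoutError:
        return None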
def map_parameters(cls, params): """Maps parameters to form field names""" d = {} for k, v in six.iteritems(params): d[cls.FIELD_MAP.get(k.lower(), k)] = v return d
Maps parameters to form field names
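A self-contained sketch showing the mapping behaviour; the FIELD_MAP contents here are hypothetical:

import six

class PaymentForm(object):
    # Hypothetical mapping: lowercase parameter name -> form field name
    FIELD_MAP = {'cardnumber': 'CARDNO', 'amount': 'AMOUNT'}

    @classmethod
    def map_parameters(cls, params):
        d = {}
        for k, v in six.iteritems(params):
            d[cls.FIELD_MAP.get(k.lower(), k)] = v
        return d

print(PaymentForm.map_parameters({'CardNumber': '4111', 'currency': 'EUR'}))
# -> {'CARDNO': '4111', 'currency': 'EUR'}  (unmapped keys pass through)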
def list_leases(self, prefix): """Retrieve a list of lease ids. Supported methods: LIST: /sys/leases/lookup/{prefix}. Produces: 200 application/json :param prefix: Lease prefix to filter list by. :type prefix: str | unicode :return: The JSON response of the request. :rtype: dict """ api_path = '/v1/sys/leases/lookup/{prefix}'.format(prefix=prefix) response = self._adapter.list( url=api_path, ) return response.json()
Retrieve a list of lease ids. Supported methods: LIST: /sys/leases/lookup/{prefix}. Produces: 200 application/json :param prefix: Lease prefix to filter list by. :type prefix: str | unicode :return: The JSON response of the request. :rtype: dict
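A usage sketch against a running Vault dev server (URL, token, and prefix are illustrative), assuming this method is exposed on hvac's system-backend client:

import hvac

client = hvac.Client(url='http://127.0.0.1:8200', token='s.example')
leases = client.sys.list_leases(prefix='auth/token/create')
print(leases['data']['keys'])  # lease ids under the prefix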
def _run_select(self): """ Run the query as a "select" statement against the connection. :return: The result :rtype: list """ return self._connection.select( self.to_sql(), self.get_bindings(), not self._use_write_connection )
Run the query as a "select" statement against the connection. :return: The result :rtype: list
def set_crypttab(
        name,
        device,
        password='none',
        options='',
        config='/etc/crypttab',
        test=False,
        match_on='name'):
    '''
    Verify that this device is represented in the crypttab, change the device to
    match the name passed, or add the name if it is not present.

    CLI Example:

    .. code-block:: bash

        salt '*' cryptdev.set_crypttab foo /dev/sdz1 mypassword swap,size=256
    '''

    # Fix the options type if it is not a string
    if options is None:
        options = ''
    elif isinstance(options, six.string_types):
        pass
    elif isinstance(options, list):
        options = ','.join(options)
    else:
        msg = 'options must be a string or list of strings'
        raise CommandExecutionError(msg)

    # preserve arguments for updating
    entry_args = {
        'name': name,
        'device': device,
        'password': password if password is not None else 'none',
        'options': options,
    }

    lines = []
    ret = None

    # Transform match_on into list--items will be checked later
    if isinstance(match_on, list):
        pass
    elif not isinstance(match_on, six.string_types):
        msg = 'match_on must be a string or list of strings'
        raise CommandExecutionError(msg)
    else:
        match_on = [match_on]

    # generate entry and criteria objects, handle invalid keys in match_on
    entry = _crypttab_entry(**entry_args)
    try:
        criteria = entry.pick(match_on)
    except KeyError:
        # materialize the invalid keys so the error message shows them
        # instead of a filter object
        invalid_keys = [key for key in match_on
                        if key not in _crypttab_entry.crypttab_keys]
        msg = 'Unrecognized keys in match_on: "{0}"'.format(invalid_keys)
        raise CommandExecutionError(msg)

    # parse file, use ret to cache status
    if not os.path.isfile(config):
        raise CommandExecutionError('Bad config file "{0}"'.format(config))

    try:
        with salt.utils.files.fopen(config, 'r') as ifile:
            for line in ifile:
                line = salt.utils.stringutils.to_unicode(line)
                try:
                    if criteria.match(line):
                        # Note: If ret isn't None here,
                        # we've matched multiple lines
                        ret = 'present'
                        if entry.match(line):
                            lines.append(line)
                        else:
                            ret = 'change'
                            lines.append(six.text_type(entry))
                    else:
                        lines.append(line)
                except _crypttab_entry.ParseError:
                    lines.append(line)
    except (IOError, OSError) as exc:
        msg = 'Couldn\'t read from {0}: {1}'
        raise CommandExecutionError(msg.format(config, exc))

    # add line if not present or changed
    if ret is None:
        lines.append(six.text_type(entry))
        ret = 'new'

    if ret != 'present':  # ret in ['new', 'change']:
        if not test:
            try:
                with salt.utils.files.fopen(config, 'w+') as ofile:
                    # The line was changed, commit it!
                    ofile.writelines((salt.utils.stringutils.to_str(line)
                                      for line in lines))
            except (IOError, OSError):
                msg = 'File not writable {0}'
                raise CommandExecutionError(msg.format(config))

    return ret
Verify that this device is represented in the crypttab, change the device to match the name passed, or add the name if it is not present. CLI Example: .. code-block:: bash salt '*' cryptdev.set_crypttab foo /dev/sdz1 mypassword swap,size=256
def ignore(code): """Should this code be ignored. :param str code: Error code (e.g. D201). :return: True if code should be ignored, False otherwise. :rtype: bool """ if code in Main.options['ignore']: return True if any(c in code for c in Main.options['ignore']): return True return False
Should this code be ignored. :param str code: Error code (e.g. D201). :return: True if code should be ignored, False otherwise. :rtype: bool
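A worked example of the two matching rules, with a stand-in options dict; note the second check gives substring (e.g. prefix) matching:

class Main(object):
    options = {'ignore': ['D100', 'D2']}  # stand-in configuration

def ignore(code):
    if code in Main.options['ignore']:
        return True
    if any(c in code for c in Main.options['ignore']):
        return True
    return False

print(ignore('D100'))  # True: exact match
print(ignore('D201'))  # True: 'D2' occurs inside 'D201'
print(ignore('D301'))  # False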
def unrate_url(obj): """ Generates a link to "un-rate" the given object - this can be used as a form target or for POSTing via Ajax. """ return reverse('ratings_unrate_object', args=( ContentType.objects.get_for_model(obj).pk, obj.pk, ))
Generates a link to "un-rate" the given object - this can be used as a form target or for POSTing via Ajax.
def action_filter(method_name, *args, **kwargs): """ Creates an effect that will call the action's method with the current value and specified arguments and keywords. @param method_name: the name of method belonging to the action. @type method_name: str """ def action_filter(value, context, **_params): method = getattr(context["action"], method_name) return _filter(method, value, args, kwargs) return action_filter
Creates an effect that will call the action's method with the current value and specified arguments and keywords. @param method_name: the name of method belonging to the action. @type method_name: str
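A small, self-contained sketch of applying such an effect; `_filter` is stubbed here with an assumed signature, since its definition is not shown above:

def _filter(method, value, args, kwargs):
    # Assumed behaviour: call the bound method with the current value first.
    return method(value, *args, **kwargs)

def action_filter(method_name, *args, **kwargs):
    def action_filter(value, context, **_params):
        method = getattr(context["action"], method_name)
        return _filter(method, value, args, kwargs)
    return action_filter

class ShoutAction(object):
    def transform(self, value, suffix=''):
        return value.upper() + suffix

effect = action_filter('transform', suffix='!')
print(effect('hello', {'action': ShoutAction()}))  # prints: HELLO!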
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator): """ Simply joins the left and right hand arguments lhs and rhs with an operator. :param lhsAST: :param rhsAST: :param es: :param operator: :return: """ if isinstance(lhsAST, wdl_parser.Terminal): if lhsAST.str == 'string': es = es + '"{string}"'.format(string=lhsAST.source_string) else: es = es + '{string}'.format(string=lhsAST.source_string) elif isinstance(lhsAST, wdl_parser.Ast): es = es + self.parse_declaration_expressn(lhsAST, es='') elif isinstance(lhsAST, wdl_parser.AstList): raise NotImplementedError es = es + operator if isinstance(rhsAST, wdl_parser.Terminal): if rhsAST.str == 'string': es = es + '"{string}"'.format(string=rhsAST.source_string) else: es = es + '{string}'.format(string=rhsAST.source_string) elif isinstance(rhsAST, wdl_parser.Ast): es = es + self.parse_declaration_expressn(rhsAST, es='') elif isinstance(rhsAST, wdl_parser.AstList): raise NotImplementedError return es
Simply joins the left and right hand arguments lhs and rhs with an operator. :param lhsAST: :param rhsAST: :param es: :param operator: :return:
def OnNodeActivated(self, event):
    """We have double-clicked or hit enter on a node; refocus
    squaremap to this node"""
    try:
        node = self.sorted[event.GetIndex()]
    except IndexError as err:
        log.warn(_('Invalid index in node activated: %(index)s'),
                 index=event.GetIndex())
    else:
        wx.PostEvent(
            self,
            squaremap.SquareActivationEvent(node=node, point=None,
                                            map=None)
        )
We have double-clicked or hit enter on a node; refocus squaremap to this node
def reset(self): """Reset openthread device, not equivalent to stop and start """ logger.debug('DUT> reset') self._log and self.pause() self._sendline('reset') self._read() self._log and self.resume()
Reset openthread device, not equivalent to stop and start
def create_uinput_device(mapping): """Creates a uinput device.""" if mapping not in _mappings: raise DeviceError("Unknown device mapping: {0}".format(mapping)) try: mapping = _mappings[mapping] device = UInputDevice(mapping) except UInputError as err: raise DeviceError(err) return device
Creates a uinput device.
def generate_ref(self): """ Ref can be link to remote or local definition. .. code-block:: python {'$ref': 'http://json-schema.org/draft-04/schema#'} { 'properties': { 'foo': {'type': 'integer'}, 'bar': {'$ref': '#/properties/foo'} } } """ with self._resolver.in_scope(self._definition['$ref']): name = self._resolver.get_scope_name() uri = self._resolver.get_uri() if uri not in self._validation_functions_done: self._needed_validation_functions[uri] = name # call validation function self.l('{}({variable})', name)
Ref can be link to remote or local definition. .. code-block:: python {'$ref': 'http://json-schema.org/draft-04/schema#'} { 'properties': { 'foo': {'type': 'integer'}, 'bar': {'$ref': '#/properties/foo'} } }
def parse_reaction_file(path, default_compartment=None): """Open and parse reaction file based on file extension Path can be given as a string or a context. """ context = FilePathContext(path) format = resolve_format(None, context.filepath) if format == 'tsv': logger.debug('Parsing reaction file {} as TSV'.format( context.filepath)) with context.open('r') as f: for reaction in parse_reaction_table_file( context, f, default_compartment): yield reaction elif format == 'yaml': logger.debug('Parsing reaction file {} as YAML'.format( context.filepath)) with context.open('r') as f: for reaction in parse_reaction_yaml_file( context, f, default_compartment): yield reaction else: raise ParseError('Unable to detect format of reaction file {}'.format( context.filepath))
Open and parse reaction file based on file extension Path can be given as a string or a context.
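An illustrative call (file name and compartment id are made up); the format is detected from the file extension:

for reaction in parse_reaction_file('reactions.yaml', default_compartment='c'):
    print(reaction)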
def _add_active_assets(specs): """ This function adds an assets key to the specs, which is filled in with a dictionary of all assets defined by apps and libs in the specs """ specs['assets'] = {} for spec in specs.get_apps_and_libs(): for asset in spec['assets']: if not specs['assets'].get(asset['name']): specs['assets'][asset['name']] = {} specs['assets'][asset['name']]['required_by'] = set() specs['assets'][asset['name']]['used_by'] = set() specs['assets'][asset['name']]['used_by'].add(spec.name) if asset['required']: specs['assets'][asset['name']]['required_by'].add(spec.name)
This function adds an assets key to the specs, which is filled in with a dictionary of all assets defined by apps and libs in the specs
def account_following(self, id, max_id=None, min_id=None, since_id=None, limit=None):
    """
    Fetch users the given user is following.

    Returns a list of `user dicts`_.
    """
    id = self.__unpack_id(id)
    if max_id is not None:
        max_id = self.__unpack_id(max_id)

    if min_id is not None:
        min_id = self.__unpack_id(min_id)

    if since_id is not None:
        since_id = self.__unpack_id(since_id)

    params = self.__generate_params(locals(), ['id'])
    url = '/api/v1/accounts/{0}/following'.format(str(id))
    return self.__api_request('GET', url, params)
Fetch users the given user is following. Returns a list of `user dicts`_.
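A usage sketch with Mastodon.py (base URL, token, and account id are illustrative):

from mastodon import Mastodon

api = Mastodon(access_token='...', api_base_url='https://mastodon.example')
for account in api.account_following(id=1234, limit=40):
    print(account['acct'])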
def build_disagg_matrix(bdata, bin_edges, sid, mon=Monitor):
    """
    :param bdata: a dictionary of probabilities of no exceedance
    :param bin_edges: bin edges
    :param sid: site index
    :param mon: a Monitor instance
    :returns: a dictionary key -> matrix|pmf for each key in bdata
    """
    with mon('build_disagg_matrix'):
        mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges
        dim1, dim2, dim3, dim4, dim5 = shape = get_shape(bin_edges, sid)

        # find bin indexes of rupture attributes; bins are assumed closed
        # on the lower bound, and open on the upper bound, that is [ )
        # longitude values need an ad-hoc method to take into account
        # the 'international date line' issue
        # the 'minus 1' is needed because the digitize method returns the
        # index of the upper bound of the bin
        mags_idx = numpy.digitize(bdata.mags+pmf.PRECISION, mag_bins) - 1
        dists_idx = numpy.digitize(bdata.dists[:, sid], dist_bins) - 1
        lons_idx = _digitize_lons(bdata.lons[:, sid], lon_bins[sid])
        lats_idx = numpy.digitize(bdata.lats[:, sid], lat_bins[sid]) - 1

        # because of the way numpy.digitize works, values equal to the last bin
        # edge are associated to an index equal to len(bins) which is not a
        # valid index for the disaggregation matrix. Such values are assumed
        # to fall in the last bin
        mags_idx[mags_idx == dim1] = dim1 - 1
        dists_idx[dists_idx == dim2] = dim2 - 1
        lons_idx[lons_idx == dim3] = dim3 - 1
        lats_idx[lats_idx == dim4] = dim4 - 1

        out = {}
        cache = {}
        cache_hit = 0
        num_zeros = 0
        for k, allpnes in bdata.items():
            pnes = allpnes[:, sid, :]  # shape (U, N, E)
            cache_key = pnes.sum()
            if cache_key == pnes.size:  # all pnes are 1
                num_zeros += 1
                continue  # zero matrices are not transferred
            try:
                matrix = cache[cache_key]
                cache_hit += 1
            except KeyError:
                mat = numpy.ones(shape)
                for i_mag, i_dist, i_lon, i_lat, pne in zip(
                        mags_idx, dists_idx, lons_idx, lats_idx, pnes):
                    mat[i_mag, i_dist, i_lon, i_lat] *= pne
                matrix = 1. - mat
                cache[cache_key] = matrix
            out[k] = matrix

        # operations, hits, num_zeros
        if hasattr(mon, 'cache_info'):
            mon.cache_info += numpy.array([len(bdata), cache_hit, num_zeros])
        else:
            mon.cache_info = numpy.array([len(bdata), cache_hit, num_zeros])
    return out
:param bdata: a dictionary of probabilities of no exceedance :param bin_edges: bin edges :param sid: site index :param mon: a Monitor instance :returns: a dictionary key -> matrix|pmf for each key in bdata
def _read_packet(f, pos, n_smp, n_allchan, abs_delta):
    """
    Read a packet of compressed data

    Parameters
    ----------
    f : instance of opened file
        erd file
    pos : int
        index of the start of the packet in the file (in bytes from
        beginning of the file)
    n_smp : int
        number of samples to read
    n_allchan : int
        number of channels (we should specify if shorted or not)
    abs_delta : byte
        if the delta has this value, it means that you should read the
        absolute value at the end of packet. If schema is 7, the length is 1;
        if schema is 8 or 9, the length is 2.

    Returns
    -------
    ndarray
        data read in the packet up to n_smp.

    Notes
    -----
    TODO: shorted chan. If I remember correctly, deltamask includes all the
    channels, but the absolute values are only used for not-shorted channels

    TODO: implement schema 7, which is slightly different, but I don't
    remember where exactly.
    """
    if len(abs_delta) == 1:  # schema 7
        abs_delta = unpack('b', abs_delta)[0]
    else:  # schema 8, 9
        abs_delta = unpack('h', abs_delta)[0]

    l_deltamask = int(ceil(n_allchan / BITS_IN_BYTE))

    dat = empty((n_allchan, n_smp), dtype=int32)
    f.seek(pos)

    for i_smp in range(n_smp):
        eventbite = f.read(1)
        # explicit check instead of a bare try/assert/except, which would
        # swallow unrelated errors and be skipped under `python -O`
        if eventbite not in (b'\x00', b'\x01'):
            raise Exception('at pos ' + str(i_smp) +
                            ', eventbite (should be x00 or x01): ' +
                            str(eventbite))

        byte_deltamask = unpack('<' + 'B' * l_deltamask, f.read(l_deltamask))
        deltamask = unpackbits(array(byte_deltamask[::-1], dtype='uint8'))
        deltamask = deltamask[:-n_allchan-1:-1]

        n_bytes = int(deltamask.sum()) + deltamask.shape[0]
        deltamask = deltamask.astype('bool')

        # numpy has a weird way of handling string/bytes.
        # We create a byte representation, because then tostring() works fine
        delta_dtype = empty(n_allchan, dtype='a1')
        delta_dtype[deltamask] = 'h'
        delta_dtype[~deltamask] = 'b'

        relval = array(unpack('<' + delta_dtype.tostring().decode(),
                              f.read(n_bytes)))

        read_abs = (delta_dtype == b'h') & (relval == abs_delta)

        dat[~read_abs, i_smp] = dat[~read_abs, i_smp - 1] + relval[~read_abs]
        dat[read_abs, i_smp] = fromfile(f, 'i', count=read_abs.sum())

    return dat
Read a packet of compressed data Parameters ---------- f : instance of opened file erd file pos : int index of the start of the packet in the file (in bytes from beginning of the file) n_smp : int number of samples to read n_allchan : int number of channels (we should specify if shorted or not) abs_delta: byte if the delta has this value, it means that you should read the absolute value at the end of packet. If schema is 7, the length is 1; if schema is 8 or 9, the length is 2. Returns ------- ndarray data read in the packet up to n_smp. Notes ----- TODO: shorted chan. If I remember correctly, deltamask includes all the channels, but the absolute values are only used for not-shorted channels TODO: implement schema 7, which is slightly different, but I don't remember where exactly.
def text_response(self, contents, code=200, headers=None):
    """shortcut to return simple plain/text messages in the response.

    :param contents: a string with the response contents
    :param code: the http status code
    :param headers: a dict with optional headers
    :returns: a :py:class:`flask.Response` with the ``text/plain``
        **Content-Type** header.
    """
    # don't silently drop caller-supplied headers (and avoid a mutable
    # default argument); just guarantee the plain-text content type is set
    headers = dict(headers or {})
    headers.setdefault('Content-Type', 'text/plain')
    return Response(contents, status=code, headers=headers)
shortcut to return simple plain/text messages in the response. :param contents: a string with the response contents :param code: the http status code :param headers: a dict with optional headers :returns: a :py:class:`flask.Response` with the ``text/plain`` **Content-Type** header.
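A minimal Flask sketch wiring the helper into a view (the app and handler layout are assumptions, and the method body mirrors the fixed version above):

from flask import Flask, Response

app = Flask(__name__)

class Handler(object):
    def text_response(self, contents, code=200, headers=None):
        headers = dict(headers or {})
        headers.setdefault('Content-Type', 'text/plain')
        return Response(contents, status=code, headers=headers)

handler = Handler()

@app.route('/ping')
def ping():
    return handler.text_response('pong')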
def insert_record_by_dict(self, table: str, valuedict: Dict[str, Any]) -> Optional[int]: """Inserts a record into database, table "table", using a dictionary containing field/value mappings. Returns the new PK (or None).""" if not valuedict: return None n = len(valuedict) fields = [] args = [] for f, v in valuedict.items(): fields.append(self.delimit(f)) args.append(v) query = """ INSERT INTO {table} ({fields}) VALUES ({placeholders}) """.format( table=table, fields=",".join(fields), placeholders=",".join(["?"]*n) ) query = self.localize_sql(query) log.debug("About to insert_record_by_dict with SQL template: " + query) try: cursor = self.db.cursor() debug_sql(query, args) cursor.execute(query, args) new_pk = get_pk_of_last_insert(cursor) log.debug("Record inserted.") return new_pk except: # nopep8 log.exception("insert_record_by_dict: Failed to insert record.") raise
Inserts a record into database, table "table", using a dictionary containing field/value mappings. Returns the new PK (or None).
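An illustrative call (table and column names are hypothetical); the dict keys become delimited field names and the values are passed as bound parameters:

# db is an instance of the class defining insert_record_by_dict
new_pk = db.insert_record_by_dict('patient', {
    'forename': 'Ada',
    'surname': 'Lovelace',
})
print(new_pk)  # primary key of the inserted row, or None for an empty dict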
def version():
    '''
    Return imgadm version

    CLI Example:

    .. code-block:: bash

        salt '*' imgadm.version
    '''
    cmd = 'imgadm --version'
    res = __salt__['cmd.run'](cmd).splitlines()
    # the version is the last whitespace-separated token of the first line
    return res[0].split()[-1]
Return imgadm version CLI Example: .. code-block:: bash salt '*' imgadm.version
def getRAM(self, ram=None):
    """This function grabs the atari RAM.
    ram MUST be a numpy array of uint8/int8. This can be initialized like so:

    ram = np.zeros(ram_size, dtype=np.uint8)

    Notice: It must be ram_size where ram_size can be retrieved via the
    getRAMSize function. If it is None, then this function will initialize
    it and return it.
    """
    if ram is None:
        ram_size = ale_lib.getRAMSize(self.obj)
        ram = np.zeros(ram_size, dtype=np.uint8)
    ale_lib.getRAM(self.obj, as_ctypes(ram))
    # return the buffer so callers relying on the default allocation
    # actually receive the data
    return ram
This function grabs the atari RAM. ram MUST be a numpy array of uint8/int8. This can be initialized like so: ram = np.zeros(ram_size, dtype=np.uint8) Notice: It must be ram_size where ram_size can be retrieved via the getRAMSize function. If it is None, then this function will initialize it and return it.
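A usage sketch with the ALE Python bindings (the import path and ROM name are assumptions); with the `return ram` fix above, the no-argument form hands back a freshly allocated buffer:

import numpy as np
from ale_python_interface import ALEInterface  # assumed import path

ale = ALEInterface()
ale.loadROM(b'breakout.bin')  # illustrative ROM

ram = ale.getRAM()  # let the method allocate the buffer

# or reuse one buffer across frames to avoid reallocation:
buf = np.zeros(ale.getRAMSize(), dtype=np.uint8)
ale.getRAM(buf)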
def post_shared_file(self, image_file=None, source_link=None,
                     shake_id=None, title=None, description=None):
    """
    Upload an image.

    TODO: Don't have a pro account to test (or even write) code to upload
    a shared file to a particular shake.

    Args:
        image_file (str): path to an image (jpg/gif) on your computer.
        source_link (str): URL of a source (youtube/vine/etc.)
        shake_id (int): shake to which to upload the file or
            source_link [optional]
        title (str): title of the SharedFile [optional]
        description (str): description of the SharedFile

    Returns:
        SharedFile key.
    """
    if image_file and source_link:
        raise Exception('You can only specify an image file or '
                        'a source link, not both.')
    if not image_file and not source_link:
        raise Exception('You must specify an image file or a source link')

    content_type = self._get_image_type(image_file)

    if not title:
        title = os.path.basename(image_file)

    endpoint = '/api/upload'

    # use a context manager so the file handle is closed even if the
    # request raises
    with open(image_file, 'rb') as f:
        files = {'file': (title, f, content_type)}
        data = self._make_request('POST', endpoint=endpoint, files=files)
    return data
Upload an image. TODO: Don't have a pro account to test (or even write) code to upload a shared file to a particular shake. Args: image_file (str): path to an image (jpg/gif) on your computer. source_link (str): URL of a source (youtube/vine/etc.) shake_id (int): shake to which to upload the file or source_link [optional] title (str): title of the SharedFile [optional] description (str): description of the SharedFile Returns: SharedFile key.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'exclude') and self.exclude is not None: _dict['exclude'] = self.exclude if hasattr(self, 'include') and self.include is not None: _dict['include'] = self.include return _dict
Return a json dictionary representing this model.
def select_radio_button(self, key): """Helper to select a radio button with key. :param key: The key of the radio button. :type key: str """ key_index = list(self._parameter.options.keys()).index(key) radio_button = self.input_button_group.button(key_index) radio_button.click()
Helper to select a radio button with key. :param key: The key of the radio button. :type key: str
def WritePreprocessingInformation(self, knowledge_base): """Writes preprocessing information. Args: knowledge_base (KnowledgeBase): contains the preprocessing information. Raises: IOError: if the storage type does not support writing preprocess information or the storage file is closed or read-only. OSError: if the storage type does not support writing preprocess information or the storage file is closed or read-only. """ self._RaiseIfNotWritable() if self.storage_type != definitions.STORAGE_TYPE_SESSION: raise IOError('Preprocess information not supported by storage type.') system_configuration = knowledge_base.GetSystemConfigurationArtifact() self._WriteAttributeContainer(system_configuration)
Writes preprocessing information. Args: knowledge_base (KnowledgeBase): contains the preprocessing information. Raises: IOError: if the storage type does not support writing preprocess information or the storage file is closed or read-only. OSError: if the storage type does not support writing preprocess information or the storage file is closed or read-only.
def nextSunrise(jd, lat, lon): """ Returns the JD of the next sunrise. """ return swe.sweNextTransit(const.SUN, jd, lat, lon, 'RISE')
Returns the JD of the next sunrise.
def in_check(self, position, location=None):
    """
    Finds if the king is in check or if both kings are touching.

    :type position: Board
    :return: bool
    """
    location = location or self.location
    for piece in position:
        if piece is not None and piece.color != self.color:
            if not isinstance(piece, King):
                for move in piece.possible_moves(position):
                    if move.end_loc == location:
                        return True
            else:
                if self.loc_adjacent_to_opponent_king(piece.location, position):
                    return True
    return False
Finds if the king is in check or if both kings are touching. :type position: Board :return: bool
def more_like_this(self, q, mltfl, handler='mlt', **kwargs): """ Finds and returns results similar to the provided query. Returns ``self.results_cls`` class object (defaults to ``pysolr.Results``) Requires Solr 1.3+. Usage:: similar = solr.more_like_this('id:doc_234', 'text') """ params = { 'q': q, 'mlt.fl': mltfl, } params.update(kwargs) response = self._mlt(params, handler=handler) decoded = self.decoder.decode(response) self.log.debug( "Found '%s' MLT results.", # cover both cases: there is no response key or value is None (decoded.get('response', {}) or {}).get('numFound', 0) ) return self.results_cls(decoded)
Finds and returns results similar to the provided query. Returns ``self.results_cls`` class object (defaults to ``pysolr.Results``) Requires Solr 1.3+. Usage:: similar = solr.more_like_this('id:doc_234', 'text')
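A pysolr usage sketch (core URL and extra MLT parameter are illustrative); dotted Solr parameters have to be passed by dict unpacking since they are not valid Python keywords:

import pysolr

solr = pysolr.Solr('http://localhost:8983/solr/mycore')
similar = solr.more_like_this('id:doc_234', 'text', **{'mlt.count': 5})
for doc in similar:
    print(doc['id'])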
def message(self, level, *args): """ Format the message of the logger. You can rewrite this method to format your own message:: class MyLogger(Logger): def message(self, level, *args): msg = ' '.join(args) if level == 'error': return terminal.red(msg) return msg """ msg = ' '.join((str(o) for o in args)) if level not in ('start', 'end', 'debug', 'info', 'warn', 'error'): return msg return '%s: %s' % (level, msg)
Format the message of the logger. You can rewrite this method to format your own message:: class MyLogger(Logger): def message(self, level, *args): msg = ' '.join(args) if level == 'error': return terminal.red(msg) return msg
def order(self):
    """Produce a flattened list of the partition, ordered by classes
    """
    return [x.val for theclass in self.classes for x in theclass.items]
Produce a flattened list of the partition, ordered by classes
def _might_have_parameter(fn_or_cls, arg_name):
    """Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.

    Specifically, this means that `fn_or_cls` either has a parameter named
    `arg_name`, or has a `**kwargs` parameter.

    Args:
      fn_or_cls: The function or class to check.
      arg_name: The name of the parameter.

    Returns:
      Whether `arg_name` might be a valid argument of `fn_or_cls`.
    """
    if inspect.isclass(fn_or_cls):
        fn = _find_class_construction_fn(fn_or_cls)
    else:
        fn = fn_or_cls

    while hasattr(fn, '__wrapped__'):
        fn = fn.__wrapped__
    arg_spec = _get_cached_arg_spec(fn)
    if six.PY3:
        if arg_spec.varkw:
            return True
        return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
    else:
        if arg_spec.keywords:
            return True
        return arg_name in arg_spec.args
Returns True if `arg_name` might be a valid parameter for `fn_or_cls`. Specifically, this means that `fn_or_cls` either has a parameter named `arg_name`, or has a `**kwargs` parameter. Args: fn_or_cls: The function or class to check. arg_name: The name of the parameter. Returns: Whether `arg_name` might be a valid argument of `fn_or_cls`.
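Worked examples of the check; the expected results assume the private helpers the function relies on (`_find_class_construction_fn`, `_get_cached_arg_spec`) are in scope, as in its own module:

def f(a, b, **kwargs):
    pass

class C(object):
    def __init__(self, x):
        pass

print(_might_have_parameter(f, 'a'))         # True: named parameter
print(_might_have_parameter(f, 'whatever'))  # True: **kwargs absorbs it
print(_might_have_parameter(C, 'x'))         # True: constructor parameter
print(_might_have_parameter(C, 'y'))         # False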
def end(self): """End of the Glances server session.""" if not self.args.disable_autodiscover: self.autodiscover_client.close() self.server.end()
End of the Glances server session.
def message(self, text): """ Public message. """ self.client.publish(self.keys.external, '{}: {}'.format(self.resource, text))
Public message.
def get_upcoming_events(self, days_to_look_ahead):
    '''Returns the events from the calendar for the next days_to_look_ahead days.'''
    now = datetime.now(tz=self.timezone)
    # drop sub-second precision; equivalent to rebuilding the datetime
    # field by field
    start_time = now.replace(microsecond=0)
    end_time = start_time + timedelta(days=days_to_look_ahead)

    start_time = start_time.isoformat()
    end_time = end_time.isoformat()

    return self.get_events(start_time, end_time)
Returns the events from the calendar for the next days_to_look_ahead days.
def _get_mean(self, imt, mag, hypo_depth, rrup, d):
    """
    Return mean value as defined in equation 3.5.1-1 page 148
    """
    # clip magnitude at 8.3 as per note at page 3-36 in Table 3.3.2-6
    # in "Technical Reports on National Seismic Hazard Maps for Japan"
    mag = min(mag, 8.3)
    if imt.name == 'PGV':
        mean = (
            0.58 * mag +
            0.0038 * hypo_depth +
            d -
            1.29 -
            np.log10(rrup + 0.0028 * 10 ** (0.5 * mag)) -
            0.002 * rrup
        )
    else:
        mean = (
            0.50 * mag +
            0.0043 * hypo_depth +
            d +
            0.61 -
            np.log10(rrup + 0.0055 * 10 ** (0.5 * mag)) -
            0.003 * rrup
        )
    mean = np.log10(10 ** mean / (g * 100))

    return mean
Return mean value as defined in equation 3.5.1-1 page 148
def _hbf_handle_child_elements(self, obj, ntl):
    """
    Indirect recursion through _gen_hbf_el
    """
    # accumulate a list of the children names in ko, and
    # a dictionary of tag to xml elements.
    # repetition of a tag means that it will map to a list of
    # xml elements
    cd = {}
    ko = []
    ks = set()
    for child in ntl:
        k = child.nodeName
        if k == 'meta' and (not self._badgerfish_style_conversion):
            matk, matv = self._transform_meta_key_value(child)
            if matk is not None:
                _add_value_to_dict_bf(obj, matk, matv)
        else:
            if k not in ks:
                ko.append(k)
                ks.add(k)
            _add_value_to_dict_bf(cd, k, child)

    # Converts the child XML elements to dicts by recursion and
    # adds these to the dict.
    for k in ko:
        v = _index_list_of_values(cd, k)
        dcl = []
        ct = None
        for xc in v:
            ct, dc = self._gen_hbf_el(xc)
            dcl.append(dc)
        # this assertion will trip if the hacky stripping of namespaces
        # results in a name clash among the tags of the children
        assert ct not in obj
        obj[ct] = dcl

    # delete redundant about attributes that are used in XML, but not JSON (last rule of HoneyBadgerFish)
    _cull_redundant_about(obj)
    return obj
Indirect recursion through _gen_hbf_el
def create_dockwidget(self): """Add to parent QMainWindow as a dock widget""" # Creating dock widget dock = SpyderDockWidget(self.get_plugin_title(), self.main) # Set properties dock.setObjectName(self.__class__.__name__+"_dw") dock.setAllowedAreas(self.ALLOWED_AREAS) dock.setFeatures(self.FEATURES) dock.setWidget(self) self.update_margins() dock.visibilityChanged.connect(self.visibility_changed) dock.topLevelChanged.connect(self.on_top_level_changed) dock.sig_plugin_closed.connect(self.plugin_closed) self.dockwidget = dock if self.shortcut is not None: sc = QShortcut(QKeySequence(self.shortcut), self.main, self.switch_to_plugin) self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION) return (dock, self.LOCATION)
Add to parent QMainWindow as a dock widget
def rename(script, label='blank', layer_num=None): """ Rename layer label Can be useful for outputting mlp files, as the output file names use the labels. Args: script: the mlx.FilterScript object or script filename to write the filter to. label (str): new label for the mesh layer layer_num (int): layer number to rename. Default is the current layer. Not supported on the file base API. Layer stack: Renames a layer MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Rename Current Mesh">\n', ' <Param name="newName" ', 'value="{}" '.format(label), 'description="New Label" ', 'type="RichString" ', '/>\n', ' </filter>\n']) if isinstance(script, mlx.FilterScript): if (layer_num is None) or (layer_num == script.current_layer()): util.write_filter(script, filter_xml) script.layer_stack[script.current_layer()] = label else: cur_layer = script.current_layer() change(script, layer_num) util.write_filter(script, filter_xml) change(script, cur_layer) script.layer_stack[layer_num] = label else: util.write_filter(script, filter_xml) return None
Rename layer label Can be useful for outputting mlp files, as the output file names use the labels. Args: script: the mlx.FilterScript object or script filename to write the filter to. label (str): new label for the mesh layer layer_num (int): layer number to rename. Default is the current layer. Not supported on the file base API. Layer stack: Renames a layer MeshLab versions: 2016.12 1.3.4BETA
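A usage sketch with the meshlabxml-style API the function targets (package alias and file names are illustrative):

import meshlabxml as mlx  # assumed package alias

script = mlx.FilterScript(file_in='input.ply', file_out='output.ply')
rename(script, label='cleaned_mesh')  # relabel the current layer
script.run_script()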
def substitute(self, index, func_grp, bond_order=1): """ Substitute atom at index with a functional group. Args: index (int): Index of atom to substitute. func_grp: Substituent molecule. There are two options: 1. Providing an actual Molecule as the input. The first atom must be a DummySpecie X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. bond_order (int): A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1. """ # Find the nearest neighbor that is not a terminal atom. all_non_terminal_nn = [] for nn, dist in self.get_neighbors(self[index], 3): # Check that the nn has neighbors within a sensible distance but # is not the site being substituted. for inn, dist2 in self.get_neighbors(nn, 3): if inn != self[index] and \ dist2 < 1.2 * get_bond_length(nn.specie, inn.specie): all_non_terminal_nn.append((nn, dist)) break if len(all_non_terminal_nn) == 0: raise RuntimeError("Can't find a non-terminal neighbor to attach" " functional group to.") non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0] # Set the origin point to be the coordinates of the nearest # non-terminal neighbor. origin = non_terminal_nn.coords # Pass value of functional group--either from user-defined or from # functional.json if isinstance(func_grp, Molecule): func_grp = func_grp else: # Check to see whether the functional group is in database. if func_grp not in FunctionalGroups: raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead") else: func_grp = FunctionalGroups[func_grp] # If a bond length can be found, modify func_grp so that the X-group # bond length is equal to the bond length. try: bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie, bond_order=bond_order) # Catches for case of incompatibility between Element(s) and Specie(s) except TypeError: bl = None if bl is not None: func_grp = func_grp.copy() vec = func_grp[0].coords - func_grp[1].coords vec /= np.linalg.norm(vec) func_grp[0] = "X", func_grp[1].coords + float(bl) * vec # Align X to the origin. x = func_grp[0] func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords) # Find angle between the attaching bond and the bond to be replaced. v1 = func_grp[1].coords - origin v2 = self[index].coords - origin angle = get_angle(v1, v2) if 1 < abs(angle % 180) < 179: # For angles which are not 0 or 180, we perform a rotation about # the origin along an axis perpendicular to both bonds to align # bonds. axis = np.cross(v1, v2) op = SymmOp.from_origin_axis_angle(origin, axis, angle) func_grp.apply_operation(op) elif abs(abs(angle) - 180) < 1: # We have a 180 degree angle. Simply do an inversion about the # origin for i in range(len(func_grp)): func_grp[i] = (func_grp[i].species, origin - (func_grp[i].coords - origin)) # Remove the atom to be replaced, and add the rest of the functional # group. del self[index] for site in func_grp[1:]: s_new = PeriodicSite(site.species, site.coords, self.lattice, coords_are_cartesian=True) self._sites.append(s_new)
Substitute atom at index with a functional group. Args: index (int): Index of atom to substitute. func_grp: Substituent molecule. There are two options: 1. Providing an actual Molecule as the input. The first atom must be a DummySpecie X, indicating the position of nearest neighbor. The second atom must be the next nearest atom. For example, for a methyl group substitution, func_grp should be X-CH3, where X is the first site and C is the second site. What the code will do is to remove the index site, and connect the nearest neighbor to the C atom in CH3. The X-C bond indicates the directionality to connect the atoms. 2. A string name. The molecule will be obtained from the relevant template in func_groups.json. bond_order (int): A specified bond order to calculate the bond length between the attached functional group and the nearest neighbor site. Defaults to 1.
def yaml_filter(element, doc, tag=None, function=None, tags=None, strict_yaml=False): ''' Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action) ''' # Allow for either tag+function or a dict {tag: function} assert (tag is None) + (tags is None) == 1 # XOR if tags is None: tags = {tag: function} if type(element) == CodeBlock: for tag in tags: if tag in element.classes: function = tags[tag] if not strict_yaml: # Split YAML and data parts (separated by ... or ---) raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 1, re.MULTILINE) data = raw[2] if len(raw) > 2 else '' data = data.lstrip('\n') raw = raw[0] try: options = yaml.safe_load(raw) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return if options is None: options = {} else: options = {} data = [] raw = re.split("^([.]{3,}|[-]{3,})$", element.text, 0, re.MULTILINE) rawmode = True for chunk in raw: chunk = chunk.strip('\n') if not chunk: continue if rawmode: if chunk.startswith('---'): rawmode = False else: data.append(chunk) else: if chunk.startswith('---') or chunk.startswith('...'): rawmode = True else: try: options.update(yaml.safe_load(chunk)) except yaml.scanner.ScannerError: debug("panflute: malformed YAML block") return data = '\n'.join(data) return function(options=options, data=data, element=element, doc=doc)
Convenience function for parsing code blocks with YAML options This function is useful to create a filter that applies to code blocks that have specific classes. It is used as an argument of ``run_filter``, with two additional options: ``tag`` and ``function``. Using this is equivalent to having filter functions that: 1. Check if the element is a code block 2. Check if the element belongs to a specific class 3. Split the YAML options (at the beginning of the block, by looking for ``...`` or ``---`` strings in a separate line 4. Parse the YAML 5. Use the YAML options and (optionally) the data that follows the YAML to return a new or modified element Instead, you just need to: 1. Call ``run_filter`` with ``yaml_filter`` as the action function, and with the additional arguments ``tag`` and ``function`` 2. Construct a ``fenced_action`` function that takes four arguments: (options, data, element, doc). Note that options is a dict and data is a raw string. Notice that this is similar to the ``action`` functions of standard filters, but with *options* and *data* as the new ones. Note: if you want to apply multiple functions to separate classes, you can use the ``tags`` argument, which receives a dict of ``tag: function`` pairs. Note: use the ``strict_yaml=True`` option in order to allow for more verbose but flexible YAML metadata: more than one YAML blocks are allowed, but they all must start with ``---`` (even at the beginning) and end with ``---`` or ``...``. Also, YAML is not the default content when no delimiters are set. Example:: """ Replace code blocks of class 'foo' with # horizontal rules """ import panflute as pf def fenced_action(options, data, element, doc): count = options.get('count', 1) div = pf.Div(attributes={'count': str(count)}) div.content.extend([pf.HorizontalRule] * count) return div if __name__ == '__main__': pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
def to_csv(self, dest:str)->None: "Save `self.to_df()` to a CSV file in `self.path`/`dest`." self.to_df().to_csv(self.path/dest, index=False)
Save `self.to_df()` to a CSV file in `self.path`/`dest`.
def create_api_method(restApiId, resourcePath, httpMethod, authorizationType, apiKeyRequired=False, requestParameters=None, requestModels=None, region=None, key=None, keyid=None, profile=None): ''' Creates API method for a resource in the given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_method restApiId resourcePath, httpMethod, authorizationType, \\ apiKeyRequired=False, requestParameters='{"name", "value"}', requestModels='{"content-type", "value"}' ''' try: resource = describe_api_resource(restApiId, resourcePath, region=region, key=key, keyid=keyid, profile=profile).get('resource') if resource: requestParameters = dict() if requestParameters is None else requestParameters requestModels = dict() if requestModels is None else requestModels conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) method = conn.put_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod, authorizationType=str(authorizationType), apiKeyRequired=apiKeyRequired, # future lint: disable=blacklisted-function requestParameters=requestParameters, requestModels=requestModels) return {'created': True, 'method': method} return {'created': False, 'error': 'Failed to create method'} except ClientError as e: return {'created': False, 'error': __utils__['boto3.get_error'](e)}
Creates API method for a resource in the given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_method restApiId resourcePath, httpMethod, authorizationType, \\ apiKeyRequired=False, requestParameters='{"name", "value"}', requestModels='{"content-type", "value"}'