def labels(ctx):
    """Create or update labels in github
    """
    config = ctx.obj['agile']
    repos = config.get('repositories')
    labels = config.get('labels')
    if not isinstance(repos, list):
        raise CommandError(
            'You need to specify the "repos" list in the config'
        )
    if not isinstance(labels, dict):
        raise CommandError(
            'You need to specify the "labels" dictionary in the config'
        )
    git = GithubApi()
    for repo in repos:
        repo = git.repo(repo)
        for label, color in labels.items():
            if repo.label(label, color):
                click.echo('Created label "%s" @ %s' % (label, repo))
            else:
                click.echo('Updated label "%s" @ %s' % (label, repo))
def run_lldptool(self, args):
    """Function for invoking the lldptool utility."""
    full_args = ['lldptool'] + args
    try:
        return utils.execute(full_args, root_helper=self.root_helper)
    except Exception as exc:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': full_args, 'exception': str(exc)})
def cfrom(self):
    """
    The initial character position in the surface string.

    Defaults to -1 if there is no valid cfrom value.
    """
    cfrom = -1
    try:
        if self.lnk.type == Lnk.CHARSPAN:
            cfrom = self.lnk.data[0]
    except AttributeError:
        pass  # use default cfrom of -1
    return cfrom
def name_suggest(q=None, datasetKey=None, rank=None, limit=100, offset=None, **kwargs):
    '''
    A quick and simple autocomplete service that returns up to 20 name
    usages by doing prefix matching against the scientific name. Results
    are ordered by relevance.

    :param q: [str] Simple search parameter. The value for this parameter
        can be a simple word or a phrase. Wildcards can be added to the
        simple word parameters only, e.g. ``q=*puma*`` (Required)
    :param datasetKey: [str] Filters by the checklist dataset key (a uuid,
        see examples)
    :param rank: [str] A taxonomic rank. One of ``class``, ``cultivar``,
        ``cultivar_group``, ``domain``, ``family``, ``form``, ``genus``,
        ``informal``, ``infrageneric_name``, ``infraorder``,
        ``infraspecific_name``, ``infrasubspecific_name``, ``kingdom``,
        ``order``, ``phylum``, ``section``, ``series``, ``species``,
        ``strain``, ``subclass``, ``subfamily``, ``subform``, ``subgenus``,
        ``subkingdom``, ``suborder``, ``subphylum``, ``subsection``,
        ``subseries``, ``subspecies``, ``subtribe``, ``subvariety``,
        ``superclass``, ``superfamily``, ``superorder``, ``superphylum``,
        ``suprageneric_name``, ``tribe``, ``unranked``, or ``variety``.
    :param limit: [fixnum] Number of records to return. Maximum: ``1000``. (optional)
    :param offset: [fixnum] Record number to start at. (optional)

    :return: A dictionary

    References: http://www.gbif.org/developer/species#searching

    Usage::

        from pygbif import species
        species.name_suggest(q='Puma concolor')
        x = species.name_suggest(q='Puma')
        species.name_suggest(q='Puma', rank="genus")
        species.name_suggest(q='Puma', rank="subspecies")
        species.name_suggest(q='Puma', rank="species")
        species.name_suggest(q='Puma', rank="infraspecific_name")
        species.name_suggest(q='Puma', limit=2)
    '''
    url = gbif_baseurl + 'species/suggest'
    args = {'q': q, 'datasetKey': datasetKey, 'rank': rank,
            'offset': offset, 'limit': limit}
    return gbif_GET(url, args, **kwargs)
def create_introspect_response(self, uri, http_method='POST', body=None,
                               headers=None):
    """Create introspect valid or invalid response

    If the authorization server is unable to determine the state
    of the token without additional information, it SHOULD return
    an introspection response indicating the token is not active
    as described in Section 2.2.
    """
    resp_headers = {
        'Content-Type': 'application/json',
        'Cache-Control': 'no-store',
        'Pragma': 'no-cache',
    }
    request = Request(uri, http_method, body, headers)
    try:
        self.validate_introspect_request(request)
        log.debug('Token introspect valid for %r.', request)
    except OAuth2Error as e:
        log.debug('Client error during validation of %r. %r.', request, e)
        resp_headers.update(e.headers)
        return resp_headers, e.json, e.status_code

    claims = self.request_validator.introspect_token(
        request.token,
        request.token_type_hint,
        request
    )
    if claims is None:
        return resp_headers, json.dumps(dict(active=False)), 200
    if "active" in claims:
        claims.pop("active")
    return resp_headers, json.dumps(dict(active=True, **claims)), 200
def _instantiate_task(api, kwargs):
    """Create a Task object from raw kwargs"""
    file_id = kwargs['file_id']
    kwargs['file_id'] = file_id if str(file_id).strip() else None
    kwargs['cid'] = kwargs['file_id'] or None
    kwargs['rate_download'] = kwargs['rateDownload']
    kwargs['percent_done'] = kwargs['percentDone']
    kwargs['add_time'] = get_utcdatetime(kwargs['add_time'])
    kwargs['last_update'] = get_utcdatetime(kwargs['last_update'])
    is_transferred = (kwargs['status'] == 2 and kwargs['move'] == 1)
    if is_transferred:
        kwargs['pid'] = api.downloads_directory.cid
    else:
        kwargs['pid'] = None
    del kwargs['rateDownload']
    del kwargs['percentDone']
    if 'url' in kwargs:
        if not kwargs['url']:
            kwargs['url'] = None
    else:
        kwargs['url'] = None
    task = Task(api, **kwargs)
    if is_transferred:
        task._parent = api.downloads_directory
    return task
def pk_names(cls):
    """Primary key column name list."""
    if cls._cache_pk_names is None:
        cls._cache_pk_names = cls._get_primary_key_names()
    return cls._cache_pk_names
def _wait_for_process(self, pid, name):
    """Wait for the given process to terminate.

    @return tuple of exit code and resource usage
    """
    try:
        logging.debug("Waiting for process %s with pid %s", name, pid)
        unused_pid, exitcode, ru_child = os.wait4(pid, 0)
        return exitcode, ru_child
    except OSError as e:
        if self.PROCESS_KILLED and e.errno == errno.EINTR:
            # Interrupted system call seems always to happen
            # if we killed the process ourselves after Ctrl+C was pressed.
            # We can try again to get exitcode and resource usage.
            logging.debug("OSError %s while waiting for termination of %s (%s): %s.",
                          e.errno, name, pid, e.strerror)
            try:
                unused_pid, exitcode, ru_child = os.wait4(pid, 0)
                return exitcode, ru_child
            except OSError:
                pass  # original error will be handled and this ignored

        logging.critical("OSError %s while waiting for termination of %s (%s): %s.",
                         e.errno, name, pid, e.strerror)
        return (0, None)
def buy_close(id_or_ins, amount, price=None, style=None, close_today=False):
    """
    Close a short position by buying (buy close).

    :param id_or_ins: the instrument(s) to order
    :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]

    :param int amount: number of lots to order

    :param float price: order price; defaults to None, which means a
        :class:`~MarketOrder`. This parameter mainly exists as a shorthand
        for the `style` parameter.

    :param style: order type; defaults to a market order. Currently
        supported order types are :class:`~LimitOrder` and
        :class:`~MarketOrder`.
    :type style: `OrderStyle` object

    :param bool close_today: whether to send a close-today order;
        defaults to False, which sends a regular close order.

    :return: :class:`~Order` object | list[:class:`~Order`] | None

    :example:

    .. code-block:: python

        # Buy to close 2 lots of the existing IF1603 short position at market price:
        buy_close('IF1603', 2)
    """
    position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE
    return order(id_or_ins, amount, SIDE.BUY, position_effect, cal_style(price, style))
def splitter(iterable, chunksize=60):
    """Split an iterable that supports indexing into chunks of 'chunksize'."""
    return (iterable[i:i + chunksize]
            for i in range(0, len(iterable), chunksize))
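A quick check of the chunking behavior, with hypothetical inputs; note that the function returns a generator, so it is wrapped in list() here:

    >>> list(splitter('abcdefgh', chunksize=3))
    ['abc', 'def', 'gh']
    >>> list(splitter([1, 2, 3, 4, 5], chunksize=2))
    [[1, 2], [3, 4], [5]]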
def log_posterior_transit_plus_line(theta, params, model, t, flux, err_flux,
                                    priorbounds):
    '''
    Evaluate posterior probability given proposed model parameters and
    the observed flux timeseries.
    '''
    lp = _log_prior_transit_plus_line(theta, priorbounds)
    if not np.isfinite(lp):
        return -np.inf
    else:
        return (
            lp + _log_likelihood_transit_plus_line(
                theta, params, model, t, flux, err_flux, priorbounds)
        )
def get_cameras_signal_strength(self):
    """Return a dictionary mapping camera serial numbers to signal strength."""
    signal_strength = {}
    if not self.camera_properties:
        return None
    for camera in self.camera_properties:
        serialnum = camera.get('serialNumber')
        cam_strength = camera.get('signalStrength')
        signal_strength[serialnum] = cam_strength
    return signal_strength
def add(self, datapoint):
    """Adds the datapoint to the tensor if room is available."""
    if not self.is_full:
        self.set_datapoint(self.cur_index, datapoint)
        self.cur_index += 1
def _add_chrome_arguments(self, options):
    """Add Chrome arguments from properties file

    :param options: chrome options object
    """
    try:
        for pref, pref_value in dict(self.config.items('ChromeArguments')).items():
            pref_value = '={}'.format(pref_value) if pref_value else ''
            self.logger.debug("Added chrome argument: %s%s", pref, pref_value)
            options.add_argument('{}{}'.format(pref, self._convert_property_type(pref_value)))
    except NoSectionError:
        pass
def wait_func_accept_retry_state(wait_func):
    """Wrap wait function to accept "retry_state" parameter."""
    if not six.callable(wait_func):
        return wait_func

    if func_takes_retry_state(wait_func):
        return wait_func

    if func_takes_last_result(wait_func):
        @_utils.wraps(wait_func)
        def wrapped_wait_func(retry_state):
            warn_about_non_retry_state_deprecation(
                'wait', wait_func, stacklevel=4)
            return wait_func(
                retry_state.attempt_number,
                retry_state.seconds_since_start,
                last_result=retry_state.outcome,
            )
    else:
        @_utils.wraps(wait_func)
        def wrapped_wait_func(retry_state):
            warn_about_non_retry_state_deprecation(
                'wait', wait_func, stacklevel=4)
            return wait_func(
                retry_state.attempt_number,
                retry_state.seconds_since_start,
            )
    return wrapped_wait_func
def _get_log_level(level):
    """
    small static method to get logging level

    :param str level: string of the level e.g. "INFO"
    :returns logging.<LEVEL>: appropriate debug level
    """
    # default to DEBUG
    if level is None:
        return logging.DEBUG
    # normalize case so "debug", "Info", etc. are accepted
    level = level.upper()
    # Make debugging configurable
    if level == "DEBUG":
        return logging.DEBUG
    elif level == "INFO":
        return logging.INFO
    elif level == "WARNING":
        return logging.WARNING
    elif level == "CRITICAL":
        return logging.CRITICAL
    elif level == "ERROR":
        return logging.ERROR
    elif level == "FATAL":
        return logging.FATAL
    else:
        raise Exception("UnknownLogLevelException: enter a valid log level")
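Expected behavior, given the implementation above (level strings are uppercased before matching):

    >>> _get_log_level('warning') == logging.WARNING
    True
    >>> _get_log_level(None) == logging.DEBUG
    True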
def fetch(args: List[str],
          env: Dict[str, str] = None,
          encoding: str = sys.getdefaultencoding()) -> str:
    """
    Run a command and return its stdout.

    Args:
        args: the command-line arguments
        env: the operating system environment to use
        encoding: the encoding to use for ``stdout``

    Returns:
        the command's ``stdout`` output
    """
    stdout, _ = run(args, env=env, capture_stdout=True,
                    echo_stdout=False, encoding=encoding)
    log.debug(stdout)
    return stdout
def parse(s):
    """
    Parse a string representing a time interval or duration into seconds,
    or raise an exception

    :param str s: a string representation of a time interval
    :raises ValueError: if ``s`` can't be interpreted as a duration
    """
    parts = s.replace(',', ' ').split()
    if not parts:
        raise ValueError('Cannot parse empty string')

    pieces = []
    for part in parts:
        m = PART_MATCH(part)
        pieces.extend(m.groups() if m else [part])

    if len(pieces) == 1:
        pieces.append('s')

    if len(pieces) % 2:
        raise ValueError('Malformed duration %s: %s: %s' % (s, parts, pieces))

    result = 0
    for number, units in zip(*[iter(pieces)] * 2):
        number = float(number)
        if number < 0:
            raise ValueError('Durations cannot have negative components')
        result += number * _get_units(units)

    return result
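Assuming the module-level `PART_MATCH` regex splits number/suffix pairs and `_get_units` maps the usual suffixes ('s', 'm', 'h', ...) to their lengths in seconds, calls like the following would be expected to behave as shown (hypothetical session):

    >>> parse('90')      # a bare number defaults to seconds
    90.0
    >>> parse('1m 30s')  # components are summed
    90.0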
def _url_builder(url_root, api_key, path, params):
    """Helper function to build a parameterized url."""
    params['api_key'] = api_key
    url_end = urlencode(params)
    url = "%s%s%s" % (url_root, path, url_end)
    return url
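A sketch of how it composes, with hypothetical values. Because the helper concatenates `path` and the encoded query string directly, `path` has to supply its own '?' (or '&') separator:

    >>> _url_builder('https://api.example.com', 'SECRET', '/v1/data?', {'limit': 10})
    'https://api.example.com/v1/data?limit=10&api_key=SECRET'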
def event_handler(event_name):
    """
    Decorator for designating a handler for an event type.

    ``event_name`` must be a string representing the name of the event type.

    The decorated function must accept a parameter: the body of the received
    event, which will be a Python object that can be encoded as a JSON
    (dict, list, str, int, bool, float or None)

    :param event_name: The name of the event that will be handled. Only one
        handler per event name is supported by the same microservice.
    """
    def wrapper(func):
        func._event_handler = True
        func._handled_event = event_name
        return func
    return wrapper
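A hypothetical handler registration; the surrounding framework is assumed to discover functions via the `_event_handler` and `_handled_event` attributes the decorator sets:

    @event_handler('user_created')
    def on_user_created(body):
        # body is the decoded JSON payload of the event
        print('new user:', body['id'])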
def collect(self):
    """Load and save ``PageColorScheme`` for every ``PageTheme``

    .. code-block:: bash

        static/themes/bootswatch/united/variables.scss
        static/themes/bootswatch/united/styles.scss

    """
    self.ignore_patterns = [
        '*.png', '*.jpg', '*.js', '*.gif', '*.ttf', '*.md', '*.rst',
        '*.svg']
    page_themes = PageTheme.objects.all()

    for finder in get_finders():
        for path, storage in finder.list(self.ignore_patterns):
            for t in page_themes:
                static_path = 'themes/{0}'.format(t.name.split('/')[-1])
                if static_path in path:
                    try:
                        page_theme = PageTheme.objects.get(id=t.id)
                    except PageTheme.DoesNotExist:
                        raise Exception(
                            "Run sync_themes before this command")
                    except Exception as e:
                        self.stdout.write(
                            "Cannot load {} into database original error: {}".format(t, e))
                    # find and load skins
                    skins_path = os.path.join(
                        storage.path('/'.join(path.split('/')[0:-1])))
                    for dirpath, skins, filenames in os.walk(skins_path):
                        for skin in [s for s in skins if s not in ['fonts']]:
                            for skin_dirpath, skins, filenames in os.walk(os.path.join(dirpath, skin)):
                                skin, created = PageColorScheme.objects.get_or_create(
                                    theme=page_theme, label=skin, name=skin.title())
                                for f in filenames:
                                    if 'styles' in f:
                                        with codecs.open(os.path.join(skin_dirpath, f)) as style_file:
                                            skin.styles = style_file.read()
                                    elif 'variables' in f:
                                        with codecs.open(os.path.join(skin_dirpath, f)) as variables_file:
                                            skin.variables = variables_file.read()
                                skin.save()
                                self.skins_updated += 1

    self.page_themes_updated += len(page_themes)
def set_wm_wallpaper(img):
    """Set the wallpaper for non desktop environments."""
    if shutil.which("feh"):
        util.disown(["feh", "--bg-fill", img])
    elif shutil.which("nitrogen"):
        util.disown(["nitrogen", "--set-zoom-fill", img])
    elif shutil.which("bgs"):
        util.disown(["bgs", "-z", img])
    elif shutil.which("hsetroot"):
        util.disown(["hsetroot", "-fill", img])
    elif shutil.which("habak"):
        util.disown(["habak", "-mS", img])
    elif shutil.which("display"):
        util.disown(["display", "-backdrop", "-window", "root", img])
    else:
        logging.error("No wallpaper setter found.")
        return
def sync_config_tasks(self):
    """Performs the first sync of a list of tasks, often defined in the config file."""
    tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}

    for task in self.all_tasks:
        if tasks_by_hash.get(task["hash"]):
            del tasks_by_hash[task["hash"]]
        else:
            self.collection.remove({"_id": task["_id"]})
            log.debug("Scheduler: deleted %s" % task["hash"])

    # What remains are the new ones to be inserted
    for h, task in tasks_by_hash.items():
        task["hash"] = h
        task["datelastqueued"] = datetime.datetime.fromtimestamp(0)

        if task.get("dailytime"):
            # Because MongoDB can store datetimes but not times,
            # we add today's date to the dailytime.
            # The date part will be discarded in check()
            task["dailytime"] = datetime.datetime.combine(
                datetime.datetime.utcnow(), task["dailytime"])
            task["interval"] = 3600 * 24
            # Avoid queueing the task in check() if today's dailytime has already passed
            if datetime.datetime.utcnow().time() > task["dailytime"].time():
                task["datelastqueued"] = datetime.datetime.utcnow()

        self.collection.find_one_and_update({"hash": task["hash"]}, {"$set": task}, upsert=True)
        log.debug("Scheduler: added %s" % task["hash"])
def complete_io(self, iocb, msg):
    """Called by a handler to return data to the client."""
    if _debug: IOQController._debug("complete_io %r %r", iocb, msg)

    # check to see if it is completing the active one
    if iocb is not self.active_iocb:
        raise RuntimeError("not the current iocb")

    # normal completion
    IOController.complete_io(self, iocb, msg)

    # no longer an active iocb
    self.active_iocb = None

    # check to see if we should wait a bit
    if self.wait_time:
        # change our state
        self.state = CTRL_WAITING
        _statelog.debug("%s %s %s" % (_strftime(), self.name, "waiting"))

        # schedule a call in the future
        task = FunctionTask(IOQController._wait_trigger, self)
        task.install_task(delta=self.wait_time)
    else:
        # change our state
        self.state = CTRL_IDLE
        _statelog.debug("%s %s %s" % (_strftime(), self.name, "idle"))

        # look for more to do
        deferred(IOQController._trigger, self)
def set_prop(self, prop, value, ef=None):
    """
    Set attribute values.

    :param prop:
    :param value:
    :param ef:
    :return:
    """
    if ef:
        # prop should be restricted to n_decoys, an int, the no. of decoys
        # corresponding to a given FPF.
        # value is restricted to the corresponding enrichment factor and
        # should be a float
        self.ef[prop] = value
    else:
        if prop == 'ensemble':
            # value is a tuple of strings that gives the ensemble composition
            self.ensemble = value
        elif prop == 'auc':
            # value is a float that gives the auc value
            self.auc = value
def load_file(file_path, credentials=None):
    """Load a file from either local or gcs.

    Args:
        file_path: The target file path, which should have the prefix 'gs://'
            if to be loaded from gcs.
        credentials: Optional credential to be used to load the file from gcs.

    Returns:
        A python File object if loading file from local or a StringIO object
        if loading from gcs.
    """
    if file_path.startswith('gs://'):
        return _load_file_from_gcs(file_path, credentials)
    else:
        return open(file_path, 'r')
def _compute_MFP_matrix(self):
    """See Fouss et al. (2006).

    This is the mean-first passage time matrix. It's not a distance.

    Mfp[i, k] := m(k|i) in the notation of Fouss et al. (2006). This
    corresponds to the standard notation for transition matrices (left index
    initial state, right index final state, i.e. a right-stochastic matrix,
    with each row summing to one).
    """
    self.MFP = np.zeros(self.Lp.shape)
    for i in range(self.Lp.shape[0]):
        for k in range(self.Lp.shape[1]):
            for j in range(self.Lp.shape[1]):
                self.MFP[i, k] += (self.Lp[i, j] - self.Lp[i, k]
                                   - self.Lp[k, j] + self.Lp[k, k]) * self.z[j]
    settings.mt(0, 'computed mean first passage time matrix')
    self.distances_dpt = self.MFP
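The triple loop above runs in pure Python and scales as O(n^3); an equivalent vectorized sketch in NumPy (a hypothetical helper, assuming `Lp` and `z` as defined above) expands the summand term by term:

    import numpy as np

    def mfp_vectorized(Lp, z):
        # v[i] = sum_j Lp[i, j] * z[j]
        v = Lp @ z
        Z = z.sum()
        # MFP[i, k] = v[i] - Z*Lp[i, k] - v[k] + Z*Lp[k, k]
        return v[:, None] - Z * Lp - v[None, :] + Z * np.diag(Lp)[None, :]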
def get_resources(cls):
    """Returns Ext Resources."""
    plugin = directory.get_plugin()
    controller = IPAvailabilityController(plugin)
    return [extensions.ResourceExtension(Ip_availability.get_alias(),
                                         controller)]
def load(fp, **kwargs) -> BioCCollection:
    """
    Deserialize fp (a .read()-supporting text file or binary file containing
    a JSON document) to a BioCCollection object

    Args:
        fp: a file containing a JSON document
        **kwargs:

    Returns:
        BioCCollection: a collection
    """
    obj = json.load(fp, **kwargs)
    return parse_collection(obj)
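Typical usage would be along these lines (hypothetical path):

    with open('collection.json') as fp:
        collection = load(fp)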
def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=False):
    '''
    Ensure a Linux ACL list is present

    Takes a list of acl names and adds them to the given path

    name
        The acl path

    acl_type
        The type of the acl is used for it can be 'user' or 'group'

    acl_names
        The list of users or groups

    perms
        Set the permissions eg.: rwx

    recurse
        Set the permissions recursive in the path

    force
        Wipe out old permissions and ensure only the new permissions are set
    '''
    if acl_names is None:
        acl_names = []

    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0}
    _octal_perms = sum([_octal.get(i, i) for i in perms])
    if not os.path.exists(name):
        ret['comment'] = '{0} does not exist'.format(name)
        ret['result'] = False
        return ret

    __current_perms = __salt__['acl.getfacl'](name)

    if acl_type.startswith(('d:', 'default:')):
        _acl_type = ':'.join(acl_type.split(':')[1:])
        _current_perms = __current_perms[name].get('defaults', {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False

    _origin_group = _current_perms.get('comment', {}).get('group', None)
    _origin_owner = _current_perms.get('comment', {}).get('owner', None)

    _current_acl_types = []
    diff_perms = False
    for key in _current_perms[acl_type]:
        for current_acl_name in key.keys():
            _current_acl_types.append(current_acl_name.encode('utf-8'))
            diff_perms = _octal_perms == key[current_acl_name]['octal']
    if acl_type == 'user':
        try:
            _current_acl_types.remove(_origin_owner)
        except ValueError:
            pass
    else:
        try:
            _current_acl_types.remove(_origin_group)
        except ValueError:
            pass
    diff_acls = set(_current_acl_types) ^ set(acl_names)
    if not diff_acls and diff_perms and not force:
        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': 'Permissions and {}s are in the desired state'.format(acl_type)}
        return ret

    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if acl_names == '':
        _search_names = __current_perms[name].get('comment').get(_acl_type, '')
    else:
        _search_names = acl_names

    if _current_perms.get(_acl_type, None) or _default:
        try:
            users = {}
            for i in _current_perms[_acl_type]:
                if i and next(six.iterkeys(i)) in _search_names:
                    users.update(i)
        except (AttributeError, KeyError):
            users = None

        if users:
            changes = {}
            for count, search_name in enumerate(_search_names):
                if search_name in users:
                    if users[search_name]['octal'] == sum([_octal.get(i, i) for i in perms]):
                        ret['comment'] = 'Permissions are in the desired state'
                    else:
                        changes.update({'new': {'acl_name': ', '.join(acl_names),
                                                'acl_type': acl_type,
                                                'perms': _octal_perms},
                                        'old': {'acl_name': ', '.join(acl_names),
                                                'acl_type': acl_type,
                                                'perms': six.text_type(users[search_name]['octal'])}})
                        if __opts__['test']:
                            ret.update({'comment': 'Updated permissions will be applied for '
                                                   '{0}: {1} -> {2}'.format(
                                                       acl_names,
                                                       six.text_type(users[search_name]['octal']),
                                                       perms),
                                        'result': None, 'changes': changes})
                            return ret
                        try:
                            if force:
                                __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True)
                            for acl_name in acl_names:
                                __salt__['acl.modfacl'](acl_type, acl_name, perms, name,
                                                        recursive=recurse, raise_err=True)
                            ret.update({'comment': 'Updated permissions for '
                                                   '{0}'.format(acl_names),
                                        'result': True, 'changes': changes})
                        except CommandExecutionError as exc:
                            ret.update({'comment': 'Error updating permissions for '
                                                   '{0}: {1}'.format(acl_names, exc.strerror),
                                        'result': False})
                else:
                    changes = {'new': {'acl_name': ', '.join(acl_names),
                                       'acl_type': acl_type,
                                       'perms': perms}}
                    if __opts__['test']:
                        ret.update({'comment': 'New permissions will be applied for '
                                               '{0}: {1}'.format(acl_names, perms),
                                    'result': None, 'changes': changes})
                        ret['result'] = None
                        return ret
                    try:
                        if force:
                            __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True)
                        for acl_name in acl_names:
                            __salt__['acl.modfacl'](acl_type, acl_name, perms, name,
                                                    recursive=recurse, raise_err=True)
                        ret.update({'comment': 'Applied new permissions for '
                                               '{0}'.format(', '.join(acl_names)),
                                    'result': True, 'changes': changes})
                    except CommandExecutionError as exc:
                        ret.update({'comment': 'Error updating permissions for {0}: '
                                               '{1}'.format(acl_names, exc.strerror),
                                    'result': False})
        else:
            changes = {'new': {'acl_name': ', '.join(acl_names),
                               'acl_type': acl_type,
                               'perms': perms}}
            if __opts__['test']:
                ret.update({'comment': 'New permissions will be applied for '
                                       '{0}: {1}'.format(acl_names, perms),
                            'result': None, 'changes': changes})
                ret['result'] = None
                return ret
            try:
                if force:
                    __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True)
                for acl_name in acl_names:
                    __salt__['acl.modfacl'](acl_type, acl_name, perms, name,
                                            recursive=recurse, raise_err=True)
                ret.update({'comment': 'Applied new permissions for '
                                       '{0}'.format(', '.join(acl_names)),
                            'result': True, 'changes': changes})
            except CommandExecutionError as exc:
                ret.update({'comment': 'Error updating permissions for {0}: '
                                       '{1}'.format(acl_names, exc.strerror),
                            'result': False})
    else:
        ret['comment'] = 'ACL Type does not exist'
        ret['result'] = False

    return ret
def _parse_title(dom, details):
    """
    Parse title/name of the book.

    Args:
        dom (obj): HTMLElement containing whole HTML page.
        details (obj): HTMLElement containing slice of the page with details.

    Returns:
        str: Book's title.

    Raises:
        AssertionError: If title not found.
    """
    title = details.find("h1")

    # if the header is missing, try to parse title from the <title> tag
    if not title:
        title = dom.find("title")
        assert title, "Can't find <title> tag!"
        return title[0].getContent().split("|")[0].strip()

    return title[0].getContent().strip()
def get_dimensions_units(names):
    """Create dictionary of unit dimensions."""
    dimensions_uni = {}

    for name in names:
        key = get_key_from_dimensions(names[name].dimensions)
        dimensions_uni[key] = names[name]

        plain_dimensions = [{'base': name, 'power': 1}]
        key = get_key_from_dimensions(plain_dimensions)
        dimensions_uni[key] = names[name]

        if not names[name].dimensions:
            names[name].dimensions = plain_dimensions

        names[name].dimensions = [{'base': names[i['base']].name,
                                   'power': i['power']}
                                  for i in names[name].dimensions]

    return dimensions_uni
def use_comparative_activity_view(self):
    """Pass through to provider ActivityLookupSession.use_comparative_activity_view"""
    self._object_views['activity'] = COMPARATIVE
    # self._get_provider_session('activity_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_activity_view()
        except AttributeError:
            pass
def _urljoin(base, url):
    """Join relative URLs to base URLs like urllib.parse.urljoin but support
    arbitrary URIs (esp. 'http+unix://').
    """
    parsed = urlparse(base)
    scheme = parsed.scheme
    return urlparse(
        urljoin(parsed._replace(scheme='http').geturl(), url)
    )._replace(scheme=scheme).geturl()
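The temporary scheme swap lets the standard library resolve against schemes `urljoin` would otherwise refuse to handle. A hypothetical Docker-style example:

    >>> _urljoin('http+unix://%2Fvar%2Frun%2Fdocker.sock/v1.24/', 'containers/json')
    'http+unix://%2Fvar%2Frun%2Fdocker.sock/v1.24/containers/json'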
def VerifyMessageSignature(self, unused_response_comms, packed_message_list,
                           cipher, cipher_verified, api_version,
                           remote_public_key):
    """Verify the message list signature.

    This is the way the messages are verified in the client.

    In the client we also check that the nonce returned by the server is
    correct (the timestamp doubles as a nonce). If the nonce fails we deem
    the response unauthenticated since it might have resulted from a replay
    attack.

    Args:
        packed_message_list: The PackedMessageList rdfvalue from the server.
        cipher: The cipher belonging to the remote end.
        cipher_verified: If True, the cipher's signature is not verified again.
        api_version: The api version we should use.
        remote_public_key: The public key of the source.

    Returns:
        An rdf_flows.GrrMessage.AuthorizationState.

    Raises:
        DecryptionError: if the message is corrupt.
    """
    # This is not used atm since we only support a single api version (3).
    _ = api_version
    result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

    if cipher_verified or cipher.VerifyCipherSignature(remote_public_key):
        stats_collector_instance.Get().IncrementCounter(
            "grr_authenticated_messages")
        result = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED

    # Check for replay attacks. We expect the server to return the same
    # timestamp nonce we sent.
    if packed_message_list.timestamp != self.timestamp:  # pytype: disable=attribute-error
        result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

    if not cipher.cipher_metadata:
        # Fake the metadata
        cipher.cipher_metadata = rdf_flows.CipherMetadata(
            source=packed_message_list.source)

    return result
async def parse_target(self, runtime, target_str):
    '''A target is a pipeline of a module into zero or more rules, and each
    module and rule can itself be scoped with zero or more module names.'''
    pipeline_parts = target_str.split(RULE_SEPARATOR)
    module = await self.resolve_module(runtime, pipeline_parts[0], target_str)
    rules = []
    for part in pipeline_parts[1:]:
        rule = await self.resolve_rule(runtime, part)
        rules.append(rule)
    return module, tuple(rules)
def main(argv):
    """
    Identify the release type and create a new target file with TOML header.

    Requires three arguments.
    """
    source, target, tag = argv
    if "a" in tag:
        bump = "alpha"
    elif "b" in tag:
        bump = "beta"
    else:
        bump = find_bump(target, tag)
    filename = "{}.md".format(tag)
    destination = copy(join(source, filename), target)
    build_hugo_md(destination, tag, bump)
def transform_properties(properties, schema):
    """Transform properties types according to a schema.

    Parameters
    ----------
    properties : dict
        Properties to transform.
    schema : dict
        Fiona schema containing the types.
    """
    new_properties = properties.copy()
    for prop_value, (prop_name, prop_type) in zip(new_properties.values(),
                                                  schema["properties"].items()):
        if prop_value is None:
            continue
        elif prop_type == "time":
            new_properties[prop_name] = parse_date(prop_value).time()
        elif prop_type == "date":
            new_properties[prop_name] = parse_date(prop_value).date()
        elif prop_type == "datetime":
            new_properties[prop_name] = parse_date(prop_value)
    return new_properties
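A sketch with hypothetical inputs, assuming `parse_date` is a dateutil-style parser; note the positional zip pairs property values with schema entries, so both dicts are given matching key order here:

    schema = {'properties': {'created': 'datetime', 'day': 'date'}}
    props = {'created': '2020-01-02T03:04:05', 'day': '2020-01-02'}
    transform_properties(props, schema)
    # {'created': datetime.datetime(2020, 1, 2, 3, 4, 5),
    #  'day': datetime.date(2020, 1, 2)}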
def pointer_gate(num_qubits, U):
    """
    Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the
    qubit addressed by the pointer qubits interpreted as an unsigned binary
    integer.

    There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered

        N - 1, N - 2, ..., N - P

    are those reserved to represent the pointer. The first N - P qubits
    are the qubits which the one-qubit gate U can act on.
    """
    ptr_bits = int(floor(np.log2(num_qubits)))
    data_bits = num_qubits - ptr_bits
    ptr_state = 0
    assert ptr_bits > 0

    program = pq.Program()
    program.defgate("CU", controlled(ptr_bits, U))

    for _, target_qubit, changed in gray(ptr_bits):
        if changed is None:
            for ptr_qubit in range(num_qubits - ptr_bits, num_qubits):
                program.inst(X(ptr_qubit))
                ptr_state ^= 1 << (ptr_qubit - data_bits)
        else:
            program.inst(X(data_bits + changed))
            ptr_state ^= 1 << changed

        if target_qubit < data_bits:
            control_qubits = tuple(data_bits + i for i in range(ptr_bits))
            program.inst(("CU",) + control_qubits + (target_qubit,))

    fixup(program, data_bits, ptr_bits, ptr_state)
    return program
def configure(self, config):
    """
    Configures component by passing configuration parameters.

    :param config: configuration parameters to be set.
    """
    credentials = CredentialParams.many_from_config(config)
    for credential in credentials:
        self._credentials.append(credential)
def sample_std(self, f, *args, **kwargs):
    r"""Sample standard deviation of numerical method f over all samples

    Calls f(\*args, \*\*kwargs) on all samples and computes the standard
    deviation. f must return a numerical value or an ndarray.

    Parameters
    ----------
    f : method reference or name (str)
        Model method to be evaluated for each model sample
    args : arguments
        Non-keyword arguments to be passed to the method in each call
    kwargs : keyword arguments
        Keyword arguments to be passed to the method in each call

    Returns
    -------
    std : float or ndarray
        standard deviation or array of standard deviations
    """
    vals = self.sample_f(f, *args, **kwargs)
    return _np.std(vals, axis=0)
r"""Sample standard deviation of numerical method f over all samples Calls f(\*args, \*\*kwargs) on all samples and computes the standard deviation. f must return a numerical value or an ndarray. Parameters ---------- f : method reference or name (str) Model method to be evaluated for each model sample args : arguments Non-keyword arguments to be passed to the method in each call kwargs : keyword-argments Keyword arguments to be passed to the method in each call Returns ------- std : float or ndarray standard deviation or array of standard deviations
def package(self, value):
    """
    Setter for **self.__package** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is not None:
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "package", value)
    self.__package = value
def get_bitcoind_config(config_file=None, impl=None):
    """
    Set bitcoind options globally.
    Call this before trying to talk to bitcoind.
    """
    loaded = False

    bitcoind_server = None
    bitcoind_port = None
    bitcoind_user = None
    bitcoind_passwd = None
    bitcoind_timeout = None
    bitcoind_regtest = None
    bitcoind_p2p_port = None
    bitcoind_spv_path = None

    regtest = None

    if config_file is not None:
        parser = SafeConfigParser()
        parser.read(config_file)

        if parser.has_section('bitcoind'):
            if parser.has_option('bitcoind', 'server'):
                bitcoind_server = parser.get('bitcoind', 'server')

            if parser.has_option('bitcoind', 'port'):
                bitcoind_port = int(parser.get('bitcoind', 'port'))

            if parser.has_option('bitcoind', 'p2p_port'):
                bitcoind_p2p_port = int(parser.get('bitcoind', 'p2p_port'))

            if parser.has_option('bitcoind', 'user'):
                bitcoind_user = parser.get('bitcoind', 'user')

            if parser.has_option('bitcoind', 'passwd'):
                bitcoind_passwd = parser.get('bitcoind', 'passwd')

            if parser.has_option('bitcoind', 'spv_path'):
                bitcoind_spv_path = parser.get('bitcoind', 'spv_path')

            if parser.has_option('bitcoind', 'regtest'):
                regtest = parser.get('bitcoind', 'regtest')
            else:
                regtest = 'no'

            if parser.has_option('bitcoind', 'timeout'):
                bitcoind_timeout = float(parser.get('bitcoind', 'timeout'))

            if regtest.lower() in ["yes", "y", "true", "1", "on"]:
                bitcoind_regtest = True
            else:
                bitcoind_regtest = False

            loaded = True

    if not loaded:
        bitcoind_server = 'bitcoin.blockstack.com'
        bitcoind_port = 8332
        bitcoind_user = 'blockstack'
        bitcoind_passwd = 'blockstacksystem'
        bitcoind_regtest = False
        bitcoind_timeout = 300
        bitcoind_p2p_port = 8333
        bitcoind_spv_path = os.path.expanduser("~/.virtualchain-spv-headers.dat")

    default_bitcoin_opts = {
        "bitcoind_user": bitcoind_user,
        "bitcoind_passwd": bitcoind_passwd,
        "bitcoind_server": bitcoind_server,
        "bitcoind_port": bitcoind_port,
        "bitcoind_timeout": bitcoind_timeout,
        "bitcoind_regtest": bitcoind_regtest,
        "bitcoind_p2p_port": bitcoind_p2p_port,
        "bitcoind_spv_path": bitcoind_spv_path
    }

    return default_bitcoin_opts
def compute_bgband(evtpath, srcreg, bkgreg, ebins, env=None):
    """Compute background information for a source in one or more energy bands.

    evtpath
      Path to a CIAO events file
    srcreg
      String specifying the source region to consider; use 'region(path.reg)'
      if you have the region saved in a file.
    bkgreg
      String specifying the background region to consider; same format as srcreg
    ebins
      Iterable of 2-tuples giving low and high bounds of the energy bins to
      consider, measured in eV.
    env
      An optional CiaoEnvironment instance; default settings are used if
      unspecified.

    Returns a DataFrame containing at least the following columns:

    elo
      The low bound of this energy bin, in eV.
    ehi
      The high bound of this energy bin, in eV.
    ewidth
      The width of the bin in eV; simply `abs(ehi - elo)`.
    nsrc
      The number of events within the specified source region and energy range.
    nbkg
      The number of events within the specified background region and energy range.
    nbkg_scaled
      The number of background events scaled to the source area; not an integer.
    nsrc_subbed
      The estimated number of non-background events in the source region;
      simply `nsrc - nbkg_scaled`.
    log_prob_bkg
      The logarithm of the probability that all counts in the source region
      are due to background events.
    src_sigma
      The confidence of source detection in sigma inferred from log_prob_bkg.

    The probability of backgrounditude is computed as:

      b^s * exp(-b) / s!

    where `b` is `nbkg_scaled` and `s` is `nsrc`. The confidence of source
    detection is computed as:

      sqrt(2) * erfcinv(prob_bkg)

    where `erfcinv` is the inverse complementary error function.
    """
    import numpy as np
    import pandas as pd
    from scipy.special import erfcinv, gammaln

    if env is None:
        from . import CiaoEnvironment
        env = CiaoEnvironment()

    srcarea = get_region_area(env, evtpath, srcreg)
    bkgarea = get_region_area(env, evtpath, bkgreg)

    srccounts = [count_events(env, evtpath, '[sky=%s][energy=%d:%d]' % (srcreg, elo, ehi))
                 for elo, ehi in ebins]
    bkgcounts = [count_events(env, evtpath, '[sky=%s][energy=%d:%d]' % (bkgreg, elo, ehi))
                 for elo, ehi in ebins]

    df = pd.DataFrame({
        'elo': [t[0] for t in ebins],
        'ehi': [t[1] for t in ebins],
        'nsrc': srccounts,
        'nbkg': bkgcounts,
    })

    df['ewidth'] = np.abs(df['ehi'] - df['elo'])
    df['nbkg_scaled'] = df['nbkg'] * srcarea / bkgarea
    df['log_prob_bkg'] = (df['nsrc'] * np.log(df['nbkg_scaled'])
                          - df['nbkg_scaled'] - gammaln(df['nsrc'] + 1))
    df['src_sigma'] = np.sqrt(2) * erfcinv(np.exp(df['log_prob_bkg']))
    df['nsrc_subbed'] = df['nsrc'] - df['nbkg_scaled']
    return df
def common_log(environ, response, response_time=None):
    """
    Given the WSGI environ and the response, log this event in Common Log
    Format.
    """
    logger = logging.getLogger()
    if response_time:
        formatter = ApacheFormatter(with_response_time=True)
        try:
            log_entry = formatter(response.status_code, environ,
                                  len(response.content), rt_us=response_time)
        except TypeError:
            # Upstream introduced a very annoying breaking change on the
            # rt_ms/rt_us kwarg.
            log_entry = formatter(response.status_code, environ,
                                  len(response.content), rt_ms=response_time)
    else:
        formatter = ApacheFormatter(with_response_time=False)
        log_entry = formatter(response.status_code, environ,
                              len(response.content))
    logger.info(log_entry)
    return log_entry
def to_repr(self: Variable, values, brackets1d: Optional[bool] = False) \
        -> str:
    """Return a valid string representation for the given |Variable| object.

    Function |to_repr| is thought for internal purposes only, more
    specifically for defining string representations of subclasses of
    class |Variable| like the following:

    >>> from hydpy.core.variabletools import to_repr, Variable
    >>> class Var(Variable):
    ...     NDIM = 0
    ...     TYPE = int
    ...     __hydpy__connect_variable2subgroup__ = None
    ...     initinfo = 1.0, False
    >>> var = Var(None)
    >>> var.value = 2
    >>> var
    var(2)

    The following examples demonstrate all covered cases. Note that
    option `brackets1d` allows choosing between a "vararg" and an
    "iterable" string representation for 1-dimensional variables
    (the first one being the default):

    >>> print(to_repr(var, 2))
    var(2)

    >>> Var.NDIM = 1
    >>> var = Var(None)
    >>> var.shape = 3
    >>> print(to_repr(var, range(3)))
    var(0, 1, 2)
    >>> print(to_repr(var, range(3), True))
    var([0, 1, 2])
    >>> print(to_repr(var, range(30)))
    var(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
        19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)
    >>> print(to_repr(var, range(30), True))
    var([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
         19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])

    >>> Var.NDIM = 2
    >>> var = Var(None)
    >>> var.shape = (2, 3)
    >>> print(to_repr(var, [range(3), range(3, 6)]))
    var([[0, 1, 2],
         [3, 4, 5]])
    >>> print(to_repr(var, [range(30), range(30, 60)]))
    var([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
          18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
         [30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
          46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
    """
    prefix = f'{self.name}('
    if isinstance(values, str):
        string = f'{self.name}({values})'
    elif self.NDIM == 0:
        string = f'{self.name}({objecttools.repr_(values)})'
    elif self.NDIM == 1:
        if brackets1d:
            string = objecttools.assignrepr_list(values, prefix, 72) + ')'
        else:
            string = objecttools.assignrepr_values(
                values, prefix, 72) + ')'
    else:
        string = objecttools.assignrepr_list2(values, prefix, 72) + ')'
    return '\n'.join(self.commentrepr + [string])
def profile(self, frame, event, arg):  # PYCHOK arg required to match signature
    """Profiling method used to profile matching codepoints and events."""
    if (self.events is None) or (event in self.events):
        frame_info = inspect.getframeinfo(frame)
        cp = (frame_info[0], frame_info[2], frame_info[1])
        if self.codepoint_included(cp):
            objects = muppy.get_objects()
            size = muppy.get_size(objects)
            if cp not in self.memories:
                self.memories[cp] = [0, 0, 0, 0]
                self.memories[cp][0] = 1
                self.memories[cp][1] = size
                self.memories[cp][2] = size
            else:
                self.memories[cp][0] += 1
                if self.memories[cp][1] > size:
                    self.memories[cp][1] = size
                if self.memories[cp][2] < size:
                    self.memories[cp][2] = size
def expire_at(self, key, _time):
    """Sets the expiration time of @key to @_time

    @_time: absolute Unix timestamp (seconds since January 1, 1970)
    """
    return self._client.expireat(self.get_key(key), round(_time))
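Assuming `self._client` is a redis-py style client (whose `expireat` takes an absolute timestamp), expiring a key one minute from now would look like:

    import time
    cache.expire_at('session:abc', time.time() + 60)  # hypothetical instance and key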
def parse_timespan(timedef):
    """
    Convert a string timespan definition to seconds, for example converting
    '1m30s' to 90. If *timedef* is already an int, the value will be returned
    unmodified.

    :param timedef: The timespan definition to convert to seconds.
    :type timedef: int, str
    :return: The converted value in seconds.
    :rtype: int
    """
    if isinstance(timedef, int):
        return timedef
    converter_order = ('w', 'd', 'h', 'm', 's')
    converters = {
        'w': 604800,
        'd': 86400,
        'h': 3600,
        'm': 60,
        's': 1
    }
    timedef = timedef.lower()
    if timedef.isdigit():
        return int(timedef)
    elif len(timedef) == 0:
        return 0
    seconds = -1
    for spec in converter_order:
        timedef = timedef.split(spec)
        if len(timedef) == 1:
            timedef = timedef[0]
            continue
        elif len(timedef) > 2 or not timedef[0].isdigit():
            seconds = -1
            break
        adjustment = converters[spec]
        seconds = max(seconds, 0)
        seconds += (int(timedef[0]) * adjustment)
        timedef = timedef[1]
        if not len(timedef):
            break
    if seconds < 0:
        raise ValueError('invalid time format')
    return seconds
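The behavior follows directly from the implementation above:

    >>> parse_timespan('1m30s')
    90
    >>> parse_timespan('2h')
    7200
    >>> parse_timespan(90)  # ints pass through unmodified
    90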
def get(self, entry):
    """get the value from vault secret backend"""
    if self.apiVersion == 1:
        path = self.secretsmount + '/' + entry
    else:
        path = self.secretsmount + '/data/' + entry

    # note that the HTTP path contains v1 for both versions of the key-value
    # secret engine. Different versions of the key-value engine are
    # effectively separate secret engines in vault, with the same base HTTP
    # API, but with different paths within it.
    proj = yield self._http.get('/v1/{0}'.format(path))
    code = yield proj.code
    if code != 200:
        raise KeyError("The key %s does not exist in Vault provider: request"
                       " return code:%d." % (entry, code))
    json = yield proj.json()
    if self.apiVersion == 1:
        ret = json.get('data', {}).get('value')
    else:
        ret = json.get('data', {}).get('data', {}).get('value')
    return ret
def get_database_data(file_name=''):
    """return the energy (eV) and Sigma (barn) from the file_name

    Parameters:
    ===========
    file_name: string ('' by default)
        name of csv file

    Returns:
    ========
    pandas dataframe

    Raises:
    =======
    IOError if file does not exist
    """
    if not os.path.exists(file_name):
        raise IOError("File {} does not exist!".format(file_name))
    df = pd.read_csv(file_name, header=1)
    return df
def addcomment(self, order_increment_id, status, comment=None, notify=False):
    """
    Add comment to order or change its state

    :param order_increment_id: Order ID

    TODO: Identify possible values for status
    """
    if comment is None:
        comment = ""
    return bool(self.call(
        'sales_order.addComment',
        [order_increment_id, status, comment, notify]
    ))
def do_catch_fedora_errors(parser, token):
    """Catches fedora errors between ``{% fedora_access %}`` and
    ``{% end_fedora_access %}``.

    Template designers may specify optional ``{% permission_denied %}`` and
    ``{% fedora_failed %}`` sections with fallback content in case of
    permission or other errors while rendering the main block.

    Note that when Django's ``TEMPLATE_DEBUG`` setting is on, it precludes
    all error handling and displays the Django exception screen for all
    errors, including fedora errors, even if you use this template tag. Turn
    off ``TEMPLATE_DEBUG`` if that debug screen is getting in the way of the
    use of {% fedora_access %}.
    """
    END_TAGS = ('end_fedora_access', 'permission_denied', 'fedora_failed')

    blocks = {}
    blocks['fedora_access'] = parser.parse(END_TAGS)
    token = parser.next_token()

    while token.contents != 'end_fedora_access':
        # need to convert token.contents manually to a str. django gives us
        # a unicode. we use it below in **blocks, but python 2.6.2 and
        # earlier can't use **kwargs with unicode keys. (2.6.5 is ok with
        # it; not sure about intervening versions.) in any case, direct
        # conversion to string is safe here (i.e., no encoding needed)
        # because the parser guarantees it's one of our END_TAGS, which are
        # all ascii.
        current_block = str(token.contents)
        if current_block in blocks:
            raise template.TemplateSyntaxError(
                current_block + ' may appear only once in a fedora_access block')

        blocks[current_block] = parser.parse(END_TAGS)
        token = parser.next_token()

    return CatchFedoraErrorsNode(**blocks)
def _pre_run_checks(self, stream=sys.stdout, dry_run=False):
    """Do some checks before running this link

    This checks if input and output files are present.

    If input files are missing this will raise `OSError` if dry_run is False.
    If all output files are present this returns False.

    Parameters
    ----------
    stream : `file`
        Stream that this function will print to,
        Must have 'write' function

    dry_run : bool
        Print command but do not run it

    Returns
    -------
    status : bool
        True if it is ok to proceed with running the link
    """
    input_missing = self.check_input_files(return_found=False)
    if input_missing:
        if dry_run:
            stream.write("Input files are missing: %s: %i\n" %
                         (self.linkname, len(input_missing)))
        else:
            print(self.args)
            raise OSError("Input files are missing: %s" % input_missing)

    output_found, output_missing = self.check_output_files()
    if output_found and not output_missing:
        stream.write("All output files for %s already exist: %i %i %i\n" %
                     (self.linkname, len(output_found),
                      len(output_missing), len(self.files.output_files)))
        if dry_run:
            pass
        else:
            pass
            # return False
    return True
Do some checks before running this link

        This checks if input and output files are present.

        If input files are missing this will raise `OSError` if dry_run is False

        If all output files are present this prints a notice; the early
        return is currently disabled, so the link is still allowed to run.

        Parameters
        ----------

        stream : `file`
            Stream that this function will print to,
            Must have 'write' function

        dry_run : bool
            Print command but do not run it

        Returns
        -------

        status : bool
            True if it is ok to proceed with running the link
def generate_nucmer_commands(
    filenames,
    outdir=".",
    nucmer_exe=pyani_config.NUCMER_DEFAULT,
    filter_exe=pyani_config.FILTER_DEFAULT,
    maxmatch=False,
):
    """Return a tuple of lists of NUCmer command-lines for ANIm

    The first element is a list of NUCmer commands, the second a list
    of delta_filter_wrapper.py commands. These are ordered such that
    commands are paired. The NUCmer commands should be run before
    the delta-filter commands.

    - filenames - a list of paths to input FASTA files
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter wrapper
    - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option

    Loop over all FASTA files generating NUCmer command lines for each
    pairwise comparison.
    """
    nucmer_cmdlines, delta_filter_cmdlines = [], []
    for idx, fname1 in enumerate(filenames[:-1]):
        for fname2 in filenames[idx + 1:]:
            ncmd, dcmd = construct_nucmer_cmdline(
                fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
            )
            nucmer_cmdlines.append(ncmd)
            delta_filter_cmdlines.append(dcmd)
    return (nucmer_cmdlines, delta_filter_cmdlines)
Return a tuple of lists of NUCmer command-lines for ANIm

    The first element is a list of NUCmer commands, the second a list
    of delta_filter_wrapper.py commands. These are ordered such that
    commands are paired. The NUCmer commands should be run before
    the delta-filter commands.

    - filenames - a list of paths to input FASTA files
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter wrapper
    - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option

    Loop over all FASTA files generating NUCmer command lines for each
    pairwise comparison.
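A hedged usage sketch of the pairing contract; the FASTA paths and output directory below are hypothetical:

# Hypothetical inputs: three genomes to compare pairwise
fastas = ["genomes/A.fasta", "genomes/B.fasta", "genomes/C.fasta"]
nucmer_cmds, filter_cmds = generate_nucmer_commands(fastas, outdir="anim_output")
# Commands are paired by index: run each NUCmer command before its
# corresponding delta-filter wrapper command
for ncmd, dcmd in zip(nucmer_cmds, filter_cmds):
    print(ncmd)
    print(dcmd)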
def os_volumes(self): """ Gets the OS Volumes API client. Returns: OsVolumes: """ if not self.__os_volumes: self.__os_volumes = OsVolumes(self.__connection) return self.__os_volumes
Gets the OS Volumes API client. Returns: OsVolumes:
def init(self, scope):
        """
        Loads the models from the orb system into the given scope.

        :param      scope | <dict>
        """
        schemas = self.schemas().values()
        for schema in schemas:
            scope[schema.name()] = schema.model()
Loads the models from the orb system into the given scope.

        :param      scope | <dict>
def main(): """ Entry point for the Windows loopback tool. """ parser = argparse.ArgumentParser(description='%(prog)s add/remove Windows loopback adapters') parser.add_argument('-a', "--add", nargs=3, action=parse_add_loopback(), help="add a Windows loopback adapter") parser.add_argument("-r", "--remove", action="store", help="remove a Windows loopback adapter") try: args = parser.parse_args() except argparse.ArgumentTypeError as e: raise SystemExit(e) # devcon is required to install/remove Windows loopback adapters devcon_path = shutil.which("devcon") if not devcon_path: raise SystemExit("Could not find devcon.exe") from win32com.shell import shell if not shell.IsUserAnAdmin(): raise SystemExit("You must run this script as an administrator") try: if args.add: add_loopback(devcon_path, args.add[0], args.add[1], args.add[2]) if args.remove: remove_loopback(devcon_path, args.remove) except SystemExit as e: print(e) os.system("pause")
Entry point for the Windows loopback tool.
def is_correct(self):
        """Check if this object configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        state = True

        # _internal_host_check is for having a host check result
        # without running a check plugin
        if self.command_name.startswith('_internal_host_check'):
            # Command line may contain: [state_id][;output]
            parameters = self.command_line.split(';')
            if len(parameters) < 2:
                self.command_name = "_internal_host_check;0;Host assumed to be UP"
                self.add_warning("[%s::%s] has no defined state nor output. Changed to %s"
                                 % (self.my_type, self.command_name, self.command_name))
            elif len(parameters) < 3:
                check_state = 3
                try:
                    check_state = int(parameters[1])
                except ValueError:
                    self.add_warning("[%s::%s] required a non integer state: %s. Using 3."
                                     % (self.my_type, self.command_name, parameters[1]))

                if check_state > 4:
                    self.add_warning("[%s::%s] required an impossible state: %d. Using 3."
                                     % (self.my_type, self.command_name, check_state))
                    # Fall back to 3 (UNKNOWN), otherwise the lookup below
                    # would raise a KeyError
                    check_state = 3

                output = {0: "UP", 1: "DOWN", 2: "DOWN",
                          3: "UNKNOWN", 4: "UNREACHABLE", }[check_state]
                self.command_name = "_internal_host_check;Host assumed to be %s" % output

                self.add_warning("[%s::%s] has no defined output. Changed to %s"
                                 % (self.my_type, self.command_name, self.command_name))
            elif len(parameters) > 3:
                self.command_name = "%s;%s;%s" % (parameters[0], parameters[1], parameters[2])

                self.add_warning("[%s::%s] has too many parameters. Changed to %s"
                                 % (self.my_type, self.command_name, self.command_name))

        return super(Command, self).is_correct() and state
Check if this object configuration is correct :: * Check our own specific properties * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False :rtype: bool
def convert_loguniform_categorical(self, challenger_dict):
        """Convert the values of type `loguniform` back to their initial range.

        Categorical values in the search space were previously encoded as
        lists of numbers; this function also maps them back to their
        original values.

        Parameters
        ----------
        challenger_dict: dict
            challenger dict

        Returns
        -------
        dict
            dict which stores copy of challengers
        """
        converted_dict = {}
        for key, value in challenger_dict.items():
            # convert to loguniform
            if key in self.loguniform_key:
                converted_dict[key] = np.exp(challenger_dict[key])
            # convert categorical back to original value
            elif key in self.categorical_dict:
                idx = challenger_dict[key]
                converted_dict[key] = self.categorical_dict[key][idx]
            else:
                converted_dict[key] = value
        return converted_dict
Convert the values of type `loguniform` back to their initial range.

        Categorical values in the search space were previously encoded as
        lists of numbers; this function also maps them back to their
        original values.

        Parameters
        ----------
        challenger_dict: dict
            challenger dict

        Returns
        -------
        dict
            dict which stores copy of challengers
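A small illustration of the two conversions, using assumed `loguniform_key` and `categorical_dict` contents (the parameter names are hypothetical):

import numpy as np

# Assumed tuner state: "lr" was sampled in log space, "optimizer" as an index
loguniform_key = {"lr"}
categorical_dict = {"optimizer": ["sgd", "adam"]}

challenger = {"lr": np.log(0.01), "optimizer": 1, "momentum": 0.9}
converted = {
    "lr": np.exp(challenger["lr"]),                                       # back to ~0.01
    "optimizer": categorical_dict["optimizer"][challenger["optimizer"]],  # "adam"
    "momentum": challenger["momentum"],                                   # passed through
}
print(converted)  # {'lr': 0.010000..., 'optimizer': 'adam', 'momentum': 0.9}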
def save_prov_to_files(self, showattributes=False):
        """
        Write out the NIDM document as Turtle (nidm.ttl) and JSON-LD
        (nidm.json) serialisations.
        """
        self.doc.add_bundle(self.bundle)
        # provn_file = os.path.join(self.export_dir, 'nidm.provn')
        # provn_fid = open(provn_file, 'w')
        # # FIXME None
        # # provn_fid.write(self.doc.get_provn(4).replace("None", "-"))
        # provn_fid.close()

        ttl_file = os.path.join(self.export_dir, 'nidm.ttl')
        ttl_txt = self.doc.serialize(format='rdf', rdf_format='turtle')
        ttl_txt, json_context = self.use_prefixes(ttl_txt)

        # Add namespaces to json-ld context
        for namespace in self.doc._namespaces.get_registered_namespaces():
            json_context[namespace._prefix] = namespace._uri
        for namespace in \
                list(self.doc._namespaces._default_namespaces.values()):
            json_context[namespace._prefix] = namespace._uri
        # xsd must map to the XML Schema namespace for the INF work-around
        # below to produce valid typed literals
        json_context["xsd"] = "http://www.w3.org/2001/XMLSchema#"

        # Work-around to issue with INF value in rdflib (reported in
        # https://github.com/RDFLib/rdflib/pull/655)
        ttl_txt = ttl_txt.replace(' inf ', ' "INF"^^xsd:float ')
        with open(ttl_file, 'w') as ttl_fid:
            ttl_fid.write(ttl_txt)

        # print(json_context)
        jsonld_file = os.path.join(self.export_dir, 'nidm.json')
        jsonld_txt = self.doc.serialize(format='rdf', rdf_format='json-ld',
                                        context=json_context)
        with open(jsonld_file, 'w') as jsonld_fid:
            jsonld_fid.write(jsonld_txt)

        # provjsonld_file = os.path.join(self.export_dir, 'nidm.provjsonld')
        # provjsonld_txt = self.doc.serialize(format='jsonld')
        # with open(provjsonld_file, 'w') as provjsonld_fid:
        #     provjsonld_fid.write(provjsonld_txt)

        # provn_file = os.path.join(self.export_dir, 'nidm.provn')
        # provn_txt = self.doc.serialize(format='provn')
        # with open(provn_file, 'w') as provn_fid:
        #     provn_fid.write(provn_txt)

        # Post-processing
        if not self.zipped:
            # Just rename temp directory to output_path
            os.rename(self.export_dir, self.out_dir)
        else:
            # Create a zip file that contains the content of the temp
            # directory
            os.chdir(self.export_dir)
            zf = zipfile.ZipFile(os.path.join("..", self.out_dir), mode='w')
            try:
                for root, dirnames, filenames in os.walk("."):
                    for filename in filenames:
                        # Keep the path relative to the temp directory so
                        # files in sub-directories are archived correctly
                        zf.write(os.path.join(root, filename))
            finally:
                zf.close()
            # Need to move up before deleting the folder
            os.chdir("..")
            shutil.rmtree(os.path.join("..", self.export_dir))
Write out the NIDM document as Turtle (nidm.ttl) and JSON-LD
        (nidm.json) serialisations.
def sync_model(self, comment='', compact_central=False, release_borrowed=True, release_workset=True, save_local=False): """Append a sync model entry to the journal. This instructs Revit to sync the currently open workshared model. Args: comment (str): comment to be provided for the sync step compact_central (bool): if True compacts the central file release_borrowed (bool): if True releases the borrowed elements release_workset (bool): if True releases the borrowed worksets save_local (bool): if True saves the local file as well """ self._add_entry(templates.FILE_SYNC_START) if compact_central: self._add_entry(templates.FILE_SYNC_COMPACT) if release_borrowed: self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED) if release_workset: self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS) if save_local: self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL) self._add_entry(templates.FILE_SYNC_COMMENT_OK .format(sync_comment=comment))
Append a sync model entry to the journal. This instructs Revit to sync the currently open workshared model. Args: comment (str): comment to be provided for the sync step compact_central (bool): if True compacts the central file release_borrowed (bool): if True releases the borrowed elements release_workset (bool): if True releases the borrowed worksets save_local (bool): if True saves the local file as well
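A hedged usage sketch; `journal` is an assumed instance of the journal builder above:

# Sync the open workshared model, compacting central and releasing
# borrowed elements and worksets, without saving a local copy
journal.sync_model(comment='End-of-day sync',
                   compact_central=True,
                   release_borrowed=True,
                   release_workset=True,
                   save_local=False)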
def play_station(self, station):
        """Play the station until something ends it

        This function will run forever until terminated by calling
        end_station.
        """
        for song in iterate_forever(station.get_playlist):
            try:
                self.play(song)
            except StopIteration:
                self.stop()
                return
Play the station until something ends it

        This function will run forever until terminated by calling
        end_station.
def create_device(name, role, model, manufacturer, site):
    '''
    .. versionadded:: 2019.2.0

    Create a new device with a name, role, model, manufacturer and site.
    All these components need to be already in Netbox.

    name
        The name of the device, e.g., ``edge_router``
    role
        String of device role, e.g., ``router``
    model
        String of device model, e.g., ``MX480``
    manufacturer
        String of device manufacturer, e.g., ``Juniper``
    site
        String of device site, e.g., ``BRU``

    CLI Example:

    .. code-block:: bash

        salt myminion netbox.create_device edge_router router MX480 Juniper BRU
    '''
    try:
        nb_role = get_('dcim', 'device-roles', name=role)
        if not nb_role:
            return False

        # NOTE: manufacturer is accepted for CLI symmetry but is not
        # currently used in the device-type lookup
        nb_type = get_('dcim', 'device-types', model=model)
        if not nb_type:
            return False
        nb_site = get_('dcim', 'sites', name=site)
        if not nb_site:
            return False

        # NOTE: built here but not currently included in the payload
        status = {'label': "Active", 'value': 1}
    except RequestError as e:
        log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)
        return False

    payload = {'name': name,
               'display_name': name,
               'slug': slugify(name),
               'device_type': nb_type['id'],
               'device_role': nb_role['id'],
               'site': nb_site['id']}
    new_dev = _add('dcim', 'devices', payload)
    if new_dev:
        return {'dcim': {'devices': payload}}
    else:
        return False
.. versionadded:: 2019.2.0 Create a new device with a name, role, model, manufacturer and site. All these components need to be already in Netbox. name The name of the device, e.g., ``edge_router`` role String of device role, e.g., ``router`` model String of device model, e.g., ``MX480`` manufacturer String of device manufacturer, e.g., ``Juniper`` site String of device site, e.g., ``BRU`` CLI Example: .. code-block:: bash salt myminion netbox.create_device edge_router router MX480 Juniper BRU
def get_controller_state(self):
        """Returns the current state of the 3d mouse, a dictionary of dpos,
        rotation, grasp, and reset."""
        dpos = self.control[:3] * 0.005
        roll, pitch, yaw = self.control[3:] * 0.005
        self.grasp = self.control_gripper

        # convert RPY to an absolute orientation
        drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
        drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
        drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]

        self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3)))

        return dict(
            dpos=dpos, rotation=self.rotation, grasp=self.grasp, reset=self._reset_state
        )
Returns the current state of the 3d mouse, a dictionary of dpos,
        rotation, grasp, and reset.
def hideEvent(self, event): """ Reimplemented to disconnect signal handlers and event filter. """ super(CallTipWidget, self).hideEvent(event) self._text_edit.cursorPositionChanged.disconnect( self._cursor_position_changed) self._text_edit.removeEventFilter(self)
Reimplemented to disconnect signal handlers and event filter.
def _init_params(self, inputs, overwrite=False): """Initialize weight parameters and auxiliary states.""" inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs] input_shapes = {item.name: item.shape for item in inputs} arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes) assert arg_shapes is not None input_dtypes = {item.name: item.dtype for item in inputs} arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes) assert arg_dtypes is not None arg_names = self.symbol.list_arguments() input_names = input_shapes.keys() param_names = [key for key in arg_names if key not in input_names] aux_names = self.symbol.list_auxiliary_states() param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes) if x[0] in param_names] arg_params = {k : nd.zeros(shape=s, dtype=t) for k, s, t in param_name_attrs} aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes) if x[0] in aux_names] aux_params = {k : nd.zeros(shape=s, dtype=t) for k, s, t in aux_name_attrs} for k, v in arg_params.items(): if self.arg_params and k in self.arg_params and (not overwrite): arg_params[k][:] = self.arg_params[k][:] else: self.initializer(k, v) for k, v in aux_params.items(): if self.aux_params and k in self.aux_params and (not overwrite): aux_params[k][:] = self.aux_params[k][:] else: self.initializer(k, v) self.arg_params = arg_params self.aux_params = aux_params return (arg_names, list(param_names), aux_names)
Initialize weight parameters and auxiliary states.
def _forward_mode(self, *args):
        """Forward mode differentiation for a power function"""
        # Evaluate inner function self.f
        X: np.ndarray
        dX: np.ndarray
        X, dX = self.f._forward_mode(*args)
        # Alias the power to p for legibility
        p: float = self.p
        # The function value
        val = X ** p
        # The derivative: power rule combined with the chain rule
        diff = p * X ** (p-1) * dX
        return (val, diff)
Forward mode differentiation for a power function
def filter(self, filters):
        '''
        Apply filters to the pileup elements, and return a new Pileup with
        the non-passing elements removed.

        Parameters
        ----------
        filters : list of PileupElement -> bool callables
            A pileup element is retained if all filters return True when
            called on it.

        '''
        new_elements = [
            e for e in self.elements
            if all(function(e) for function in filters)]
        return Pileup(self.locus, new_elements)
Apply filters to the pileup elements, and return a new Pileup with
        the non-passing elements removed.

        Parameters
        ----------
        filters : list of PileupElement -> bool callables
            A pileup element is retained if all filters return True when
            called on it.
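A hedged usage sketch; the `pileup` instance and the PileupElement attributes used in the predicates are assumptions:

# Keep only elements passing every predicate
min_quality = lambda e: e.base_quality >= 20      # assumed attribute
forward_strand = lambda e: not e.is_reverse       # assumed attribute
filtered = pileup.filter([min_quality, forward_strand])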
def load(self, model_file, save_dir, verbose=True):
        """Load model from file and rebuild the model.

        :param model_file: Saved model file name.
        :type model_file: str
        :param save_dir: Saved model directory.
        :type save_dir: str
        :param verbose: Print log or not
        :type verbose: bool
        """
        if not os.path.exists(save_dir):
            self.logger.error("Loading failed... Directory does not exist.")
            # Bail out early; there is nothing to load
            return

        try:
            checkpoint = torch.load(f"{save_dir}/{model_file}")
        except BaseException:
            self.logger.error(
                f"Loading failed... Cannot load model from {save_dir}/{model_file}"
            )
            # Bail out; otherwise `checkpoint` would be undefined below
            return

        self.load_state_dict(checkpoint["model"])
        self.settings = checkpoint["config"]
        self.cardinality = checkpoint["cardinality"]
        self.name = checkpoint["name"]

        if verbose:
            self.logger.info(
                f"[{self.name}] Model loaded as {model_file} in {save_dir}"
            )
Load model from file and rebuild the model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool
def safe_call(self, kwargs, args=None): """ Call the underlying function safely, given a set of keyword arguments. If successful, the function return value (likely None) will be returned. If the underlying function raises an exception, the return value will be the exception message, unless an argparse Namespace object defining a 'debug' attribute of True is provided; in this case, the exception will be re-raised. :param kwargs: A dictionary of keyword arguments to pass to the underlying function. :param args: If provided, this should be a Namespace object with a 'debug' attribute set to a boolean value. :returns: The function return value, or the string value of the exception raised by the function. """ # Now let's call the function try: return self._func(**kwargs) except Exception as exc: if args and getattr(args, 'debug', False): raise return str(exc)
Call the underlying function safely, given a set of keyword arguments. If successful, the function return value (likely None) will be returned. If the underlying function raises an exception, the return value will be the exception message, unless an argparse Namespace object defining a 'debug' attribute of True is provided; in this case, the exception will be re-raised. :param kwargs: A dictionary of keyword arguments to pass to the underlying function. :param args: If provided, this should be a Namespace object with a 'debug' attribute set to a boolean value. :returns: The function return value, or the string value of the exception raised by the function.
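A hedged usage sketch; `wrapper` is an assumed instance whose `_func` adds two numbers:

import argparse

args = argparse.Namespace(debug=False)
result = wrapper.safe_call({'a': 1, 'b': 2}, args)
# On success: the wrapped function's return value (here 3).
# On error with debug=False (or args=None): the exception message string.
# On error with debug=True: the exception is re-raised.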
def _get_gecos(name): ''' Retrieve GECOS field info and return it in dictionary form ''' gecos_field = pwd.getpwnam(name).pw_gecos.split(',', 3) if not gecos_field: return {} else: # Assign empty strings for any unspecified trailing GECOS fields while len(gecos_field) < 4: gecos_field.append('') return {'fullname': six.text_type(gecos_field[0]), 'roomnumber': six.text_type(gecos_field[1]), 'workphone': six.text_type(gecos_field[2]), 'homephone': six.text_type(gecos_field[3])}
Retrieve GECOS field info and return it in dictionary form
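For illustration, the comma-split that drives the mapping (the GECOS string is hypothetical):

gecos = "Jane Doe,Room 101,555-0100,555-0199".split(',', 3)
# -> ['Jane Doe', 'Room 101', '555-0100', '555-0199']
# which maps to {'fullname': 'Jane Doe', 'roomnumber': 'Room 101',
#                'workphone': '555-0100', 'homephone': '555-0199'};
# missing trailing fields are padded with empty strings.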
def _merge_dicts(first, second):
    """Recursively merge the 'second' dictionary into the 'first' one,
    returning a new deep copy."""
    new = deepcopy(first)
    for k, v in second.items():
        if isinstance(v, dict) and v:
            ret = _merge_dicts(new.get(k, dict()), v)
            new[k] = ret
        else:
            new[k] = second[k]
    return new
Recursively merge the 'second' dictionary into the 'first' one,
    returning a new deep copy.
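For example (values hypothetical):

defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 5433}, 'debug': True}
merged = _merge_dicts(defaults, overrides)
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
# Nested dicts are merged key-by-key; scalar values from `second` win.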
def random(cls, engine_or_session, limit=5):
        """
        Return up to ``limit`` randomly ordered ORM instances.

        :type engine_or_session: Union[Engine, Session]
        :type limit: int

        :rtype: List[ExtendedBase]
        """
        ses, auto_close = ensure_session(engine_or_session)
        result = ses.query(cls).order_by(func.random()).limit(limit).all()
        if auto_close:  # pragma: no cover
            ses.close()
        return result
Return up to ``limit`` randomly ordered ORM instances.

        :type engine_or_session: Union[Engine, Session]
        :type limit: int

        :rtype: List[ExtendedBase]
def swd_read8(self, offset): """Gets a unit of ``8`` bits from the input buffer. Args: self (JLink): the ``JLink`` instance offset (int): the offset (in bits) from which to start reading Returns: The integer read from the input buffer. """ value = self._dll.JLINK_SWD_GetU8(offset) return ctypes.c_uint8(value).value
Gets a unit of ``8`` bits from the input buffer. Args: self (JLink): the ``JLink`` instance offset (int): the offset (in bits) from which to start reading Returns: The integer read from the input buffer.
def scatter_plot(self, ax, topic_dims, t=None, ms_limits=True, **kwargs_plot):
        """ 2D or 3D scatter plot.

        :param axes ax: matplotlib axes (use Axes3D if 3D data)

        :param tuple topic_dims: list of (topic, dims) tuples, where topic is a string and dims is a list of dimensions to be plotted for that topic.

        :param int t: time indexes to be plotted

        :param dict kwargs_plot: arguments to be passed to matplotlib's plot function, e.g. the style of the plotted points 'or'

        :param bool ms_limits: if set to True, automatically set axes boundaries to the sensorimotor boundaries (default: True)
        """
        plot_specs = {'marker': 'o', 'linestyle': 'None'}
        plot_specs.update(kwargs_plot)
        # t_bound = float('inf')
        # if t is None:
        #     for topic, _ in topic_dims:
        #         t_bound = min(t_bound, self.counts[topic])
        #     t = range(t_bound)
        # data = self.pack(topic_dims, t)
        data = self.data_t(topic_dims, t)
        ax.plot(*(data.T), **plot_specs)
        if ms_limits:
            ax.axis(self.axes_limits(topic_dims))
2D or 3D scatter plot.

        :param axes ax: matplotlib axes (use Axes3D if 3D data)

        :param tuple topic_dims: list of (topic, dims) tuples, where topic is a string and dims is a list of dimensions to be plotted for that topic.

        :param int t: time indexes to be plotted

        :param dict kwargs_plot: arguments to be passed to matplotlib's plot function, e.g. the style of the plotted points 'or'

        :param bool ms_limits: if set to True, automatically set axes boundaries to the sensorimotor boundaries (default: True)
def humanize_time(secs):
    """Convert seconds into a human-readable hh:mm:ss format
    (with ms/s forms for short durations).
    """
    if secs is None:
        return '--'

    if secs < 1:
        return "{:.2f}ms".format(secs*1000)
    elif secs < 10:
        return "{:.2f}s".format(secs)
    else:
        mins, secs = divmod(secs, 60)
        hours, mins = divmod(mins, 60)
        return '{:02d}:{:02d}:{:02d}'.format(int(hours), int(mins), int(secs))
Convert seconds into a human-readable hh:mm:ss format
    (with ms/s forms for short durations).
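Sample outputs:

humanize_time(None)   # '--'
humanize_time(0.004)  # '4.00ms'
humanize_time(7.5)    # '7.50s'
humanize_time(3725)   # '01:02:05'  (1 h, 2 min, 5 s)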
def send_message(self, message): """ Dispatch a message using 0mq """ with self._instance_lock: if message is None: Global.LOGGER.error("can't deliver a null messages") return if message.sender is None: Global.LOGGER.error(f"can't deliver anonymous messages with body {message.body}") return if message.receiver is None: Global.LOGGER.error( f"can't deliver message from {message.sender}: recipient not specified") return if message.message is None: Global.LOGGER.error(f"can't deliver message with no body from {message.sender}") return sender = "*" + message.sender + "*" self.socket.send_multipart( [bytes(sender, 'utf-8'), pickle.dumps(message)]) if Global.CONFIG_MANAGER.tracing_mode: Global.LOGGER.debug("dispatched : " + message.sender + "-" + message.message + "-" + message.receiver) self.dispatched = self.dispatched + 1
Dispatch a message using 0mq
def get_names_owned_by_address(address, proxy=None, hostport=None): """ Get the names owned by an address. Returns the list of names on success Returns {'error': ...} on error """ assert proxy or hostport, 'Need proxy or hostport' if proxy is None: proxy = connect_hostport(hostport) owned_schema = { 'type': 'object', 'properties': { 'names': { 'type': 'array', 'items': { 'type': 'string', 'uniqueItems': True }, }, }, 'required': [ 'names', ], } schema = json_response_schema( owned_schema ) resp = {} try: resp = proxy.get_names_owned_by_address(address) resp = json_validate(schema, resp) if json_is_error(resp): return resp # names must be valid for n in resp['names']: assert is_name_valid(str(n)), ('Invalid name "{}"'.format(str(n))) except ValidationError as ve: if BLOCKSTACK_DEBUG: log.exception(ve) resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502} return resp except AssertionError as e: if BLOCKSTACK_DEBUG: log.exception(e) resp = {'error': 'Got an invalid name from the server'} return resp except socket.timeout: log.error("Connection timed out") resp = {'error': 'Connection to remote host timed out.', 'http_status': 503} return resp except socket.error as se: log.error("Connection error {}".format(se.errno)) resp = {'error': 'Connection to remote host failed.', 'http_status': 502} return resp except Exception as ee: if BLOCKSTACK_DEBUG: log.exception(ee) log.error("Caught exception while connecting to Blockstack node: {}".format(ee)) resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500} return resp return resp['names']
Get the names owned by an address. Returns the list of names on success Returns {'error': ...} on error
def get_eval_func(obj, feature, slice=np.s_[...]): """ Return the function of interest (kernel or mean) for the expectation depending on the type of :obj: and whether any features are given """ if feature is not None: # kernel + feature combination if not isinstance(feature, InducingFeature) or not isinstance(obj, kernels.Kernel): raise TypeError("If `feature` is supplied, `obj` must be a kernel.") return lambda x: tf.transpose(Kuf(feature, obj, x))[slice] elif isinstance(obj, mean_functions.MeanFunction): return lambda x: obj(x)[slice] elif isinstance(obj, kernels.Kernel): return lambda x: obj.Kdiag(x) else: raise NotImplementedError()
Return the function of interest (kernel or mean) for the expectation depending on the type of :obj: and whether any features are given
def frame_received_cb(self, frame): """Received message.""" PYVLXLOG.debug("REC: %s", frame) for frame_received_cb in self.frame_received_cbs: # pylint: disable=not-callable self.loop.create_task(frame_received_cb(frame))
Received message.
def pop(self): """Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, "stack", None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop()
Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty.
def from_ymd_to_excel(year, month, day): """ converts date as `(year, month, day)` tuple into Microsoft Excel representation style :param tuple(int, int, int): int tuple `year, month, day` :return int: """ if not is_valid_ymd(year, month, day): raise ValueError("Invalid date {0}.{1}.{2}".format(year, month, day)) days = _cum_month_days[month - 1] + day days += 1 if (is_leap_year(year) and month > 2) else 0 years_distance = year - 1900 days += years_distance * 365 + \ (years_distance + 3) // 4 - (years_distance + 99) // 100 + (years_distance + 299) // 400 # count days since 30.12.1899 (excluding 30.12.1899) (workaround for excel bug) days += 1 if (year, month, day) > (1900, 2, 28) else 0 return days
converts date as `(year, month, day)` tuple into Microsoft Excel representation style :param tuple(int, int, int): int tuple `year, month, day` :return int:
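Two boundary values plus a modern date, also exercising the Lotus leap-year work-around:

from_ymd_to_excel(1900, 1, 1)   # 1  -> Excel's day 1
from_ymd_to_excel(1900, 3, 1)   # 61 -> one extra day past the fake 1900-02-29
from_ymd_to_excel(2020, 1, 1)   # 43831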
def warn(message, category=None, stacklevel=1, emitstacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception.

    Duplicate of the standard library warn function except it takes the
    following argument:

    `emitstacklevel` : default to 1, number of stackframe to consider when
    matching the module that emits this warning.
    """
    # Check if message is already a Warning object
    ####################
    ### Get category ###
    ####################
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    if not (isinstance(category, type) and issubclass(category, Warning)):
        raise TypeError("category must be a Warning subclass, "
                        "not '{:s}'".format(type(category).__name__))
    # Get context information
    try:
        frame = _get_stack_frame(stacklevel)
    except ValueError:
        globals = sys.__dict__
        lineno = 1
    else:
        globals = frame.f_globals
        lineno = frame.f_lineno
    try:
        eframe = _get_stack_frame(emitstacklevel)
    except ValueError:
        eglobals = sys.__dict__
    else:
        eglobals = eframe.f_globals
    if '__name__' in eglobals:
        emodule = eglobals['__name__']
    else:
        emodule = "<string>"
    ####################
    ###  Get module  ###
    ####################
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    ####################
    ### Get filename ###
    ####################
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith(".pyc"):
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals, emit_module=emodule)
Issue a warning, or maybe ignore it or raise an exception. Duplicate of the standard library warn function except it takes the following argument: `emitstacklevel` : default to 1, number of stackframe to consider when matching the module that emits this warning.
def interpret(self, values={}, functions={}): """Like `substitute`, but forces the interpreter (rather than the compiled version) to be used. The interpreter includes exception-handling code for missing variables and buggy template functions but is much slower. """ return self.expr.evaluate(Environment(values, functions))
Like `substitute`, but forces the interpreter (rather than the compiled version) to be used. The interpreter includes exception-handling code for missing variables and buggy template functions but is much slower.
def _group_paths_without_options(cls, line_parse_result): """ Given a parsed options specification as a list of groups, combine groups without options with the first subsequent group which has options. A line of the form 'A B C [opts] D E [opts_2]' results in [({A, B, C}, [opts]), ({D, E}, [opts_2])] """ active_pathspecs = set() for group in line_parse_result: active_pathspecs.add(group['pathspec']) has_options = ( 'norm_options' in group or 'plot_options' in group or 'style_options' in group ) if has_options: yield active_pathspecs, group active_pathspecs = set() if active_pathspecs: yield active_pathspecs, {}
Given a parsed options specification as a list of groups, combine groups without options with the first subsequent group which has options. A line of the form 'A B C [opts] D E [opts_2]' results in [({A, B, C}, [opts]), ({D, E}, [opts_2])]
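A hedged sketch of the grouping behaviour; the parse-result dictionaries below follow the shape implied by the code (only 'pathspec' plus optional '*_options' keys):

groups = [
    {'pathspec': 'A'},
    {'pathspec': 'B'},
    {'pathspec': 'C', 'plot_options': {'color': 'red'}},
    {'pathspec': 'D'},
    {'pathspec': 'E', 'plot_options': {'color': 'blue'}},
]
# list(cls._group_paths_without_options(groups)) would yield:
# ({'A', 'B', 'C'}, {'pathspec': 'C', 'plot_options': {'color': 'red'}})
# ({'D', 'E'},      {'pathspec': 'E', 'plot_options': {'color': 'blue'}})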
def SetTimeZone(self, time_zone): """Sets the time zone. Args: time_zone (str): time zone. Raises: ValueError: if the timezone is not supported. """ try: self._time_zone = pytz.timezone(time_zone) except (AttributeError, pytz.UnknownTimeZoneError): raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))
Sets the time zone. Args: time_zone (str): time zone. Raises: ValueError: if the timezone is not supported.
def follow(name: str) -> snug.Query[bool]: """follow another user""" request = snug.PUT(f'https://api.github.com/user/following/{name}') response = yield request return response.status_code == 204
follow another user
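A hedged execution sketch; snug queries are run through an executor, and the credentials here are placeholders:

import snug

# Returns True when GitHub answers 204 No Content
followed = snug.execute(follow('octocat'), auth=('my-user', 'my-token'))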
def Parse(self, statentry, file_object, knowledge_base): """Parse the Plist file.""" _ = knowledge_base kwargs = {} try: kwargs["aff4path"] = file_object.urn except AttributeError: pass direct_copy_items = [ "Label", "Disabled", "UserName", "GroupName", "Program", "StandardInPath", "StandardOutPath", "StandardErrorPath", "LimitLoadToSessionType", "EnableGlobbing", "EnableTransactions", "OnDemand", "RunAtLoad", "RootDirectory", "WorkingDirectory", "Umask", "TimeOut", "ExitTimeOut", "ThrottleInterval", "InitGroups", "StartOnMount", "StartInterval", "Debug", "WaitForDebugger", "Nice", "ProcessType", "AbandonProcessGroup", "LowPriorityIO", "LaunchOnlyOnce" ] string_array_items = [ "LimitLoadToHosts", "LimitLoadFromHosts", "LimitLoadToSessionType", "ProgramArguments", "WatchPaths", "QueueDirectories" ] flag_only_items = ["SoftResourceLimits", "HardResourceLimits", "Sockets"] plist = {} try: plist = biplist.readPlist(file_object) except (biplist.InvalidPlistException, ValueError, IOError) as e: plist["Label"] = "Could not parse plist: %s" % e # These are items that can be directly copied for key in direct_copy_items: kwargs[key] = plist.get(key) # These could be a string, they could be an array, we don't know and neither # does Apple so we check. for key in string_array_items: elements = plist.get(key) if isinstance(elements, string_types): kwargs[key] = [elements] else: kwargs[key] = elements # These are complex items that can appear in multiple data structures # so we only flag on their existence for key in flag_only_items: if plist.get(key): kwargs[key] = True if plist.get("inetdCompatability") is not None: kwargs["inetdCompatabilityWait"] = plist.get("inetdCompatability").get( "Wait") keepalive = plist.get("KeepAlive") if isinstance(keepalive, bool) or keepalive is None: kwargs["KeepAlive"] = keepalive else: keepalivedict = {} keepalivedict["SuccessfulExit"] = keepalive.get("SuccessfulExit") keepalivedict["NetworkState"] = keepalive.get("NetworkState") pathstates = keepalive.get("PathState") if pathstates is not None: keepalivedict["PathState"] = [] for pathstate in pathstates: keepalivedict["PathState"].append( rdf_plist.PlistBoolDictEntry( name=pathstate, value=pathstates[pathstate])) otherjobs = keepalive.get("OtherJobEnabled") if otherjobs is not None: keepalivedict["OtherJobEnabled"] = [] for otherjob in otherjobs: keepalivedict["OtherJobEnabled"].append( rdf_plist.PlistBoolDictEntry( name=otherjob, value=otherjobs[otherjob])) kwargs["KeepAliveDict"] = rdf_plist.LaunchdKeepAlive(**keepalivedict) envvars = plist.get("EnvironmentVariables") if envvars is not None: kwargs["EnvironmentVariables"] = [] for envvar in envvars: kwargs["EnvironmentVariables"].append( rdf_plist.PlistStringDictEntry(name=envvar, value=envvars[envvar])) startcalendarinterval = plist.get("StartCalendarInterval") if startcalendarinterval is not None: if isinstance(startcalendarinterval, dict): kwargs["StartCalendarInterval"] = [ rdf_plist.LaunchdStartCalendarIntervalEntry( Minute=startcalendarinterval.get("Minute"), Hour=startcalendarinterval.get("Hour"), Day=startcalendarinterval.get("Day"), Weekday=startcalendarinterval.get("Weekday"), Month=startcalendarinterval.get("Month")) ] else: kwargs["StartCalendarInterval"] = [] for entry in startcalendarinterval: kwargs["StartCalendarInterval"].append( rdf_plist.LaunchdStartCalendarIntervalEntry( Minute=entry.get("Minute"), Hour=entry.get("Hour"), Day=entry.get("Day"), Weekday=entry.get("Weekday"), Month=entry.get("Month"))) yield rdf_plist.LaunchdPlist(**kwargs)
Parse the Plist file.
def volume(self): """ The volume of the primitive extrusion. Calculated from polygon and height to avoid mesh creation. Returns ---------- volume: float, volume of 3D extrusion """ volume = abs(self.primitive.polygon.area * self.primitive.height) return volume
The volume of the primitive extrusion. Calculated from polygon and height to avoid mesh creation. Returns ---------- volume: float, volume of 3D extrusion
def printer(self, message, color_level='info'):
        """Print a message to screen and log it.

        :param message: item to print to screen
        :param color_level: color/log level used when colorized output is
                            enabled (default: 'info')
        """
        if self.job_args.get('colorized'):
            print(cloud_utils.return_colorized(msg=message, color=color_level))
        else:
            print(message)
Print a message to screen and log it.

        :param message: item to print to screen
        :param color_level: color/log level used when colorized output is
                            enabled (default: 'info')
def add_substitution(self, substitution):
        """Add a substitution to the email

        :param substitution: Substitution to add to the email
        :type substitution: Substitution
        """
        if substitution.personalization:
            try:
                personalization = \
                    self._personalizations[substitution.personalization]
                has_internal_personalization = True
            except IndexError:
                personalization = Personalization()
                has_internal_personalization = False
            personalization.add_substitution(substitution)

            if not has_internal_personalization:
                self.add_personalization(
                    personalization, index=substitution.personalization)
        else:
            if isinstance(substitution, list):
                for s in substitution:
                    for p in self.personalizations:
                        p.add_substitution(s)
            else:
                for p in self.personalizations:
                    p.add_substitution(substitution)
Add a substitution to the email

        :param substitution: Substitution to add to the email
        :type substitution: Substitution
def _GetDatabaseConfig(self):
    """
    Get all configuration from database.

    This includes values from the Config table as well as populating lists
    for supported formats and ignored directories from their respective
    database tables.
    """
    goodlogging.Log.Seperator()
    goodlogging.Log.Info("CLEAR", "Getting configuration variables...")
    goodlogging.Log.IncreaseIndent()

    # SOURCE DIRECTORY
    if self._sourceDir is None:
        self._sourceDir = self._GetConfigValue('SourceDir', 'source directory')

    # TV DIRECTORY
    if self._inPlaceRename is False and self._tvDir is None:
        self._tvDir = self._GetConfigValue('TVDir', 'tv directory')

    # ARCHIVE DIRECTORY
    self._archiveDir = self._GetConfigValue('ArchiveDir', 'archive directory', isDir = False)

    # SUPPORTED FILE FORMATS
    self._supportedFormatsList = self._GetSupportedFormats()

    # IGNORED DIRECTORIES
    self._ignoredDirsList = self._GetIgnoredDirs()

    goodlogging.Log.NewLine()
    goodlogging.Log.Info("CLEAR", "Configuration is:")
    goodlogging.Log.IncreaseIndent()
    goodlogging.Log.Info("CLEAR", "Source directory = {0}".format(self._sourceDir))
    goodlogging.Log.Info("CLEAR", "TV directory = {0}".format(self._tvDir))
    goodlogging.Log.Info("CLEAR", "Supported formats = {0}".format(self._supportedFormatsList))
    goodlogging.Log.Info("CLEAR", "Ignored directory list = {0}".format(self._ignoredDirsList))
    goodlogging.Log.ResetIndent()
Get all configuration from database. This includes values from the Config table as well as populating lists for supported formats and ignored directories from their respective database tables.
def _green_worker(self): """ A worker that does actual jobs """ while not self.quit.is_set(): try: task = self.green_queue.get(timeout=1) timestamp, missile, marker = task planned_time = self.start_time + (timestamp / 1000.0) delay = planned_time - time.time() if delay > 0: time.sleep(delay) try: with self.instance_counter.get_lock(): self.instance_counter.value += 1 self.gun.shoot(missile, marker) finally: with self.instance_counter.get_lock(): self.instance_counter.value -= 1 self._free_threads_count += 1 except (KeyboardInterrupt, SystemExit): break except Empty: continue except Full: logger.warning("Couldn't put to result queue because it's full") except Exception: logger.exception("Bfg shoot exception")
A worker that does actual jobs
def display_molecule(mol, autozoom=True):
    '''Display a `~chemlab.core.Molecule` instance in the viewer.

    This function wraps the molecule in a system before displaying
    it.

    '''
    s = System([mol])
    # Forward the caller's autozoom choice instead of hard-coding True
    display_system(s, autozoom=autozoom)
Display a `~chemlab.core.Molecule` instance in the viewer. This function wraps the molecule in a system before displaying it.
def parseGTF(inGTF):
    """
    Reads and extracts all attributes in the attributes section of a GTF
    and constructs a new dataframe with one column per attribute instead
    of the attributes column

    :param inGTF: GTF dataframe to be parsed
    :returns: a dataframe of the original input GTF with attributes parsed.

    """
    desc=attributesGTF(inGTF)
    ref=inGTF.copy()
    ref.reset_index(inplace=True, drop=True)
    df=ref.drop(['attribute'],axis=1).copy()
    for d in desc:
        field=retrieve_GTF_field(d,ref)
        df=pd.concat([df,field],axis=1)
    return df
Reads and extracts all attributes in the attributes section of a GTF
    and constructs a new dataframe with one column per attribute instead
    of the attributes column

    :param inGTF: GTF dataframe to be parsed
    :returns: a dataframe of the original input GTF with attributes parsed.
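A hedged usage sketch; the nine column names follow the GTF convention and the file path is hypothetical:

import pandas as pd

cols = ["seqname", "source", "feature", "start", "end",
        "score", "strand", "frame", "attribute"]
gtf = pd.read_csv("annotation.gtf", sep="\t", comment="#",
                  header=None, names=cols)
parsed = parseGTF(gtf)  # one extra column per attribute key (gene_id, ...)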
def sql_fingerprint(query, hide_columns=True): """ Simplify a query, taking away exact values and fields selected. Imperfect but better than super explicit, value-dependent queries. """ parsed_query = parse(query)[0] sql_recursively_simplify(parsed_query, hide_columns=hide_columns) return str(parsed_query)
Simplify a query, taking away exact values and fields selected. Imperfect but better than super explicit, value-dependent queries.
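An illustrative call; the exact masked form depends on sql_recursively_simplify, but the idea is:

sql_fingerprint("SELECT name, email FROM auth_user WHERE id = 1")
# -> roughly "SELECT ... FROM auth_user WHERE id = #"
# (literal values masked; with hide_columns=True, column lists collapsed)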
def run(self, args):
        """
        Gives user permission based on auth_role arg and sends email to that user.
        :param args Namespace arguments parsed from the command line
        """
        email = args.email                # email of person to send email to
        username = args.username          # username of person to send email to, will be None if email is specified
        force_send = args.resend          # is this a resend so we should force sending
        auth_role = args.auth_role        # authorization role (project permissions) to give to the user
        msg_file = args.msg_file          # message file whose contents will be sent with the share
        message = read_argument_file_contents(msg_file)
        print("Sharing project.")
        to_user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
        try:
            project = self.fetch_project(args, must_exist=True, include_children=False)
            dest_email = self.service.share(project, to_user, force_send, auth_role, message)
            print("Share email message sent to " + dest_email)
        except D4S2Error as ex:
            if ex.warning:
                print(ex.message)
            else:
                raise
Gives user permission based on auth_role arg and sends email to that user. :param args Namespace arguments parsed from the command line
def from_string(contents): """ Creates GaussianInput from a string. Args: contents: String representing an Gaussian input file. Returns: GaussianInput object """ lines = [l.strip() for l in contents.split("\n")] link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)") link0_dict = {} for i, l in enumerate(lines): if link0_patt.match(l): m = link0_patt.match(l) link0_dict[m.group(1).strip("=")] = m.group(2) route_patt = re.compile(r"^#[sSpPnN]*.*") route = "" route_index = None for i, l in enumerate(lines): if route_patt.match(l): route += " " + l route_index = i # This condition allows for route cards spanning multiple lines elif (l == "" or l.isspace()) and route_index: break functional, basis_set, route_paras, dieze_tag = read_route_line(route) ind = 2 title = [] while lines[route_index + ind].strip(): title.append(lines[route_index + ind].strip()) ind += 1 title = ' '.join(title) ind += 1 toks = re.split(r"[,\s]+", lines[route_index + ind]) charge = int(toks[0]) spin_mult = int(toks[1]) coord_lines = [] spaces = 0 input_paras = {} ind += 1 for i in range(route_index + ind, len(lines)): if lines[i].strip() == "": spaces += 1 if spaces >= 2: d = lines[i].split("=") if len(d) == 2: input_paras[d[0]] = d[1] else: coord_lines.append(lines[i].strip()) mol = GaussianInput._parse_coords(coord_lines) mol.set_charge_and_spin(charge, spin_mult) return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult, title=title, functional=functional, basis_set=basis_set, route_parameters=route_paras, input_parameters=input_paras, link0_parameters=link0_dict, dieze_tag=dieze_tag)
Creates GaussianInput from a string. Args: contents: String representing an Gaussian input file. Returns: GaussianInput object
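A minimal round-trip sketch with a hypothetical water-optimization input:

gau_str = """%mem=1GB
#P B3LYP/6-31G(d) Opt

Water optimization

0 1
O 0.000 0.000 0.000
H 0.757 0.586 0.000
H -0.757 0.586 0.000

"""
gin = GaussianInput.from_string(gau_str)
print(gin.charge, gin.spin_multiplicity)  # 0 1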
def access_token(self): """Get access_token.""" if self.cache_token: return self.access_token_ or \ self._resolve_credential('access_token') return self.access_token_
Get access_token.