Dataset columns: code (string, lengths 75 to 104k characters) and docstring (string, lengths 1 to 46.9k characters).
def _cache_key(self, attr_name): """ Memcache keys can't have spaces in them, so we'll remove them from the DN for maximum compatibility. """ dn = self._ldap_user.dn return valid_cache_key( "auth_ldap.{}.{}.{}".format(self.__class__.__name__, attr_name, dn) )
Memcache keys can't have spaces in them, so we'll remove them from the DN for maximum compatibility.
def _update_record(self, record_id, name, address, ttl): """Updates an existing record.""" data = json.dumps({'record': {'name': name, 'content': address, 'ttl': ttl}}) headers = {'Content-Type': 'application/json'} request = self._session.put(self._baseurl + '/%d' % record_id, data=data, headers=headers) if not request.ok: raise RuntimeError('Failed to update record: %s - %s' % (self._format_hostname(name), request.json())) record = request.json() if 'record' not in record or 'id' not in record['record']: raise RuntimeError('Invalid record JSON format: %s - %s' % (self._format_hostname(name), request.json())) return record['record']
Updates an existing record.
def _get_bandgap_from_bands(energies, nelec): """Compute difference in conduction band min and valence band max""" nelec = int(nelec) valence = [x[nelec-1] for x in energies] conduction = [x[nelec] for x in energies] return max(min(conduction) - max(valence), 0.0)
Compute difference in conduction band min and valence band max
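For illustration, a minimal sketch of how this helper behaves on made-up eigenvalue data (assuming the function above is in scope; in real use `energies` comes from a band-structure calculation):

# Hypothetical eigenvalues (eV) at two k-points, four bands each; values are illustrative only.
energies = [
    [-5.1, -1.2, 2.3, 4.0],   # k-point 1
    [-4.8, -0.9, 1.8, 3.9],   # k-point 2
]
nelec = 2  # band index nelec-1 is treated as the valence band top

# valence band maximum  = max(-1.2, -0.9) = -0.9
# conduction band minimum = min(2.3, 1.8) =  1.8
gap = _get_bandgap_from_bands(energies, nelec)
print(gap)  # about 2.7; clamped to 0.0 for metallic (overlapping) cases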
def file_download_using_requests(self,url): '''It will download file specified by url using requests module''' file_name=url.split('/')[-1] if os.path.exists(os.path.join(os.getcwd(),file_name)): print 'File already exists' return #print 'Downloading file %s '%file_name #print 'Downloading from %s'%url try: r=requests.get(url,stream=True,timeout=200) except requests.exceptions.SSLError: try: r=requests.get(url,stream=True,verify=False,timeout=200) except requests.exceptions.RequestException as e: print e quit() except requests.exceptions.RequestException as e: print e quit() chunk_size = 1024 total_size = int(r.headers['Content-Length']) total_chunks = total_size/chunk_size file_iterable = r.iter_content(chunk_size = chunk_size) tqdm_iter = tqdm(iterable = file_iterable,total = total_chunks,unit = 'KB', leave = False ) with open(file_name,'wb') as f: for data in tqdm_iter: f.write(data) #total_size=float(r.headers['Content-Length'])/(1024*1024) '''print 'Total size of file to be downloaded %.2f MB '%total_size total_downloaded_size=0.0 with open(file_name,'wb') as f: for chunk in r.iter_content(chunk_size=1*1024*1024): if chunk: size_of_chunk=float(len(chunk))/(1024*1024) total_downloaded_size+=size_of_chunk print '{0:.0%} Downloaded'.format(total_downloaded_size/total_size) f.write(chunk)''' print 'Downloaded file %s '%file_name
It will download file specified by url using requests module
def _copy_dist_from_dir(link_path, location): """Copy distribution files in `link_path` to `location`. Invoked when user requests to install a local directory. E.g.: pip install . pip install ~/dev/git-repos/python-prompt-toolkit """ # Note: This is currently VERY SLOW if you have a lot of data in the # directory, because it copies everything with `shutil.copytree`. # What it should really do is build an sdist and install that. # See https://github.com/pypa/pip/issues/2195 if os.path.isdir(location): rmtree(location) # build an sdist setup_py = 'setup.py' sdist_args = [sys.executable] sdist_args.append('-c') sdist_args.append(SETUPTOOLS_SHIM % setup_py) sdist_args.append('sdist') sdist_args += ['--dist-dir', location] logger.info('Running setup.py sdist for %s', link_path) with indent_log(): call_subprocess(sdist_args, cwd=link_path, show_stdout=False) # unpack sdist into `location` sdist = os.path.join(location, os.listdir(location)[0]) logger.info('Unpacking sdist %s into %s', sdist, location) unpack_file(sdist, location, content_type=None, link=None)
Copy distribution files in `link_path` to `location`. Invoked when user requests to install a local directory. E.g.: pip install . pip install ~/dev/git-repos/python-prompt-toolkit
async def eventuallyAll(*coroFuncs: FlexFunc, # (use functools.partials if needed) totalTimeout: float, retryWait: float=0.1, acceptableExceptions=None, acceptableFails: int=0, override_timeout_limit=False): # TODO: Bug when `acceptableFails` > 0 if the first check fails, it will # exhaust the entire timeout. """ :param coroFuncs: iterable of no-arg functions :param totalTimeout: :param retryWait: :param acceptableExceptions: :param acceptableFails: how many of the passed in coroutines can ultimately fail and still be ok :return: """ start = time.perf_counter() def remaining(): return totalTimeout + start - time.perf_counter() funcNames = [] others = 0 fails = 0 rem = None for cf in coroFuncs: if len(funcNames) < 2: funcNames.append(get_func_name(cf)) else: others += 1 # noinspection PyBroadException try: rem = remaining() if rem <= 0: break await eventually(cf, retryWait=retryWait, timeout=rem, acceptableExceptions=acceptableExceptions, verbose=True, override_timeout_limit=override_timeout_limit) except Exception as ex: if acceptableExceptions and type(ex) not in acceptableExceptions: raise fails += 1 logger.debug("a coro {} with args {} timed out without succeeding; fail count: " "{}, acceptable: {}". format(get_func_name(cf), get_func_args(cf), fails, acceptableFails)) if fails > acceptableFails: raise if rem is not None and rem <= 0: fails += 1 if fails > acceptableFails: err = 'All checks could not complete successfully since total timeout ' \ 'expired {} sec ago'.format(-1 * rem if rem < 0 else 0) raise EventuallyTimeoutException(err) if others: funcNames.append("and {} others".format(others)) desc = ", ".join(funcNames) logger.debug("{} succeeded with {:.2f} seconds to spare". format(desc, remaining()))
:param coroFuncs: iterable of no-arg functions :param totalTimeout: :param retryWait: :param acceptableExceptions: :param acceptableFails: how many of the passed in coroutines can ultimately fail and still be ok :return:
def getmembers(object, predicate=None): """Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.""" if inspect.isclass(object): mro = (object,) + inspect.getmro(object) else: mro = () results = [] processed = set() names = dir(object) # add any DynamicClassAttributes to the list of names if object is a class; # this may result in duplicate entries if, for example, a virtual # attribute with the same name as a DynamicClassAttribute exists try: for base in object.__bases__: for k, v in base.__dict__.items(): if isinstance(v, types.DynamicClassAttribute): names.append(k) except AttributeError: pass for key in names: # First try to get the value via getattr. Some descriptors don't # like calling their __get__ (see bug #1785), so fall back to # looking in the __dict__. try: value = getattr(object, key) # handle the duplicate key if key in processed: raise AttributeError except AttributeError: for base in mro: if key in base.__dict__: value = base.__dict__[key] break else: # could be a (currently) missing slot member, or a buggy # __dir__; discard and move on continue except Exception as e: value = (RAISES_EXCEPTION, e) if not predicate or predicate(value): results.append((key, value)) processed.add(key) results.sort(key=lambda pair: pair[0]) return results
Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.
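A small usage sketch, assuming the getmembers variant above is pasted into the same module along with its inspect/types imports and the RAISES_EXCEPTION sentinel; for a simple class it behaves like the standard inspect.getmembers:

import inspect
import types

class Greeter:
    """Toy class used only for this example."""
    greeting = "hello"

    def greet(self, name):
        return "{} {}".format(self.greeting, name)

# All members, sorted by name (dunders included).
members = dict(getmembers(Greeter))
print(members["greeting"])          # 'hello'

# Only plain functions, via the optional predicate.
funcs = getmembers(Greeter, predicate=inspect.isfunction)
print([name for name, _ in funcs])  # ['greet']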
def sr(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): """Send and receive packets at layer 3""" s = conf.L3socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter) result = sndrcv(s, x, *args, **kargs) s.close() return result
Send and receive packets at layer 3
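Typical usage, sketched from the standard Scapy pattern (sending normally requires root privileges; the destination address is a placeholder):

from scapy.all import IP, ICMP, sr

# Send one ICMP echo request at layer 3 and collect answered/unanswered packets.
ans, unans = sr(IP(dst="192.0.2.1") / ICMP(), timeout=2, verbose=0)
ans.summary()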
def _ensure_api_keys(task_desc, failure_ret=None): """Wrap Elsevier methods which directly use the API keys. Ensure that the keys are retrieved from the environment or config file when first called, and store global scope. Subsequently use globally stashed results and check for required ids. """ def check_func_wrapper(func): @wraps(func) def check_api_keys(*args, **kwargs): global ELSEVIER_KEYS if ELSEVIER_KEYS is None: ELSEVIER_KEYS = {} # Try to read in Elsevier API keys. For each key, first check # the environment variables, then check the INDRA config file. if not has_config(INST_KEY_ENV_NAME): logger.warning('Institution API key %s not found in config ' 'file or environment variable: this will ' 'limit access for %s' % (INST_KEY_ENV_NAME, task_desc)) ELSEVIER_KEYS['X-ELS-Insttoken'] = get_config(INST_KEY_ENV_NAME) if not has_config(API_KEY_ENV_NAME): logger.error('API key %s not found in configuration file ' 'or environment variable: cannot %s' % (API_KEY_ENV_NAME, task_desc)) return failure_ret ELSEVIER_KEYS['X-ELS-APIKey'] = get_config(API_KEY_ENV_NAME) elif 'X-ELS-APIKey' not in ELSEVIER_KEYS.keys(): logger.error('No Elsevier API key %s found: cannot %s' % (API_KEY_ENV_NAME, task_desc)) return failure_ret return func(*args, **kwargs) return check_api_keys return check_func_wrapper
Wrap Elsevier methods which directly use the API keys. Ensure that the keys are retrieved from the environment or config file when first called, and store global scope. Subsequently use globally stashed results and check for required ids.
def multi_reciprocal_extra(xs, ys, noise=False): """ Calculates for a series of powers ns the parameters for which the last two points are at the curve. With these parameters measure how well the other data points fit. return the best fit. """ ns = np.linspace(0.5, 6.0, num=56) best = ['', np.inf] fit_results = {} weights = get_weights(xs, ys) for n in ns: popt = extrapolate_reciprocal(xs, ys, n, noise) m = measure(reciprocal, xs, ys, popt, weights) pcov = [] fit_results.update({n: {'measure': m, 'popt': popt, 'pcov': pcov}}) for n in fit_results: if fit_results[n]['measure'] <= best[1]: best = reciprocal, fit_results[n]['measure'], n return fit_results[best[2]]['popt'], fit_results[best[2]]['pcov'], best
Calculates for a series of powers ns the parameters for which the last two points are at the curve. With these parameters measure how well the other data points fit. return the best fit.
def p_param_args_noname(self, p): 'param_args_noname : param_args_noname COMMA param_arg_noname' p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1))
param_args_noname : param_args_noname COMMA param_arg_noname
def set_pos(self, pos): """ set the position of this column in the Table """ self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos return self
set the position of this column in the Table
def construct_infrastructure_factory(self, *args, **kwargs): """ :rtype: InfrastructureFactory """ factory_class = self.infrastructure_factory_class assert issubclass(factory_class, InfrastructureFactory) return factory_class( record_manager_class=self.record_manager_class, integer_sequenced_record_class=self.stored_event_record_class, sequenced_item_class=self.sequenced_item_class, contiguous_record_ids=self.contiguous_record_ids, application_name=self.name, pipeline_id=self.pipeline_id, snapshot_record_class=self.snapshot_record_class, *args, **kwargs )
:rtype: InfrastructureFactory
def to_html(self): """Returns attributes formatted as html.""" id, classes, kvs = self.id, self.classes, self.kvs id_str = 'id="{}"'.format(id) if id else '' class_str = 'class="{}"'.format(' '.join(classes)) if classes else '' key_str = ' '.join('{}={}'.format(k, v) for k, v in kvs.items()) return ' '.join((id_str, class_str, key_str)).strip()
Returns attributes formatted as html.
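As a rough sketch of the output format: the attribute container class is not shown here, so the values are fed to a standalone copy of the same formatting logic (note that key/value pairs are emitted unquoted, exactly as in the method above):

def attrs_to_html(id, classes, kvs):
    # Same formatting as to_html above, without the containing class.
    id_str = 'id="{}"'.format(id) if id else ''
    class_str = 'class="{}"'.format(' '.join(classes)) if classes else ''
    key_str = ' '.join('{}={}'.format(k, v) for k, v in kvs.items())
    return ' '.join((id_str, class_str, key_str)).strip()

print(attrs_to_html('intro', ['note', 'wide'], {'width': '50%'}))
# id="intro" class="note wide" width=50%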
def update_location(self, text=''): """Update text of location.""" self.text_project_name.setEnabled(self.radio_new_dir.isChecked()) name = self.text_project_name.text().strip() if name and self.radio_new_dir.isChecked(): path = osp.join(self.location, name) self.button_create.setDisabled(os.path.isdir(path)) elif self.radio_from_dir.isChecked(): self.button_create.setEnabled(True) path = self.location else: self.button_create.setEnabled(False) path = self.location self.text_location.setText(path)
Update text of location.
def go_to_place(self, place, weight=''): """Assuming I'm in a :class:`Place` that has a :class:`Portal` direct to the given :class:`Place`, schedule myself to travel to the given :class:`Place`, taking an amount of time indicated by the ``weight`` stat on the :class:`Portal`, if given; else 1 turn. Return the number of turns the travel will take. """ if hasattr(place, 'name'): placen = place.name else: placen = place curloc = self["location"] orm = self.character.engine turns = self.engine._portal_objs[ (self.character.name, curloc, place)].get(weight, 1) with self.engine.plan(): orm.turn += turns self['location'] = placen return turns
Assuming I'm in a :class:`Place` that has a :class:`Portal` direct to the given :class:`Place`, schedule myself to travel to the given :class:`Place`, taking an amount of time indicated by the ``weight`` stat on the :class:`Portal`, if given; else 1 turn. Return the number of turns the travel will take.
def _twofilter_smoothing_ON(self, t, ti, info, phi, lwinfo, return_ess, modif_forward, modif_info): """O(N) version of two-filter smoothing. This method should not be called directly, see twofilter_smoothing. """ if modif_info is not None: lwinfo += modif_info Winfo = rs.exp_and_normalise(lwinfo) I = rs.multinomial(Winfo) if modif_forward is not None: lw = self.wgt[t].lw + modif_forward W = rs.exp_and_normalise(lw) else: W = self.wgt[t].W J = rs.multinomial(W) log_omega = self.model.logpt(t + 1, self.X[t][J], info.hist.X[ti][I]) if modif_forward is not None: log_omega -= modif_forward[J] if modif_info is not None: log_omega -= modif_info[I] Om = rs.exp_and_normalise(log_omega) est = np.average(phi(self.X[t][J], info.hist.X[ti][I]), axis=0, weights=Om) if return_ess: return (est, 1. / np.sum(Om**2)) else: return est
O(N) version of two-filter smoothing. This method should not be called directly, see twofilter_smoothing.
def download_object(self, container, obj, directory, structure=True): """ Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters. """ return container.download(obj, directory, structure=structure)
Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters.
def iftrain(self, then_branch, else_branch): """ Execute `then_branch` when training. """ return ifelse(self._training_flag, then_branch, else_branch, name="iftrain")
Execute `then_branch` when training.
def console(loop, log): """Connect to receiver and show events as they occur. Pulls the following arguments from the command line (not method arguments): :param host: Hostname or IP Address of the device. :param port: TCP port number of the device. :param verbose: Show debug logging. """ parser = argparse.ArgumentParser(description=console.__doc__) parser.add_argument('--host', default='127.0.0.1', help='IP or FQDN of AVR') parser.add_argument('--port', default='14999', help='Port of AVR') parser.add_argument('--verbose', '-v', action='count') args = parser.parse_args() if args.verbose: level = logging.DEBUG else: level = logging.INFO logging.basicConfig(level=level) def log_callback(message): """Receives event callback from Anthem Protocol class.""" log.info('Callback invoked: %s' % message) host = args.host port = int(args.port) log.info('Connecting to Anthem AVR at %s:%i' % (host, port)) conn = yield from anthemav.Connection.create( host=host, port=port, loop=loop, update_callback=log_callback) log.info('Power state is '+str(conn.protocol.power)) conn.protocol.power = True log.info('Power state is '+str(conn.protocol.power)) yield from asyncio.sleep(10, loop=loop) log.info('Panel brightness (raw) is '+str(conn.protocol.panel_brightness)) log.info('Panel brightness (text) is '+str(conn.protocol.panel_brightness_text))
Connect to receiver and show events as they occur. Pulls the following arguments from the command line (not method arguments): :param host: Hostname or IP Address of the device. :param port: TCP port number of the device. :param verbose: Show debug logging.
def sample_stats_to_xarray(self): """Extract sample_stats from PyMC3 trace.""" rename_key = {"model_logp": "lp"} data = {} for stat in self.trace.stat_names: name = rename_key.get(stat, stat) data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False)) log_likelihood, dims = self._extract_log_likelihood() if log_likelihood is not None: data["log_likelihood"] = log_likelihood dims = {"log_likelihood": dims} else: dims = None return dict_to_dataset(data, library=self.pymc3, dims=dims, coords=self.coords)
Extract sample_stats from PyMC3 trace.
def container_running(self, container_name): """ Finds out if a container with name ``container_name`` is running. :return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise. :rtype: Optional[docker.models.container.Container] """ filters = { "name": container_name, "status": "running", } for container in self.client.containers.list(filters=filters): if container_name == container.name: return container return None
Finds out if a container with name ``container_name`` is running. :return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise. :rtype: Optional[docker.models.container.Container]
def list(self, request, *args, **kwargs): """ To get a list of service settings, run **GET** against */api/service-settings/* as an authenticated user. Only settings owned by this user or shared settings will be listed. Supported filters are: - ?name=<text> - partial matching used for searching - ?type=<type> - choices: OpenStack, DigitalOcean, Amazon, JIRA, GitLab, Oracle - ?state=<state> - choices: New, Creation Scheduled, Creating, Sync Scheduled, Syncing, In Sync, Erred - ?shared=<bool> - allows to filter shared service settings """ return super(ServiceSettingsViewSet, self).list(request, *args, **kwargs)
To get a list of service settings, run **GET** against */api/service-settings/* as an authenticated user. Only settings owned by this user or shared settings will be listed. Supported filters are: - ?name=<text> - partial matching used for searching - ?type=<type> - choices: OpenStack, DigitalOcean, Amazon, JIRA, GitLab, Oracle - ?state=<state> - choices: New, Creation Scheduled, Creating, Sync Scheduled, Syncing, In Sync, Erred - ?shared=<bool> - allows to filter shared service settings
def add_ephemeral_listener(self, callback, event_type=None): """Add a callback handler for ephemeral events going to this room. Args: callback (func(room, event)): Callback called when an ephemeral event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener. """ listener_id = uuid4() self.ephemeral_listeners.append( { 'uid': listener_id, 'callback': callback, 'event_type': event_type } ) return listener_id
Add a callback handler for ephemeral events going to this room. Args: callback (func(room, event)): Callback called when an ephemeral event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener.
def top_segment_proportions(mtx, ns): """ Calculates total percentage of counts in top ns genes. Parameters ---------- mtx : `Union[np.array, sparse.spmatrix]` Matrix, where each row is a sample, each column a feature. ns : `Container[Int]` Positions to calculate cumulative proportion at. Values are considered 1-indexed, e.g. `ns=[50]` will calculate cumulative proportion up to the 50th most expressed gene. """ # Pretty much just does dispatch if not (max(ns) <= mtx.shape[1] and min(ns) > 0): raise IndexError("Positions outside range of features.") if issparse(mtx): if not isspmatrix_csr(mtx): mtx = csr_matrix(mtx) return top_segment_proportions_sparse_csr(mtx.data, mtx.indptr, np.array(ns, dtype=np.int)) else: return top_segment_proportions_dense(mtx, ns)
Calculates total percentage of counts in top ns genes. Parameters ---------- mtx : `Union[np.array, sparse.spmatrix]` Matrix, where each row is a sample, each column a feature. ns : `Container[Int]` Positions to calculate cumulative proportion at. Values are considered 1-indexed, e.g. `ns=[50]` will calculate cumulative proportion up to the 50th most expressed gene.
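To illustrate what the cumulative top-n proportion means, independent of the sparse/dense dispatch above, a small NumPy sketch with made-up counts (not the scanpy implementation):

import numpy as np

# Two cells (rows), five genes (columns); counts are illustrative only.
counts = np.array([[10, 5, 3, 1, 1],
                   [ 4, 4, 2, 0, 0]], dtype=float)

def top_n_proportion(mtx, n):
    # Fraction of each row's total contributed by its n largest entries.
    top = np.sort(mtx, axis=1)[:, ::-1][:, :n]
    return top.sum(axis=1) / mtx.sum(axis=1)

print(top_n_proportion(counts, 2))  # [0.75 0.8 ] : (10+5)/20 and (4+4)/10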
def next_history(self, e): # (C-n) u'''Move forward through the history list, fetching the next command. ''' self._history.next_history(self.l_buffer) self.finalize()
u'''Move forward through the history list, fetching the next command.
def getTextBlocks(page, images=False): """Return the text blocks on a page. Notes: Lines in a block are concatenated with line breaks. Args: images: (bool) also return meta data of any images. Image data are never returned with this method. Returns: A list of the blocks. Each item contains the containing rectangle coordinates, text lines, block type and running block number. """ CheckParent(page) dl = page.getDisplayList() flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE if images: flags |= TEXT_PRESERVE_IMAGES tp = dl.getTextPage(flags) l = tp._extractTextBlocks_AsList() del tp del dl return l
Return the text blocks on a page. Notes: Lines in a block are concatenated with line breaks. Args: images: (bool) also return meta data of any images. Image data are never returned with this method. Returns: A list of the blocks. Each item contains the containing rectangle coordinates, text lines, block type and running block number.
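Typical usage with PyMuPDF, sketched for the older camelCase API this method belongs to (newer releases expose the same data through page.get_text("blocks")); the file name is a placeholder:

import fitz  # PyMuPDF

doc = fitz.open("example.pdf")   # placeholder path
page = doc[0]
for block in page.getTextBlocks():
    bbox, text = block[:4], block[4]   # rectangle coordinates, then the block's text
    print([round(c) for c in bbox], text.splitlines()[:1])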
def crc16(data): """ Calculate an ISO13239 CRC checksum of the input buffer (bytestring). """ m_crc = 0xffff for this in data: m_crc ^= ord_byte(this) for _ in range(8): j = m_crc & 1 m_crc >>= 1 if j: m_crc ^= 0x8408 return m_crc
Calculate an ISO13239 CRC checksum of the input buffer (bytestring).
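A usage sketch; ord_byte is a small Python 2/3 compatibility helper assumed by the function above, so a minimal stand-in is included here (assuming crc16 is pasted into the same module so it picks it up):

def ord_byte(c):
    # Bytes iterate as ints on Python 3 and as 1-char strings on Python 2.
    return c if isinstance(c, int) else ord(c)

checksum = crc16(b"123456789")
print(hex(checksum))  # 16-bit value; compare against a known-good CRC implementation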
def get_list_information(self, query_params=None): ''' Get information for this list. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Get information for this list. Returns a dictionary of values.
def previous_sibling(self): """The previous sibling statement. :returns: The previous sibling statement node. :rtype: NodeNG or None """ stmts = self.parent.child_sequence(self) index = stmts.index(self) if index >= 1: return stmts[index - 1] return None
The previous sibling statement. :returns: The previous sibling statement node. :rtype: NodeNG or None
def write_molden(*args, **kwargs): """Deprecated, use :func:`~chemcoord.xyz_functions.to_molden` """ message = 'Will be removed in the future. Please use to_molden().' with warnings.catch_warnings(): warnings.simplefilter("always") warnings.warn(message, DeprecationWarning) return to_molden(*args, **kwargs)
Deprecated, use :func:`~chemcoord.xyz_functions.to_molden`
def observer(self, component_type=ComponentType): """ You can use ``@broker.observer()`` as a decorator to your callback instead of :func:`Broker.add_observer`. """ def inner(func): self.add_observer(func, component_type) return func return inner
You can use ``@broker.observer()`` as a decorator to your callback instead of :func:`Broker.add_observer`.
def dictmerge(x, y): """ merge two dictionaries """ z = x.copy() z.update(y) return z
merge two dictionaries
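Keys from y win on collision, since update is applied last; a quick sketch:

defaults = {"host": "localhost", "port": 8080}
overrides = {"port": 9090, "debug": True}

print(dictmerge(defaults, overrides))
# {'host': 'localhost', 'port': 9090, 'debug': True}
print(defaults)  # unchanged, because dictmerge copies x first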
def interact(self, banner=None): """Closely emulate the interactive Python console. This method overwrites its superclass' method to specify a different help text and to enable proper handling of the debugger status line. Args: banner: Text to be displayed on interpreter startup. """ sys.ps1 = getattr(sys, 'ps1', '>>> ') sys.ps2 = getattr(sys, 'ps2', '... ') if banner is None: print ('Pyringe (Python %s.%s.%s) on %s\n%s' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro, sys.platform, _WELCOME_MSG)) else: print banner more = False while True: try: if more: prompt = sys.ps2 else: prompt = self.StatusLine() + '\n' + sys.ps1 try: line = self.raw_input(prompt) except EOFError: print '' break else: more = self.push(line) except KeyboardInterrupt: print '\nKeyboardInterrupt' self.resetbuffer() more = False
Closely emulate the interactive Python console. This method overwrites its superclass' method to specify a different help text and to enable proper handling of the debugger status line. Args: banner: Text to be displayed on interpreter startup.
def _prob_match(self, features): """Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties. """ # compute the probabilities probs = self.kernel.predict_proba(features) # get the position of match probabilities classes = list(self.kernel.classes_) match_class_position = classes.index(1) return probs[:, match_class_position]
Compute match probabilities. Parameters ---------- features : numpy.ndarray The data to train the model on. Returns ------- numpy.ndarray The match probabilties.
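The column lookup matters because scikit-learn orders predict_proba columns by classes_, not by any assumed label position; a standalone sketch with a toy classifier (not the recordlinkage internals):

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.1], [0.2], [0.8], [0.9]])
y = np.array([0, 0, 1, 1])  # 1 = "match"

clf = LogisticRegression().fit(X, y)
probs = clf.predict_proba(X)

# Pick the column that corresponds to the match class (label 1),
# mirroring what _prob_match does with self.kernel.
match_col = list(clf.classes_).index(1)
print(probs[:, match_col])  # one match probability per row of X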
def kernel_restarted_message(self, msg): """Show kernel restarted/died messages.""" if not self.is_error_shown: # If there are kernel creation errors, jupyter_client will # try to restart the kernel and qtconsole prints a # message about it. # So we read the kernel's stderr_file and display its # contents in the client instead of the usual message shown # by qtconsole. try: stderr = self._read_stderr() except Exception: stderr = None if stderr: self.show_kernel_error('<tt>%s</tt>' % stderr) else: self.shellwidget._append_html("<br>%s<hr><br>" % msg, before_prompt=False)
Show kernel restarted/died messages.
def digest(self, elimseq=False, notrunc=False): """ Obtain the fuzzy hash. This operation does not change the state at all. It reports the hash for the concatenation of the data previously fed using update(). :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error """ if self._state == ffi.NULL: raise InternalError("State object is NULL") flags = (binding.lib.FUZZY_FLAG_ELIMSEQ if elimseq else 0) | \ (binding.lib.FUZZY_FLAG_NOTRUNC if notrunc else 0) result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT) if binding.lib.fuzzy_digest(self._state, result, flags) != 0: raise InternalError("Function returned an unexpected error code") return ffi.string(result).decode("ascii")
Obtain the fuzzy hash. This operation does not change the state at all. It reports the hash for the concatenation of the data previously fed using update(). :return: The fuzzy hash :rtype: String :raises InternalError: If lib returns an internal error
def voip_play2(s1, **kargs): """ Same than voip_play, but will play both incoming and outcoming packets. The sound will surely suffer distortion. Only supports sniffing. .. seealso:: voip_play to play only incoming packets. """ dsp, rd = os.popen2(sox_base % "-c 2") global x1, x2 x1 = "" x2 = "" def play(pkt): global x1, x2 if not pkt: return if not pkt.haslayer(UDP) or not pkt.haslayer(IP): return ip = pkt.getlayer(IP) if s1 in [ip.src, ip.dst]: if ip.dst == s1: x1 += pkt.getlayer(conf.raw_layer).load[12:] else: x2 += pkt.getlayer(conf.raw_layer).load[12:] x1, x2, r = _merge_sound_bytes(x1, x2) dsp.write(r) sniff(store=0, prn=play, **kargs)
Same than voip_play, but will play both incoming and outcoming packets. The sound will surely suffer distortion. Only supports sniffing. .. seealso:: voip_play to play only incoming packets.
def directions(self, origin, destination, mode=None, alternatives=None, waypoints=None, optimize_waypoints=False, avoid=None, language=None, units=None, region=None, departure_time=None, arrival_time=None, sensor=None): """Get directions between locations :param origin: Origin location - string address; (latitude, longitude) two-tuple, dict with ("lat", "lon") keys or object with (lat, lon) attributes :param destination: Destination location - type same as origin :param mode: Travel mode as string, defaults to "driving". See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_ :param alternatives: True if provide it has to return more then one route alternative :param waypoints: Iterable with set of intermediate stops, like ("Munich", "Dallas") See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param optimize_waypoints: if true will attempt to re-order supplied waypoints to minimize overall cost of the route. If waypoints are optimized, the route returned will show the optimized order under "waypoint_order". See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param avoid: Iterable with set of restrictions, like ("tolls", "highways"). For full list refer to `google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_ :param language: The language in which to return results. See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_ :param units: Unit system for result. Defaults to unit system of origin's country. See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_ :param region: The region code. Affects geocoding of origin and destination (see `gmaps.Geocoding.geocode` region parameter) :param departure_time: Desired time of departure as seconds since midnight, January 1, 1970 UTC :param arrival_time: Desired time of arrival for transit directions as seconds since midnight, January 1, 1970 UTC. """ # noqa if optimize_waypoints: waypoints.insert(0, "optimize:true") parameters = dict( origin=self.assume_latlon_or_address(origin), destination=self.assume_latlon_or_address(destination), mode=mode, alternatives=alternatives, waypoints=waypoints or [], avoid=avoid, language=language, units=units, region=region, departure_time=departure_time, arrival_time=arrival_time, sensor=sensor, ) return self._make_request(self.DIRECTIONS_URL, parameters, "routes")
Get directions between locations :param origin: Origin location - string address; (latitude, longitude) two-tuple, dict with ("lat", "lon") keys or object with (lat, lon) attributes :param destination: Destination location - type same as origin :param mode: Travel mode as string, defaults to "driving". See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_ :param alternatives: True if provide it has to return more then one route alternative :param waypoints: Iterable with set of intermediate stops, like ("Munich", "Dallas") See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param optimize_waypoints: if true will attempt to re-order supplied waypoints to minimize overall cost of the route. If waypoints are optimized, the route returned will show the optimized order under "waypoint_order". See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param avoid: Iterable with set of restrictions, like ("tolls", "highways"). For full list refer to `google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_ :param language: The language in which to return results. See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_ :param units: Unit system for result. Defaults to unit system of origin's country. See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_ :param region: The region code. Affects geocoding of origin and destination (see `gmaps.Geocoding.geocode` region parameter) :param departure_time: Desired time of departure as seconds since midnight, January 1, 1970 UTC :param arrival_time: Desired time of arrival for transit directions as seconds since midnight, January 1, 1970 UTC.
def shift(self, modelResult): """Shift the model result and return the new instance. Queues up the T(i+1) prediction value and emits a T(i) input/prediction pair, if possible. E.g., if the previous T(i-1) iteration was learn-only, then we would not have a T(i) prediction in our FIFO and would not be able to emit a meaningful input/prediction pair. :param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance to shift. :return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that has been shifted """ inferencesToWrite = {} if self._inferenceBuffer is None: maxDelay = InferenceElement.getMaxDelay(modelResult.inferences) self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1) self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences)) for inferenceElement, inference in modelResult.inferences.iteritems(): if isinstance(inference, dict): inferencesToWrite[inferenceElement] = {} for key, _ in inference.iteritems(): delay = InferenceElement.getTemporalDelay(inferenceElement, key) if len(self._inferenceBuffer) > delay: prevInference = self._inferenceBuffer[delay][inferenceElement][key] inferencesToWrite[inferenceElement][key] = prevInference else: inferencesToWrite[inferenceElement][key] = None else: delay = InferenceElement.getTemporalDelay(inferenceElement) if len(self._inferenceBuffer) > delay: inferencesToWrite[inferenceElement] = ( self._inferenceBuffer[delay][inferenceElement]) else: if type(inference) in (list, tuple): inferencesToWrite[inferenceElement] = [None] * len(inference) else: inferencesToWrite[inferenceElement] = None shiftedResult = ModelResult(rawInput=modelResult.rawInput, sensorInput=modelResult.sensorInput, inferences=inferencesToWrite, metrics=modelResult.metrics, predictedFieldIdx=modelResult.predictedFieldIdx, predictedFieldName=modelResult.predictedFieldName) return shiftedResult
Shift the model result and return the new instance. Queues up the T(i+1) prediction value and emits a T(i) input/prediction pair, if possible. E.g., if the previous T(i-1) iteration was learn-only, then we would not have a T(i) prediction in our FIFO and would not be able to emit a meaningful input/prediction pair. :param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance to shift. :return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that has been shifted
def create(self, validated_data): """ Create a new email and send a confirmation to it. Returns: The newly creating ``EmailAddress`` instance. """ email_query = models.EmailAddress.objects.filter( email=self.validated_data["email"] ) if email_query.exists(): email = email_query.get() email.send_duplicate_notification() else: email = super(EmailSerializer, self).create(validated_data) email.send_confirmation() user = validated_data.get("user") query = models.EmailAddress.objects.filter( is_primary=True, user=user ) if not query.exists(): email.set_primary() return email
Create a new email and send a confirmation to it. Returns: The newly creating ``EmailAddress`` instance.
def htmlFormat(output, pathParts = (), statDict = None, query = None): """Formats as HTML, writing to the given object.""" statDict = statDict or scales.getStats() if query: statDict = runQuery(statDict, query) _htmlRenderDict(pathParts, statDict, output)
Formats as HTML, writing to the given object.
def get_variable_scope_name(value): """Returns the name of the variable scope indicated by the given value. Args: value: String, variable scope, or object with `variable_scope` attribute (e.g., Sonnet module). Returns: The name (a string) of the corresponding variable scope. Raises: ValueError: If `value` does not identify a variable scope. """ # If the object has a "variable_scope" property, use it. value = getattr(value, "variable_scope", value) if isinstance(value, tf.VariableScope): return value.name elif isinstance(value, six.string_types): return value else: raise ValueError("Not a variable scope: {}".format(value))
Returns the name of the variable scope indicated by the given value. Args: value: String, variable scope, or object with `variable_scope` attribute (e.g., Sonnet module). Returns: The name (a string) of the corresponding variable scope. Raises: ValueError: If `value` does not identify a variable scope.
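A usage sketch, assuming the TF1-style tf.variable_scope API that Sonnet 1.x targets and that the function above is in scope with its tf/six imports:

import tensorflow as tf  # TF1-style API assumed

with tf.variable_scope("encoder") as vs:
    pass

print(get_variable_scope_name(vs))         # 'encoder'
print(get_variable_scope_name("encoder"))  # plain strings pass through unchanged
# Anything else (e.g. an int) raises ValueError("Not a variable scope: ...").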
def formfield_for_dbfield(self, db_field, **kwargs): ''' Offer only gradings that are not used by other schemes, which means they are used by this scheme or not at all.''' if db_field.name == "gradings": request=kwargs['request'] try: #TODO: MockRequst object from unit test does not contain path information, so an exception occurs during test. # Find a test-suite compatible solution here. obj=resolve(request.path).args[0] filterexpr=Q(schemes=obj) | Q(schemes=None) kwargs['queryset'] = Grading.objects.filter(filterexpr).distinct() except: pass return super(GradingSchemeAdmin, self).formfield_for_dbfield(db_field, **kwargs)
Offer only gradings that are not used by other schemes, which means they are used by this scheme or not at all.
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when): """Add a file handler to the global logger.""" kwargs = {} # If the filename is not set, use the default filename if filename is None: filename = getattr(sys.modules['__main__'], '__file__', 'log.py') filename = os.path.basename(filename.replace('.py', '.log')) filename = os.path.join('/tmp', filename) if not os.path.exists(os.path.dirname(filename)): os.mkdir(os.path.dirname(filename)) kwargs['filename'] = filename # Choose the filehandler based on the passed arguments if backup_count == 0: # Use FileHandler cls = logging.FileHandler kwargs['mode'] = mode elif when is None: # Use RotatingFileHandler cls = logging.handlers.RotatingFileHandler kwargs['maxBytes'] = limit kwargs['backupCount'] = backup_count kwargs['mode'] = mode else: # Use TimedRotatingFileHandler cls = logging.handlers.TimedRotatingFileHandler kwargs['when'] = when kwargs['interval'] = limit kwargs['backupCount'] = backup_count return add_handler(cls, level, fmt, False, **kwargs)
Add a file handler to the global logger.
def svd_convolution(inp, outmaps, kernel, r, pad=None, stride=None, dilation=None, uv_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True): """SVD convolution is a low rank approximation of the convolution layer. It can be seen as a depth wise convolution followed by a 1x1 convolution. The flattened kernels for the i-th input map are expressed by their low rank approximation. The kernels for the i-th input :math:`{\\mathbf W_i}` are approximated with the singular value decomposition (SVD) and by selecting the :math:`{R}` dominant singular values and the corresponding singular vectors. .. math:: {\\mathbf W_{:,i,:}} ~ {\\mathbf U_i} {\\mathbf V_i}. :math:`{\\mathbf U}` contains the weights of the depthwise convolution with multiplier :math:`{R}` and :math:`{\\mathbf V}` contains the weights of the 1x1 convolution. If `uv_init` is a numpy array, :math:`{\\mathbf U}` and :math:`{\\mathbf V}` are computed such that `uv_init` is approximated by :math:`{\\mathbf{UV}}`. If `uv_init` is `None` or an initializer, the product of :math:`{\\mathbf U}` and :math:`{\\mathbf V}` approximates the random initialization. If :math:`{\\mathbf U}` and :math:`{\\mathbf V}` exist in the context, they take precedence over `uv_init`. Suppose the kernel tensor of the convolution is of :math:`{O \\times I \\times K \\times K}` and the compression rate you want to specify is :math:`{CR}`, then you set :math:`{R}` as .. math:: R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{I(O + K^2)} \\right\\rfloor. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (tuple): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3, 5). r (int): Rank of the factorized layer. pad (tuple): Padding sizes (`int`) for dimensions. stride (tuple): Stride sizes (`int`) for dimensions. dilation (tuple): Dilation sizes (`int`) for dimensions. uv_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) """ assert r > 0, "svd_convolution: The rank must larger than zero" if uv_init is None: uv_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if type(uv_init) is np.ndarray: # TODO: Assert that size of uv_init is correct # uv is initialize with numpy array uv = uv_init else: # uv is initialize from initializer uv = uv_init((outmaps, inp.shape[base_axis]) + tuple(kernel)) # flatten kernels uv = uv.reshape((outmaps, inp.shape[base_axis], np.prod(kernel))) u = get_parameter('U') v = get_parameter('V') if (u is None) or (v is None): inmaps = inp.shape[base_axis] u_low_rank = np.zeros((inmaps, np.prod(kernel), r)) v_low_rank = np.zeros((inmaps, r, outmaps)) for i in range(inmaps): K = np.transpose(uv[:, i, :]) u_, s_, v_ = np.linalg.svd(K, full_matrices=False) u_low_rank[i, :, :] = np.dot(u_[:, :r], np.diag(s_[:r])) v_low_rank[i, :, :] = v_[:r, :] # reshape U : (I,K*K,r) -> (I*r,K,K) for depthwise conv u = nn.Variable((inmaps * r,) + tuple(kernel), need_grad=True) u.d = (np.transpose(u_low_rank, axes=(0, 2, 1)) .reshape((inmaps * r,) + tuple(kernel))) nn.parameter.set_parameter("U", u) # reshape V : (I,r,O) -> (O,I*r,1,1) for 1X1 conv kernel_one = (1,) * len(kernel) # 1x1 for 2D convolution v = nn.Variable((outmaps, inmaps * r) + kernel_one, need_grad=True) v.d = (np.transpose(v_low_rank, axes=(2, 0, 1)) .reshape((outmaps, inmaps * r) + kernel_one)) nn.parameter.set_parameter("V", v) if fix_parameters == u.need_grad: u = u.get_unlinked_variable(need_grad=not fix_parameters) if fix_parameters == v.need_grad: v = v.get_unlinked_variable(need_grad=not fix_parameters) if with_bias and b_init is None: b_init = ConstantInitializer() b = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) y = F.depthwise_convolution(inp, u, bias=None, base_axis=base_axis, pad=pad, stride=stride, dilation=dilation, multiplier=r) y = F.convolution(y, v, bias=b, base_axis=base_axis, pad=None, stride=None, dilation=None, group=1) return y
SVD convolution is a low rank approximation of the convolution layer. It can be seen as a depth wise convolution followed by a 1x1 convolution. The flattened kernels for the i-th input map are expressed by their low rank approximation. The kernels for the i-th input :math:`{\\mathbf W_i}` are approximated with the singular value decomposition (SVD) and by selecting the :math:`{R}` dominant singular values and the corresponding singular vectors. .. math:: {\\mathbf W_{:,i,:}} ~ {\\mathbf U_i} {\\mathbf V_i}. :math:`{\\mathbf U}` contains the weights of the depthwise convolution with multiplier :math:`{R}` and :math:`{\\mathbf V}` contains the weights of the 1x1 convolution. If `uv_init` is a numpy array, :math:`{\\mathbf U}` and :math:`{\\mathbf V}` are computed such that `uv_init` is approximated by :math:`{\\mathbf{UV}}`. If `uv_init` is `None` or an initializer, the product of :math:`{\\mathbf U}` and :math:`{\\mathbf V}` approximates the random initialization. If :math:`{\\mathbf U}` and :math:`{\\mathbf V}` exist in the context, they take precedence over `uv_init`. Suppose the kernel tensor of the convolution is of :math:`{O \\times I \\times K \\times K}` and the compression rate you want to specify is :math:`{CR}`, then you set :math:`{R}` as .. math:: R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{I(O + K^2)} \\right\\rfloor. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (tuple): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3, 5). r (int): Rank of the factorized layer. pad (tuple): Padding sizes (`int`) for dimensions. stride (tuple): Stride sizes (`int`) for dimensions. dilation (tuple): Dilation sizes (`int`) for dimensions. uv_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
def _merge_statement_lists(stmsA: List["HdlStatement"], stmsB: List["HdlStatement"])\ -> List["HdlStatement"]: """ Merge two lists of statements into one :return: list of merged statements """ if stmsA is None and stmsB is None: return None tmp = [] a_it = iter(stmsA) b_it = iter(stmsB) a = None b = None a_empty = False b_empty = False while not a_empty and not b_empty: while not a_empty: a = next(a_it, None) if a is None: a_empty = True break elif a.rank == 0: # simple statement does not require merging tmp.append(a) a = None else: break while not b_empty: b = next(b_it, None) if b is None: b_empty = True break elif b.rank == 0: # simple statement does not require merging tmp.append(b) b = None else: break if a is not None or b is not None: a._merge_with_other_stm(b) tmp.append(a) a = None b = None return tmp
Merge two lists of statements into one :return: list of merged statements
def get_new_call(group_name, app_name, search_path, filename, require_load, version, secure): # type: (str, str, Optional[str], str, bool, Optional[str], bool) -> str ''' Build a call to use the new ``get_config`` function from args passed to ``Config.__init__``. ''' new_call_kwargs = { 'group_name': group_name, 'filename': filename } # type: Dict[str, Any] new_call_lookup_options = {} # type: Dict[str, Any] new_call_lookup_options['secure'] = secure if search_path: new_call_lookup_options['search_path'] = search_path if require_load: new_call_lookup_options['require_load'] = require_load if version: new_call_lookup_options['version'] = version if new_call_lookup_options: new_call_kwargs['lookup_options'] = new_call_lookup_options output = build_call_str('get_config', (app_name,), new_call_kwargs) return output
Build a call to use the new ``get_config`` function from args passed to ``Config.__init__``.
def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode': """ Return a ColorCode from a RGB tuple. """ c = cls() c._init_rgb(r, g, b) return c
Return a ColorCode from a RGB tuple.
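A minimal sketch of how such an alternate constructor is typically used; _init_rgb is not shown in the snippet above, so a hypothetical stand-in is included purely for illustration:

class ColorCode:
    def _init_rgb(self, r: int, g: int, b: int) -> None:
        # Hypothetical body: just store the channels.
        self.rgb = (r, g, b)

    @classmethod
    def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode':
        """Return a ColorCode from a RGB tuple."""
        c = cls()
        c._init_rgb(r, g, b)
        return c

orange = ColorCode.from_rgb(255, 165, 0)
print(orange.rgb)  # (255, 165, 0)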
def verify(self, type_): """ Check whether a type implements ``self``. Parameters ---------- type_ : type The type to check. Raises ------ TypeError If ``type_`` doesn't conform to our interface. Returns ------- None """ raw_missing, mistyped, mismatched = self._diff_signatures(type_) # See if we have defaults for missing methods. missing = [] defaults_to_use = {} for name in raw_missing: try: defaults_to_use[name] = self._defaults[name].implementation except KeyError: missing.append(name) if not any((missing, mistyped, mismatched)): return defaults_to_use raise self._invalid_implementation(type_, missing, mistyped, mismatched)
Check whether a type implements ``self``. Parameters ---------- type_ : type The type to check. Raises ------ TypeError If ``type_`` doesn't conform to our interface. Returns ------- None
def pix2canvas(self, pt): """Takes a 2-tuple of (x, y) in window coordinates and gives the (cx, cy, cz) coordinates on the canvas. """ x, y = pt[:2] #print('p2c in', x, y) mm = gl.glGetDoublev(gl.GL_MODELVIEW_MATRIX) pm = gl.glGetDoublev(gl.GL_PROJECTION_MATRIX) vp = gl.glGetIntegerv(gl.GL_VIEWPORT) win_x, win_y = float(x), float(vp[3] - y) win_z = gl.glReadPixels(int(win_x), int(win_y), 1, 1, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT) pos = glu.gluUnProject(win_x, win_y, win_z, mm, pm, vp) #print('out', pos) return pos
Takes a 2-tuple of (x, y) in window coordinates and gives the (cx, cy, cz) coordinates on the canvas.
def get_parser(): """Return the parser object for this script.""" from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("-m", "--model", dest="model", help="where is the model folder (with a info.yml)?", metavar="FOLDER", type=lambda x: utils.is_valid_folder(parser, x), default=utils.default_model()) return parser
Return the parser object for this script.
def real_time_statistics(self): """ Access the real_time_statistics :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsList """ if self._real_time_statistics is None: self._real_time_statistics = TaskQueueRealTimeStatisticsList( self._version, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['sid'], ) return self._real_time_statistics
Access the real_time_statistics :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsList
def from_cmdstan( posterior=None, *, posterior_predictive=None, prior=None, prior_predictive=None, observed_data=None, observed_data_var=None, log_likelihood=None, coords=None, dims=None ): """Convert CmdStan data into an InferenceData object. Parameters ---------- posterior : List[str] List of paths to output.csv files. CSV file can be stacked csv containing all the chains cat output*.csv > combined_output.csv posterior_predictive : str, List[Str] Posterior predictive samples for the fit. If endswith ".csv" assumes file. prior : List[str] List of paths to output.csv files CSV file can be stacked csv containing all the chains. cat output*.csv > combined_output.csv prior_predictive : str, List[Str] Prior predictive samples for the fit. If endswith ".csv" assumes file. observed_data : str Observed data used in the sampling. Path to data file in Rdump format. observed_data_var : str, List[str] Variable(s) used for slicing observed_data. If not defined, all data variables are imported. log_likelihood : str Pointwise log_likelihood for the data. coords : dict[str, iterable] A dictionary containing the values that are used as index. The key is the name of the dimension, the values are the index values. dims : dict[str, List(str)] A mapping from variables to a list of coordinate names for the variable. Returns ------- InferenceData object """ return CmdStanConverter( posterior=posterior, posterior_predictive=posterior_predictive, prior=prior, prior_predictive=prior_predictive, observed_data=observed_data, observed_data_var=observed_data_var, log_likelihood=log_likelihood, coords=coords, dims=dims, ).to_inference_data()
Convert CmdStan data into an InferenceData object. Parameters ---------- posterior : List[str] List of paths to output.csv files. CSV file can be stacked csv containing all the chains cat output*.csv > combined_output.csv posterior_predictive : str, List[Str] Posterior predictive samples for the fit. If endswith ".csv" assumes file. prior : List[str] List of paths to output.csv files CSV file can be stacked csv containing all the chains. cat output*.csv > combined_output.csv prior_predictive : str, List[Str] Prior predictive samples for the fit. If endswith ".csv" assumes file. observed_data : str Observed data used in the sampling. Path to data file in Rdump format. observed_data_var : str, List[str] Variable(s) used for slicing observed_data. If not defined, all data variables are imported. log_likelihood : str Pointwise log_likelihood for the data. coords : dict[str, iterable] A dictionary containing the values that are used as index. The key is the name of the dimension, the values are the index values. dims : dict[str, List(str)] A mapping from variables to a list of coordinate names for the variable. Returns ------- InferenceData object
def get_path(self, path, query=None): """Make a GET request, optionally including a query, to a relative path. The path of the request includes a path on top of the base URL assigned to the endpoint. Parameters ---------- path : str The path to request, relative to the endpoint query : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_query, get, url_path """ return self.get(self.url_path(path), query)
Make a GET request, optionally including a query, to a relative path. The path of the request includes a path on top of the base URL assigned to the endpoint. Parameters ---------- path : str The path to request, relative to the endpoint query : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request See Also -------- get_query, get, url_path
def mapfi(ol,map_func_args,**kwargs): ''' #mapfi: every element shares the same other_args; the value is NOT passed as a param to map_func #map_func diff_func(index,*common_args) ''' diff_funcs_arr = kwargs['map_funcs'] lngth = ol.__len__() rslt = [] for i in range(0,lngth): index = i value = ol[i] func = diff_funcs_arr[i] args = map_func_args ele = func(index,*args) rslt.append(ele) return(rslt)
#mapfi: every element shares the same other_args; the value is NOT passed as a param to map_func #map_func diff_func(index,*common_args)
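A usage sketch: one function per position, each called as func(index, *map_func_args); the element values themselves are deliberately ignored, as the comment says:

ol = ['a', 'b', 'c']  # only the length/indices matter to mapfi

funcs = [
    lambda i, base: base + i,         # index 0
    lambda i, base: (base + i) * 10,  # index 1
    lambda i, base: (base + i) * 100  # index 2
]

print(mapfi(ol, (5,), map_funcs=funcs))
# [5, 60, 700]  ->  5+0, (5+1)*10, (5+2)*100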
def update(): # type: () -> None """ Update the feature with updates committed to develop. This will merge current develop into the current branch. """ branch = git.current_branch(refresh=True) develop = conf.get('git.devel_branch', 'develop') common.assert_branch_type('feature') common.git_checkout(develop) common.git_pull(develop) common.git_checkout(branch.name) common.git_merge(branch.name, develop)
Update the feature with updates committed to develop. This will merge current develop into the current branch.
def copyText( self ): """ Copies the selected text to the clipboard. """ view = self.currentWebView() QApplication.clipboard().setText(view.page().selectedText())
Copies the selected text to the clipboard.
def setupTable_head(self): """ Make the head table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired. """ if "head" not in self.tables: return self.otf["head"] = head = newTable("head") font = self.ufo head.checkSumAdjustment = 0 head.tableVersion = 1.0 head.magicNumber = 0x5F0F3CF5 # version numbers # limit minor version to 3 digits as recommended in OpenType spec: # https://www.microsoft.com/typography/otspec/recom.htm versionMajor = getAttrWithFallback(font.info, "versionMajor") versionMinor = getAttrWithFallback(font.info, "versionMinor") fullFontRevision = float("%d.%03d" % (versionMajor, versionMinor)) head.fontRevision = round(fullFontRevision, 3) if head.fontRevision != fullFontRevision: logger.warning( "Minor version in %s has too many digits and won't fit into " "the head table's fontRevision field; rounded to %s.", fullFontRevision, head.fontRevision) # upm head.unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm")) # times head.created = dateStringToTimeValue(getAttrWithFallback(font.info, "openTypeHeadCreated")) - mac_epoch_diff head.modified = dateStringToTimeValue(dateStringForNow()) - mac_epoch_diff # bounding box xMin, yMin, xMax, yMax = self.fontBoundingBox head.xMin = otRound(xMin) head.yMin = otRound(yMin) head.xMax = otRound(xMax) head.yMax = otRound(yMax) # style mapping styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName") macStyle = [] if styleMapStyleName == "bold": macStyle = [0] elif styleMapStyleName == "bold italic": macStyle = [0, 1] elif styleMapStyleName == "italic": macStyle = [1] head.macStyle = intListToNum(macStyle, 0, 16) # misc head.flags = intListToNum(getAttrWithFallback(font.info, "openTypeHeadFlags"), 0, 16) head.lowestRecPPEM = otRound(getAttrWithFallback(font.info, "openTypeHeadLowestRecPPEM")) head.fontDirectionHint = 2 head.indexToLocFormat = 0 head.glyphDataFormat = 0
Make the head table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired.
def reserve(self, doc): """Reserve a DOI (amounts to upload metadata, but not to mint). :param doc: Set metadata for DOI. :returns: `True` if is reserved successfully. """ # Only registered PIDs can be updated. try: self.pid.reserve() self.api.metadata_post(doc) except (DataCiteError, HttpError): logger.exception("Failed to reserve in DataCite", extra=dict(pid=self.pid)) raise logger.info("Successfully reserved in DataCite", extra=dict(pid=self.pid)) return True
Reserve a DOI (amounts to upload metadata, but not to mint). :param doc: Set metadata for DOI. :returns: `True` if is reserved successfully.
def get_current_structure(self): """ Returns a dictionary with model field objects. :return: dict """ struct = self.__class__.get_structure() struct.update(self.__field_types__) return struct
Returns a dictionary with model field objects. :return: dict
def ticket_tags(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/tags#show-tags" api_path = "/api/v2/tickets/{id}/tags.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/tags#show-tags
def _estimate_centers_widths(
        self,
        unique_R,
        inds,
        X,
        W,
        init_centers,
        init_widths,
        template_centers,
        template_widths,
        template_centers_mean_cov,
        template_widths_mean_var_reci):
        """Estimate centers and widths

        Parameters
        ----------

        unique_R : a list of array,
            Each element contains the unique values in one dimension of
            coordinate matrix R.

        inds : a list of array,
            Each element contains the indices to reconstruct one dimension of
            the original coordinate matrix from the unique array.

        X : 2D array, with shape [n_voxel, n_tr]
            fMRI data from one subject.

        W : 2D array, with shape [K, n_tr]
            The weight matrix.

        init_centers : 2D array, with shape [K, n_dim]
            The initial values of centers.

        init_widths : 1D array
            The initial values of widths.

        template_centers: 1D array
            The template prior on centers

        template_widths: 1D array
            The template prior on widths

        template_centers_mean_cov: 2D array, with shape [K, cov_size]
            The template prior on centers' mean

        template_widths_mean_var_reci: 1D array
            The reciprocal of template prior on variance of widths' mean

        Returns
        -------

        final_estimate.x: 1D array
            The newly estimated centers and widths.

        final_estimate.cost: float
            The cost value.

        """
        # least_squares only accepts x in 1D format
        init_estimate = np.hstack(
            (init_centers.ravel(), init_widths.ravel()))  # .copy()
        data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
        final_estimate = least_squares(
            self._residual_multivariate,
            init_estimate,
            args=(
                unique_R,
                inds,
                X,
                W,
                template_centers,
                template_widths,
                template_centers_mean_cov,
                template_widths_mean_var_reci,
                data_sigma),
            method=self.nlss_method,
            loss=self.nlss_loss,
            bounds=self.bounds,
            verbose=0,
            x_scale=self.x_scale,
            tr_solver=self.tr_solver)
        return final_estimate.x, final_estimate.cost
Estimate centers and widths

Parameters
----------

unique_R : a list of array,
    Each element contains the unique values in one dimension of
    coordinate matrix R.

inds : a list of array,
    Each element contains the indices to reconstruct one dimension of
    the original coordinate matrix from the unique array.

X : 2D array, with shape [n_voxel, n_tr]
    fMRI data from one subject.

W : 2D array, with shape [K, n_tr]
    The weight matrix.

init_centers : 2D array, with shape [K, n_dim]
    The initial values of centers.

init_widths : 1D array
    The initial values of widths.

template_centers: 1D array
    The template prior on centers

template_widths: 1D array
    The template prior on widths

template_centers_mean_cov: 2D array, with shape [K, cov_size]
    The template prior on centers' mean

template_widths_mean_var_reci: 1D array
    The reciprocal of template prior on variance of widths' mean

Returns
-------

final_estimate.x: 1D array
    The newly estimated centers and widths.

final_estimate.cost: float
    The cost value.
def run(self):
        """Run thread to listen for jobs and reschedule successful ones."""
        try:
            self.listen()
        except Exception as e:
            logger.critical("JobListener instance crashed. Error: %s",
                            str(e))
            logger.critical(traceback.format_exc())
Run thread to listen for jobs and reschedule successful ones.
def merge_tops(self, tops): ''' Cleanly merge the top files ''' top = collections.defaultdict(OrderedDict) orders = collections.defaultdict(OrderedDict) for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): if saltenv == 'include': continue for tgt in targets: matches = [] states = OrderedDict() orders[saltenv][tgt] = 0 ignore_missing = False # handle a pillar sls target written in shorthand form if isinstance(ctop[saltenv][tgt], six.string_types): ctop[saltenv][tgt] = [ctop[saltenv][tgt]] for comp in ctop[saltenv][tgt]: if isinstance(comp, dict): if 'match' in comp: matches.append(comp) if 'order' in comp: order = comp['order'] if not isinstance(order, int): try: order = int(order) except ValueError: order = 0 orders[saltenv][tgt] = order if comp.get('ignore_missing', False): ignore_missing = True if isinstance(comp, six.string_types): states[comp] = True if ignore_missing: if saltenv not in self.ignored_pillars: self.ignored_pillars[saltenv] = [] self.ignored_pillars[saltenv].extend(states.keys()) top[saltenv][tgt] = matches top[saltenv][tgt].extend(states) return self.sort_top_targets(top, orders)
Cleanly merge the top files
def _compile_models(models):
    """ Convert ``models`` into a list of tasks.

    Each task is a tuple ``(name, data)`` where ``name`` indicates the
    task type and ``data`` is the relevant data for that task.
    Supported tasks and data:

        - ``'fit'`` and list of models

        - ``'update-kargs'`` and ``None``

        - ``'update-prior'`` and ``None``

        - ``'wavg'`` and number of (previous) fits to average
    """
    tasklist = []
    for m in models:
        if isinstance(m, MultiFitterModel):
            tasklist += [('fit', [m])]
            tasklist += [('update-prior', None)]
        elif hasattr(m, 'keys'):
            tasklist += [('update-kargs', m)]
        elif isinstance(m, tuple):
            tasklist += [('fit', list(m))]
            tasklist += [('update-prior', None)]
        elif isinstance(m, list):
            for sm in m:
                if isinstance(sm, MultiFitterModel):
                    tasklist += [('fit', [sm])]
                elif isinstance(sm, tuple):
                    tasklist += [('fit', list(sm))]
                else:
                    raise ValueError(
                        'type {} not allowed in sublists '.format(
                            str(type(sm))
                            )
                        )
            tasklist += [('wavg', len(m))]
            tasklist += [('update-prior', None)]
        else:
            raise RuntimeError('bad model list')
    return tasklist
Convert ``models`` into a list of tasks.

Each task is a tuple ``(name, data)`` where ``name`` indicates the
task type and ``data`` is the relevant data for that task.
Supported tasks and data:

    - ``'fit'`` and list of models

    - ``'update-kargs'`` and ``None``

    - ``'update-prior'`` and ``None``

    - ``'wavg'`` and number of (previous) fits to average
def polygon(self): '''return a polygon for the fence''' points = [] for fp in self.points[1:]: points.append((fp.lat, fp.lng)) return points
return a polygon for the fence
def backoff(
        max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,
        delay=constants.BACKOFF_DEFAULT_DELAY,
        factor=constants.BACKOFF_DEFAULT_FACTOR,
        exceptions=None):
    """Implements an exponential backoff decorator which will retry decorated
    function upon given exceptions. This implementation is based on
    `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
    the *Python Decorator Library*.

    :param int max_tries: Number of tries before giving up.
        Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.

    :param int delay: Delay between retries (in seconds).
        Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.

    :param int factor: Factor by which the delay is multiplied before the
        next retry. Defaults to
        :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.

    :param exceptions: Tuple of exception types to catch that triggers
        retry. Any exception not listed will break the decorator and
        retry routines will not run.

    :type exceptions: tuple[Exception]

    """
    if max_tries <= 0:
        raise ValueError('Max tries must be greater than 0; got {!r}'.format(max_tries))

    if delay <= 0:
        raise ValueError('Delay must be greater than 0; got {!r}'.format(delay))

    if factor <= 1:
        raise ValueError('Backoff factor must be greater than 1; got {!r}'.format(factor))

    def outter(f):
        def inner(*args, **kwargs):
            m_max_tries, m_delay = max_tries, delay  # make mutable
            while m_max_tries > 0:
                try:
                    retval = f(*args, **kwargs)
                except exceptions:
                    logger.exception('backoff retry for: %r (max_tries=%r, delay=%r, '
                                     'factor=%r, exceptions=%r)',
                                     f, max_tries, delay, factor, exceptions)
                    m_max_tries -= 1  # consume an attempt
                    if m_max_tries <= 0:
                        raise  # run out of tries
                    time.sleep(m_delay)  # wait...
                    m_delay *= factor  # make future wait longer
                else:
                    # we're done without errors
                    return retval
        return inner
    return outter
Implements an exponential backoff decorator which will retry decorated
function upon given exceptions. This implementation is based on
`Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
the *Python Decorator Library*.

:param int max_tries: Number of tries before giving up.
    Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.

:param int delay: Delay between retries (in seconds).
    Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.

:param int factor: Factor by which the delay is multiplied before the next
    retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.

:param exceptions: Tuple of exception types to catch that triggers retry.
    Any exception not listed will break the decorator and retry routines
    will not run.

:type exceptions: tuple[Exception]
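A usage sketch of the decorator above; flaky_call and its failure mode are hypothetical, and the module-level logger and time import that the decorator relies on are assumed to be present.

import random

@backoff(max_tries=3, delay=1, factor=2, exceptions=(ConnectionError,))
def flaky_call():
    # Fails randomly to illustrate the retry behaviour.
    if random.random() < 0.5:
        raise ConnectionError('transient failure')
    return 'ok'

print(flaky_call())  # retried up to 3 times, waiting 1s then 2s between attempts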
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext """ if self._context is None: self._context = FeedbackContext( self._version, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid'], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
def get_or_create_candidate(self, row, party, race): """ Gets or creates the Candidate object for the given row of AP data. In order to tie with live data, this will synthesize the proper AP candidate id. This function also calls `get_or_create_person` to get a Person object to pass to Django. """ person = self.get_or_create_person(row) id_components = row["id"].split("-") candidate_id = "{0}-{1}".format(id_components[1], id_components[2]) defaults = {"party": party, "incumbent": row.get("incumbent")} if person.last_name == "None of these candidates": candidate_id = "{0}-{1}".format(id_components[0], candidate_id) candidate, created = election.Candidate.objects.update_or_create( person=person, race=race, ap_candidate_id=candidate_id, defaults=defaults, ) return candidate
Gets or creates the Candidate object for the given row of AP data. In order to tie with live data, this will synthesize the proper AP candidate id. This function also calls `get_or_create_person` to get a Person object to pass to Django.
def find_related(self, fullname): """ Return a list of non-stdlib modules that are imported directly or indirectly by `fullname`, plus their parents. This method is like :py:meth:`find_related_imports`, but also recursively searches any modules which are imported by `fullname`. :param fullname: Fully qualified name of an _already imported_ module for which source code can be retrieved :type fullname: str """ stack = [fullname] found = set() while stack: name = stack.pop(0) names = self.find_related_imports(name) stack.extend(set(names).difference(set(found).union(stack))) found.update(names) found.discard(fullname) return sorted(found)
Return a list of non-stdlib modules that are imported directly or indirectly by `fullname`, plus their parents. This method is like :py:meth:`find_related_imports`, but also recursively searches any modules which are imported by `fullname`. :param fullname: Fully qualified name of an _already imported_ module for which source code can be retrieved :type fullname: str
def set_release_description(self, description, **kwargs): """Set the release notes on the tag. If the release doesn't exist yet, it will be created. If it already exists, its description will be updated. Args: description (str): Description of the release. **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server fails to create the release GitlabUpdateError: If the server fails to update the release """ id = self.get_id().replace('/', '%2F') path = '%s/%s/release' % (self.manager.path, id) data = {'description': description} if self.release is None: try: server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs) except exc.GitlabHttpError as e: raise exc.GitlabCreateError(e.response_code, e.error_message) else: try: server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs) except exc.GitlabHttpError as e: raise exc.GitlabUpdateError(e.response_code, e.error_message) self.release = server_data
Set the release notes on the tag. If the release doesn't exist yet, it will be created. If it already exists, its description will be updated. Args: description (str): Description of the release. **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server fails to create the release GitlabUpdateError: If the server fails to update the release
def with_metaclass(meta, *bases): """ Create a base class with a metaclass. For example, if you have the metaclass >>> class Meta(type): ... pass Use this as the metaclass by doing >>> from symengine.compatibility import with_metaclass >>> class MyClass(with_metaclass(Meta, object)): ... pass This is equivalent to the Python 2:: class MyClass(object): __metaclass__ = Meta or Python 3:: class MyClass(object, metaclass=Meta): pass That is, the first argument is the metaclass, and the remaining arguments are the base classes. Note that if the base class is just ``object``, you may omit it. >>> MyClass.__mro__ (<class 'MyClass'>, <... 'object'>) >>> type(MyClass) <class 'Meta'> """ class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass("NewBase", None, {})
Create a base class with a metaclass. For example, if you have the metaclass >>> class Meta(type): ... pass Use this as the metaclass by doing >>> from symengine.compatibility import with_metaclass >>> class MyClass(with_metaclass(Meta, object)): ... pass This is equivalent to the Python 2:: class MyClass(object): __metaclass__ = Meta or Python 3:: class MyClass(object, metaclass=Meta): pass That is, the first argument is the metaclass, and the remaining arguments are the base classes. Note that if the base class is just ``object``, you may omit it. >>> MyClass.__mro__ (<class 'MyClass'>, <... 'object'>) >>> type(MyClass) <class 'Meta'>
def _find_v1_settings(self, settings): """Parse a v1 module_settings.json file. V1 is the older file format that requires a modules dictionary with a module_name and modules key that could in theory hold information on multiple modules in a single directory. """ if 'module_name' in settings: modname = settings['module_name'] if 'modules' not in settings or len(settings['modules']) == 0: raise DataError("No modules defined in module_settings.json file") elif len(settings['modules']) > 1: raise DataError("Multiple modules defined in module_settings.json file", modules=[x for x in settings['modules']]) else: modname = list(settings['modules'])[0] if modname not in settings['modules']: raise DataError("Module name does not correspond with an entry in the modules directory", name=modname, modules=[x for x in settings['modules']]) release_info = self._load_release_info(settings) modsettings = settings['modules'][modname] architectures = settings.get('architectures', {}) target_defs = settings.get('module_targets', {}) targets = target_defs.get(modname, []) return TileInfo(modname, modsettings, architectures, targets, release_info)
Parse a v1 module_settings.json file. V1 is the older file format that requires a modules dictionary with a module_name and modules key that could in theory hold information on multiple modules in a single directory.
def _create(self): """Create new callback resampler.""" from samplerate.lowlevel import ffi, src_callback_new, src_delete from samplerate.exceptions import ResamplingError state, handle, error = src_callback_new( self._callback, self._converter_type.value, self._channels) if error != 0: raise ResamplingError(error) self._state = ffi.gc(state, src_delete) self._handle = handle
Create new callback resampler.
def setcontents(source, identifier, pointer): """Patch existing bibliographic record.""" record = Record.get_record(identifier) Document(record, pointer).setcontents(source)
Patch existing bibliographic record.
def get_nn_info(self, structure, n): """ Get all near-neighbor sites as well as the associated image locations and weights of the site with index n using the closest relative neighbor distance-based method with VIRE atomic/ionic radii. Args: structure (Structure): input structure. n (integer): index of site for which to determine near neighbors. Returns: siw (list of tuples (Site, array, float)): tuples, each one of which represents a neighbor site, its image location, and its weight. """ vire = ValenceIonicRadiusEvaluator(structure) site = vire.structure[n] neighs_dists = vire.structure.get_neighbors(site, self.cutoff) rn = vire.radii[vire.structure[n].species_string] reldists_neighs = [] for neigh, dist in neighs_dists: reldists_neighs.append([dist / ( vire.radii[neigh.species_string] + rn), neigh]) siw = [] min_reldist = min([reldist for reldist, neigh in reldists_neighs]) for reldist, s in reldists_neighs: if reldist < (1.0 + self.tol) * min_reldist: w = min_reldist / reldist siw.append({'site': s, 'image': self._get_image(vire.structure, s), 'weight': w, 'site_index': self._get_original_site( vire.structure, s)}) return siw
Get all near-neighbor sites as well as the associated image locations and weights of the site with index n using the closest relative neighbor distance-based method with VIRE atomic/ionic radii. Args: structure (Structure): input structure. n (integer): index of site for which to determine near neighbors. Returns: siw (list of tuples (Site, array, float)): tuples, each one of which represents a neighbor site, its image location, and its weight.
def convert_table(self, block): """"Converts a table to grid table format""" lines_orig = block.split('\n') lines_orig.pop() # Remove extra newline at end of block widest_cell = [] # Will hold the width of the widest cell for each column widest_word = [] # Will hold the width of the widest word for each column widths = [] # Will hold the computed widths of grid table columns rows = [] # Will hold table cells during processing lines = [] # Will hold the finished table has_border = False # Will be set to True if this is a bordered table width_unit = 0.0 # This number is used to divide up self.width according # to the following formula: # # self.width = width_unit * maxwidth # # Where maxwidth is the sum over all elements of # widest_cell. # Only process tables, leave everything else untouched if not self.test(None, block): return lines_orig if lines_orig[0].startswith('|'): has_border = True # Initialize width arrays for i in range(0, len(self._split_row(lines_orig[0], has_border))): widest_cell.append(0) widest_word.append(0) widths.append(0) # Parse lines into array of cells and record width of widest cell/word for line in lines_orig: row = self._split_row(line, has_border) # pad widest_cell to account for under length first row for i in range(0, len(row) - len(widest_cell)): widest_cell.append(0) widest_word.append(0) widths.append(0) for i in range(0, len(row)): # Record cell width if len(row[i]) > widest_cell[i]: widest_cell[i] = len(row[i]) # Record longest word words = row[i].split() for word in words: # Keep URLs from throwing the word length count off too badly. match = re.match(r'\[(.*?)\]\(.*?\)', word) if match: word = match.group(1) if len(word) > widest_word[i]: widest_word[i] = len(word) rows.append(row) # Remove table header divider line from rows rows.pop(1) # Compute first approximation of column widths based on maximum cell width for width in widest_cell: width_unit += float(width) width_unit = self.width / width_unit for i in range(0, len(widest_cell)): widths[i] = int(widest_cell[i] * width_unit) # Add rounding errors to narrowest column if sum(widths) < self.width: widths[widths.index(min(widths))] += self.width - sum(widths) # Attempt to correct first approximation of column widths based on # words that fail to fit their cell's width (if this fails textwrap # will break up long words but since it does not add hyphens this # should be avoided) for i in range(0, len(widths)): if widths[i] < widest_word[i]: offset = widest_word[i] - widths[i] for j in range(0, len(widths)): if widths[j] - widest_word[j] >= offset: widths[j] -= offset widths[i] += offset offset = 0 lines.append(self.ruler_line(widths, linetype='-')) # Only add header row if it contains more than just whitespace if ''.join(rows[0]).strip() != '': lines.extend(self.wrap_row(widths, rows[0])) lines.append(self.ruler_line(widths, linetype='=')) for row in rows[1:]: # Skip empty rows if ''.join(row).strip() == '': continue lines.extend(self.wrap_row(widths, row)) lines.append(self.ruler_line(widths, linetype='-')) # Append empty line after table lines.append('') return lines
Converts a table to grid table format
def dev_null_wrapper(func, *a, **kwargs):
    """
    Temporarily swap stdout with /dev/null, and execute given function while
    stdout goes to /dev/null.

    This is useful because netsnmp writes to stdout and disturbs the Icinga
    result in some cases.
    """
    os.dup2(dev_null, sys.stdout.fileno())
    return_object = func(*a, **kwargs)
    sys.stdout.flush()

    os.dup2(tmp_stdout, sys.stdout.fileno())
    return return_object
Temporarily swap stdout with /dev/null, and execute given function while
stdout goes to /dev/null.

This is useful because netsnmp writes to stdout and disturbs the Icinga
result in some cases.
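A usage sketch; it assumes the module-level dev_null and tmp_stdout descriptors (not shown here) have already been prepared, e.g. dev_null = os.open(os.devnull, os.O_WRONLY) and tmp_stdout = os.dup(sys.stdout.fileno()).

def noisy_check(x):
    print('chatter that would otherwise pollute the plugin output')
    return x * 2

result = dev_null_wrapper(noisy_check, 21)
print(result)  # 42 -- only this line reaches the real stdout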
def prepare(self):
        """Behaves like a middleware between the raw request and the handling process.

        If `PREPARES` is defined on the handler class, it should be a list such as
        ['auth', 'context']; for each string in that list, the method whose name is
        formed by the prefix 'prepare_' plus the string is executed in sequence.
        In this example, those methods are `prepare_auth` and `prepare_context`.
        """
        if settings['LOG_REQUEST']:
            log_request(self)

        for i in self.PREPARES:
            getattr(self, 'prepare_' + i)()
            if self._finished:
                return
Behaves like a middleware between the raw request and the handling process.

If `PREPARES` is defined on the handler class, it should be a list such as
['auth', 'context']; for each string in that list, the method whose name is
formed by the prefix 'prepare_' plus the string is executed in sequence.
In this example, those methods are `prepare_auth` and `prepare_context`.
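A hypothetical handler sketch showing how PREPARES maps names to prepare_<name> methods; BaseHandler and the attribute names are illustrative, not taken from the source.

class MyHandler(BaseHandler):  # BaseHandler is assumed to define prepare() as above
    PREPARES = ['auth', 'context']

    def prepare_auth(self):     # runs first
        self.user = self.request.headers.get('X-User')

    def prepare_context(self):  # runs second
        self.context = {'user': self.user}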
def randstr(self): """ -> #str result of :func:gen_rand_str """ return gen_rand_str( 4, 10, use=self.random, keyspace=list(string.ascii_letters))
-> #str result of :func:gen_rand_str
def ceilpow2(n):
    """convenience function to determine a power-of-2 upper frequency limit"""
    from math import frexp  # split n into mantissa and exponent
    signif, exponent = frexp(n)
    if signif < 0:
        return 1
    if signif == 0.5:
        exponent -= 1
    return 1 << exponent
convenience function to determine a power-of-2 upper frequency limit
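A few illustrative calls, assuming n is a positive number; the function returns the smallest power of two that is not below n.

print(ceilpow2(1000))  # 1024
print(ceilpow2(1024))  # 1024 (exact powers of two are returned unchanged)
print(ceilpow2(1025))  # 2048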
def lint(filename, options=()): """Pylint the given file. When run from emacs we will be in the directory of a file, and passed its filename. If this file is part of a package and is trying to import other modules from within its own package or another package rooted in a directory below it, pylint will classify it as a failed import. To get around this, we traverse down the directory tree to find the root of the package this module is in. We then invoke pylint from this directory. Finally, we must correct the filenames in the output generated by pylint so Emacs doesn't become confused (it will expect just the original filename, while pylint may extend it with extra directories if we've traversed down the tree) """ # traverse downwards until we are out of a python package full_path = osp.abspath(filename) parent_path = osp.dirname(full_path) child_path = osp.basename(full_path) while parent_path != "/" and osp.exists(osp.join(parent_path, "__init__.py")): child_path = osp.join(osp.basename(parent_path), child_path) parent_path = osp.dirname(parent_path) # Start pylint # Ensure we use the python and pylint associated with the running epylint run_cmd = "import sys; from pylint.lint import Run; Run(sys.argv[1:])" cmd = ( [sys.executable, "-c", run_cmd] + [ "--msg-template", "{path}:{line}: {category} ({msg_id}, {symbol}, {obj}) {msg}", "-r", "n", child_path, ] + list(options) ) process = Popen( cmd, stdout=PIPE, cwd=parent_path, env=_get_env(), universal_newlines=True ) for line in process.stdout: # remove pylintrc warning if line.startswith("No config file found"): continue # modify the file name thats output to reverse the path traversal we made parts = line.split(":") if parts and parts[0] == child_path: line = ":".join([filename] + parts[1:]) print(line, end=" ") process.wait() return process.returncode
Pylint the given file. When run from emacs we will be in the directory of a file, and passed its filename. If this file is part of a package and is trying to import other modules from within its own package or another package rooted in a directory below it, pylint will classify it as a failed import. To get around this, we traverse down the directory tree to find the root of the package this module is in. We then invoke pylint from this directory. Finally, we must correct the filenames in the output generated by pylint so Emacs doesn't become confused (it will expect just the original filename, while pylint may extend it with extra directories if we've traversed down the tree)
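A hypothetical invocation of lint; the path and the extra pylint option are placeholders.

# Run pylint on a module from the root of its package, forwarding extra options.
status = lint('mypackage/module.py', options=['--disable=missing-docstring'])
print('pylint exit status:', status)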
def _start_element (self, tag, attrs, end): """ Print HTML element with end string. @param tag: tag name @type tag: string @param attrs: tag attributes @type attrs: dict @param end: either > or /> @type end: string @return: None """ tag = tag.encode(self.encoding, "ignore") self.fd.write("<%s" % tag.replace("/", "")) for key, val in attrs.items(): key = key.encode(self.encoding, "ignore") if val is None: self.fd.write(" %s" % key) else: val = val.encode(self.encoding, "ignore") self.fd.write(' %s="%s"' % (key, quote_attrval(val))) self.fd.write(end)
Print HTML element with end string. @param tag: tag name @type tag: string @param attrs: tag attributes @type attrs: dict @param end: either > or /> @type end: string @return: None
def cee_map_priority_table_map_cos1_pgid(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map") name_key = ET.SubElement(cee_map, "name") name_key.text = kwargs.pop('name') priority_table = ET.SubElement(cee_map, "priority-table") map_cos1_pgid = ET.SubElement(priority_table, "map-cos1-pgid") map_cos1_pgid.text = kwargs.pop('map_cos1_pgid') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def members(self): """ Access the members :returns: twilio.rest.chat.v1.service.channel.member.MemberList :rtype: twilio.rest.chat.v1.service.channel.member.MemberList """ if self._members is None: self._members = MemberList( self._version, service_sid=self._solution['service_sid'], channel_sid=self._solution['sid'], ) return self._members
Access the members :returns: twilio.rest.chat.v1.service.channel.member.MemberList :rtype: twilio.rest.chat.v1.service.channel.member.MemberList
def get_folders(self): """Return list of user's folders. :rtype: list """ path = 'folders/list' response = self.request(path) items = response['data'] folders = [] for item in items: if item.get('type') == 'error': raise Exception(item.get('message')) elif item.get('type') == 'folder': folders.append(Folder(self, **item)) return folders
Return list of user's folders. :rtype: list
def _set_offset_base1(self, v, load=False): """ Setter method for offset_base1, mapped from YANG variable /uda_key/profile/uda_profile_offsets/offset_base1 (uda-offset-base-type) If this variable is read-only (config: false) in the source YANG file, then _set_offset_base1 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_offset_base1() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'first-header': {'value': 1}, u'packet-start': {'value': 0}, u'fourth-header': {'value': 4}, u'second-header': {'value': 2}, u'third-header': {'value': 3}},), is_leaf=True, yang_name="offset-base1", rest_name="offset-base1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-offset-base-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """offset_base1 must be of a type compatible with uda-offset-base-type""", 'defined-type': "brocade-uda-access-list:uda-offset-base-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'first-header': {'value': 1}, u'packet-start': {'value': 0}, u'fourth-header': {'value': 4}, u'second-header': {'value': 2}, u'third-header': {'value': 3}},), is_leaf=True, yang_name="offset-base1", rest_name="offset-base1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-uda-access-list', defining_module='brocade-uda-access-list', yang_type='uda-offset-base-type', is_config=True)""", }) self.__offset_base1 = t if hasattr(self, '_set'): self._set()
Setter method for offset_base1, mapped from YANG variable /uda_key/profile/uda_profile_offsets/offset_base1 (uda-offset-base-type) If this variable is read-only (config: false) in the source YANG file, then _set_offset_base1 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_offset_base1() directly.
def setBackground(self,bg):
        """
        Sets the background of the Container.

        Similar to :py:meth:`peng3d.gui.SubMenu.setBackground()`\ , but only
        affects the region covered by the Container.
        """
        self.bg = bg
        if isinstance(bg,list) or isinstance(bg,tuple):
            if len(bg)==3 and isinstance(bg,list):
                bg.append(255)
            self.bg_vlist.colors = bg*4
        elif bg in ["flat","gradient","oldshadow","material"]:
            self.bg = ContainerButtonBackground(self,borderstyle=bg,batch=self.batch2d)
            self.redraw()
Sets the background of the Container.

Similar to :py:meth:`peng3d.gui.SubMenu.setBackground()`\ , but only affects
the region covered by the Container.
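A hypothetical call sketch; container stands for an existing Container instance.

container.setBackground([32, 32, 32])   # RGB list; alpha 255 is appended automatically
container.setBackground("gradient")     # switches to a ContainerButtonBackground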
def check_style(value):
    """
    Validate a logging format style.

    :param value: The logging format style to validate (any value).
    :returns: The logging format character (a string of one character).
    :raises: :exc:`~exceptions.ValueError` when the given style isn't supported.

    On Python 3.2+ this function accepts the logging format styles ``%``, ``{``
    and ``$`` while on older versions only ``%`` is accepted (because older
    Python versions don't support alternative logging format styles).
    """
    if sys.version_info[:2] >= (3, 2):
        if value not in FORMAT_STYLE_PATTERNS:
            msg = "Unsupported logging format style! (%r)"
            # interpolate the offending value into the message before raising
            raise ValueError(msg % value)
    elif value != DEFAULT_FORMAT_STYLE:
        msg = "Format string styles other than %r require Python 3.2+!"
        raise ValueError(msg % DEFAULT_FORMAT_STYLE)
    return value
Validate a logging format style. :param value: The logging format style to validate (any value). :returns: The logging format character (a string of one character). :raises: :exc:`~exceptions.ValueError` when the given style isn't supported. On Python 3.2+ this function accepts the logging format styles ``%``, ``{`` and ``$`` while on older versions only ``%`` is accepted (because older Python versions don't support alternative logging format styles).
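Illustrative calls, assuming the module-level FORMAT_STYLE_PATTERNS covers '%', '{' and '$' and DEFAULT_FORMAT_STYLE is '%' (both constants live elsewhere in the module).

print(check_style('%'))  # '%' is accepted on every supported Python version
print(check_style('{'))  # accepted on Python 3.2+, otherwise raises ValueError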
def detect_complexity(bam_in, genome, out): """ genome coverage of small RNA """ if not genome: logger.info("No genome given. skipping.") return None out_file = op.join(out, op.basename(bam_in) + "_cov.tsv") if file_exists(out_file): return None fai = genome + ".fai" cov = pybedtools.BedTool(bam_in).genome_coverage(g=fai, max=1) cov.saveas(out_file) total = 0 for region in cov: if region[0] == "genome" and int(region[1]) != 0: total += float(region[4]) logger.info("Total genome with sequences: %s " % total)
genome coverage of small RNA
def sign_decorated(self, data): """Sign a bytes-like object and return the decorated signature. Sign a bytes-like object by signing the data using the signing (private) key, and return a decorated signature, which includes the last four bytes of the public key as a signature hint to go along with the signature as an XDR DecoratedSignature object. :param bytes data: A sequence of bytes to sign, typically a transaction. """ signature = self.sign(data) hint = self.signature_hint() return Xdr.types.DecoratedSignature(hint, signature)
Sign a bytes-like object and return the decorated signature. Sign a bytes-like object by signing the data using the signing (private) key, and return a decorated signature, which includes the last four bytes of the public key as a signature hint to go along with the signature as an XDR DecoratedSignature object. :param bytes data: A sequence of bytes to sign, typically a transaction.
def lookup_ids(self, keys): """Lookup the integer ID associated with each (namespace, key) in the keys list""" keys_len = len(keys) ids = {namespace_key: None for namespace_key in keys} start = 0 bulk_insert = self.bulk_insert query = 'SELECT namespace, key, id FROM gauged_keys WHERE ' check = '(namespace = %s AND key = %s) ' cursor = self.cursor execute = cursor.execute while start < keys_len: rows = keys[start:start+bulk_insert] params = [param for params in rows for param in params] id_query = query + (check + ' OR ') * (len(rows) - 1) + check execute(id_query, params) for namespace, key, id_ in cursor: ids[(namespace, key)] = id_ start += bulk_insert return ids
Lookup the integer ID associated with each (namespace, key) in the keys list
def column_coordinates(self, X): """The column principal coordinates.""" utils.validation.check_is_fitted(self, 'V_') _, _, _, col_names = util.make_labels_and_names(X) if isinstance(X, pd.SparseDataFrame): X = X.to_coo() elif isinstance(X, pd.DataFrame): X = X.to_numpy() if self.copy: X = X.copy() # Transpose and make sure the rows sum up to 1 if isinstance(X, np.ndarray): X = X.T / X.T.sum(axis=1)[:, None] else: X = X.T / X.T.sum(axis=1) return pd.DataFrame( data=X @ sparse.diags(self.row_masses_.to_numpy() ** -0.5) @ self.U_, index=col_names )
The column principal coordinates.
def from_frame(klass, frame, connection): """ Create a new BuildStateChange event from a Stompest Frame. """ event = frame.headers['new'] data = json.loads(frame.body) info = data['info'] build = Build.fromDict(info) build.connection = connection return klass(build, event)
Create a new BuildStateChange event from a Stompest Frame.
def _buildvgrid(self,R,phi,nsigma,t,sigmaR1,sigmaT1,meanvR,meanvT, gridpoints,print_progress,integrate_method,deriv): """Internal function to grid the vDF at a given location""" out= evolveddiskdfGrid() out.sigmaR1= sigmaR1 out.sigmaT1= sigmaT1 out.meanvR= meanvR out.meanvT= meanvT out.vRgrid= nu.linspace(meanvR-nsigma*sigmaR1,meanvR+nsigma*sigmaR1, gridpoints) out.vTgrid= nu.linspace(meanvT-nsigma*sigmaT1,meanvT+nsigma*sigmaT1, gridpoints) if isinstance(t,(list,nu.ndarray)): nt= len(t) out.df= nu.zeros((gridpoints,gridpoints,nt)) for ii in range(gridpoints): for jj in range(gridpoints-1,-1,-1):#Reverse, so we get the peak before we get to the extreme lags NOT NECESSARY if print_progress: #pragma: no cover sys.stdout.write('\r'+"Velocity gridpoint %i out of %i" % \ (jj+ii*gridpoints+1,gridpoints*gridpoints)) sys.stdout.flush() thiso= Orbit([R,out.vRgrid[ii],out.vTgrid[jj],phi]) out.df[ii,jj,:]= self(thiso,nu.array(t).flatten(), integrate_method=integrate_method, deriv=deriv,use_physical=False) out.df[ii,jj,nu.isnan(out.df[ii,jj,:])]= 0. #BOVY: for now if print_progress: sys.stdout.write('\n') #pragma: no cover else: out.df= nu.zeros((gridpoints,gridpoints)) for ii in range(gridpoints): for jj in range(gridpoints): if print_progress: #pragma: no cover sys.stdout.write('\r'+"Velocity gridpoint %i out of %i" % \ (jj+ii*gridpoints+1,gridpoints*gridpoints)) sys.stdout.flush() thiso= Orbit([R,out.vRgrid[ii],out.vTgrid[jj],phi]) out.df[ii,jj]= self(thiso,t, integrate_method=integrate_method, deriv=deriv,use_physical=False) if nu.isnan(out.df[ii,jj]): out.df[ii,jj]= 0. #BOVY: for now if print_progress: sys.stdout.write('\n') #pragma: no cover return out
Internal function to grid the vDF at a given location
def routes(family=None): ''' Return currently configured routes from routing table .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' network.routes ''' if family != 'inet' and family != 'inet6' and family is not None: raise CommandExecutionError('Invalid address family {0}'.format(family)) if __grains__['kernel'] == 'Linux': if not salt.utils.path.which('netstat'): routes_ = _ip_route_linux() else: routes_ = _netstat_route_linux() elif __grains__['kernel'] == 'SunOS': routes_ = _netstat_route_sunos() elif __grains__['os'] in ['FreeBSD', 'MacOS', 'Darwin']: routes_ = _netstat_route_freebsd() elif __grains__['os'] in ['NetBSD']: routes_ = _netstat_route_netbsd() elif __grains__['os'] in ['OpenBSD']: routes_ = _netstat_route_openbsd() elif __grains__['os'] in ['AIX']: routes_ = _netstat_route_aix() else: raise CommandExecutionError('Not yet supported on this platform') if not family: return routes_ else: ret = [route for route in routes_ if route['addr_family'] == family] return ret
Return currently configured routes from routing table .. versionchanged:: 2015.8.0 Added support for SunOS (Solaris 10, Illumos, SmartOS) .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' network.routes
def report_parsing_problems(parsing_out): """Output message about potential parsing problems.""" _, empty, faulty = parsing_out if CONFIG_FILE in empty or CONFIG_FILE in faulty: print('Unable to read global config file', CONFIG_FILE, file=sys.stderr) print('Please run stagpy config --create', sep='\n', end='\n\n', file=sys.stderr) if CONFIG_LOCAL in faulty: print('Unable to read local config file', CONFIG_LOCAL, file=sys.stderr) print('Please run stagpy config --create_local', sep='\n', end='\n\n', file=sys.stderr)
Output message about potential parsing problems.
def set_locs(self, locs): 'Sets the locations of the ticks' # don't actually use the locs. This is just needed to work with # matplotlib. Force to use vmin, vmax _check_implicitly_registered() self.locs = locs (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None self.plot_obj.view_interval = vi if vmax < vmin: (vmin, vmax) = (vmax, vmin) self._set_default_format(vmin, vmax)
Sets the locations of the ticks
def get(self, key, value): """Get a single record by id Supports resource cache .. versionchanged:: 2.17.0 Added option to retrieve record by tracking_id Keyword Args: id (str): Full record ID tracking_id (str): Record Tracking ID Returns: Record: Matching Record instance returned from API Raises: TypeError: No id argument provided """ if key == 'id': response = self._swimlane.request('get', "app/{0}/record/{1}".format(self._app.id, value)) return Record(self._app, response.json()) if key == 'tracking_id': response = self._swimlane.request('get', "app/{0}/record/tracking/{1}".format(self._app.id, value)) return Record(self._app, response.json())
Get a single record by id Supports resource cache .. versionchanged:: 2.17.0 Added option to retrieve record by tracking_id Keyword Args: id (str): Full record ID tracking_id (str): Record Tracking ID Returns: Record: Matching Record instance returned from API Raises: TypeError: No id argument provided