def weighted_mean_and_std(values, weights):
    """
    Returns the weighted average and standard deviation.

    values, weights -- numpy ndarrays with the same shape.
    """
    average = np.average(values, weights=weights, axis=0)
    variance = np.dot(weights, (values - average) ** 2) / weights.sum()  # Fast and numerically precise
    return (average, np.sqrt(variance))
Returns the weighted average and standard deviation. values, weights -- numpy ndarrays with the same shape.
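A minimal usage sketch for the entry above, assuming `weighted_mean_and_std` is in scope and NumPy is imported as `np` (as its body expects); the sample values are made up:

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([0.1, 0.2, 0.3, 0.4])
mean, std = weighted_mean_and_std(values, weights)
print(mean, std)  # 3.0 1.0 -- weighted mean and weighted standard deviation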
def compute_plot_size(plot):
    """
    Computes the size of bokeh models that make up a layout such as
    figures, rows, columns, widgetboxes and Plot.
    """
    if isinstance(plot, GridBox):
        ndmapping = NdMapping({(x, y): fig for fig, y, x in plot.children}, kdims=['x', 'y'])
        cols = ndmapping.groupby('x')
        rows = ndmapping.groupby('y')
        width = sum([max([compute_plot_size(f)[0] for f in col]) for col in cols])
        height = sum([max([compute_plot_size(f)[1] for f in row]) for row in rows])
        return width, height
    elif isinstance(plot, (Div, ToolbarBox)):
        # Cannot compute size for Div or ToolbarBox
        return 0, 0
    elif isinstance(plot, (Row, Column, WidgetBox, Tabs)):
        if not plot.children:
            return 0, 0
        if isinstance(plot, Row) or (isinstance(plot, ToolbarBox) and
                                     plot.toolbar_location not in ['right', 'left']):
            w_agg, h_agg = (np.sum, np.max)
        elif isinstance(plot, Tabs):
            w_agg, h_agg = (np.max, np.max)
        else:
            w_agg, h_agg = (np.max, np.sum)
        widths, heights = zip(*[compute_plot_size(child) for child in plot.children])
        return w_agg(widths), h_agg(heights)
    elif isinstance(plot, (Figure, Chart)):
        if plot.plot_width:
            width = plot.plot_width
        else:
            width = plot.frame_width + plot.min_border_right + plot.min_border_left
        if plot.plot_height:
            height = plot.plot_height
        else:
            height = plot.frame_height + plot.min_border_bottom + plot.min_border_top
        return width, height
    elif isinstance(plot, (Plot, DataTable, Spacer)):
        return plot.width, plot.height
    else:
        return 0, 0
Computes the size of bokeh models that make up a layout such as figures, rows, columns, widgetboxes and Plot.
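A small usage sketch for the sizing helper above, assuming its bokeh imports are available and an older bokeh API where figures still expose plot_width/plot_height:

from bokeh.plotting import figure

fig = figure(plot_width=300, plot_height=200)
# Figure branch: explicit plot_width/plot_height are returned as-is
print(compute_plot_size(fig))  # (300, 200)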
def create_protocol(self):
    """Create a new protocol via the :attr:`protocol_factory`

    This method increases the count of :attr:`sessions` and builds the
    protocol passing ``self`` as the producer.
    """
    self.sessions += 1
    protocol = self.protocol_factory(self)
    protocol.copy_many_times_events(self)
    return protocol
Create a new protocol via the :attr:`protocol_factory` This method increases the count of :attr:`sessions` and builds the protocol passing ``self`` as the producer.
def integrate(self, wavelengths=None, **kwargs): """Perform integration. This uses any analytical integral that the underlying model has (i.e., ``self.model.integral``). If unavailable, it uses the default fall-back integrator set in the ``default_integrator`` configuration item. If wavelengths are provided, flux or throughput is first resampled. This is useful when user wants to integrate at specific end points or use custom spacing; In that case, user can pass in desired sampling array generated with :func:`numpy.linspace`, :func:`numpy.logspace`, etc. If not provided, then `waveset` is used. Parameters ---------- wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None` Wavelength values for integration. If not a Quantity, assumed to be in Angstrom. If `None`, `waveset` is used. kwargs : dict Optional keywords to ``__call__`` for sampling. Returns ------- result : `~astropy.units.quantity.Quantity` Integrated result. Raises ------ NotImplementedError Invalid default integrator. synphot.exceptions.SynphotError `waveset` is needed but undefined or cannot integrate natively in the given ``flux_unit``. """ # Cannot integrate per Hz units natively across wavelength # without converting them to per Angstrom unit first, so # less misleading to just disallow that option for now. if 'flux_unit' in kwargs: self._validate_flux_unit(kwargs['flux_unit'], wav_only=True) x = self._validate_wavelengths(wavelengths) # TODO: When astropy.modeling.models supports this, need to # make sure that this actually works, and gives correct unit. # https://github.com/astropy/astropy/issues/5033 # https://github.com/astropy/astropy/pull/5108 try: m = self.model.integral except (AttributeError, NotImplementedError): if conf.default_integrator == 'trapezoid': y = self(x, **kwargs) result = abs(np.trapz(y.value, x=x.value)) result_unit = y.unit else: # pragma: no cover raise NotImplementedError( 'Analytic integral not available and default integrator ' '{0} is not supported'.format(conf.default_integrator)) else: # pragma: no cover start = x[0].value stop = x[-1].value result = (m(stop) - m(start)) result_unit = self._internal_flux_unit # Ensure final unit takes account of integration across wavelength if result_unit != units.THROUGHPUT: if result_unit == units.PHOTLAM: result_unit = u.photon / (u.cm**2 * u.s) elif result_unit == units.FLAM: result_unit = u.erg / (u.cm**2 * u.s) else: # pragma: no cover raise NotImplementedError( 'Integration of {0} is not supported'.format(result_unit)) else: # Ideally flux can use this too but unfortunately this # operation results in confusing output unit for flux. result_unit *= self._internal_wave_unit return result * result_unit
Perform integration. This uses any analytical integral that the underlying model has (i.e., ``self.model.integral``). If unavailable, it uses the default fall-back integrator set in the ``default_integrator`` configuration item. If wavelengths are provided, flux or throughput is first resampled. This is useful when user wants to integrate at specific end points or use custom spacing; In that case, user can pass in desired sampling array generated with :func:`numpy.linspace`, :func:`numpy.logspace`, etc. If not provided, then `waveset` is used. Parameters ---------- wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None` Wavelength values for integration. If not a Quantity, assumed to be in Angstrom. If `None`, `waveset` is used. kwargs : dict Optional keywords to ``__call__`` for sampling. Returns ------- result : `~astropy.units.quantity.Quantity` Integrated result. Raises ------ NotImplementedError Invalid default integrator. synphot.exceptions.SynphotError `waveset` is needed but undefined or cannot integrate natively in the given ``flux_unit``.
async def get_guild_count(self, bot_id: int = None):
    """This function is a coroutine.

    Gets a guild count from discordbots.org

    Parameters
    ==========
    bot_id: int[Optional]
        The bot_id of the bot you want to lookup.
        Defaults to the Bot provided in Client init

    Returns
    =======
    stats: dict
        The guild count and shards of a bot.
        The date object is returned in a datetime.datetime object
    """
    if bot_id is None:
        bot_id = self.bot_id

    return await self.http.get_guild_count(bot_id)
This function is a coroutine. Gets a guild count from discordbots.org Parameters ========== bot_id: int[Optional] The bot_id of the bot you want to lookup. Defaults to the Bot provided in Client init Returns ======= stats: dict The guild count and shards of a bot. The date object is returned in a datetime.datetime object
def pr0_to_likelihood_array(outcomes, pr0):
    """
    Assuming a two-outcome measurement with probabilities given by the
    array ``pr0``, returns an array of the form expected to be returned
    by ``likelihood`` method.

    :param numpy.ndarray outcomes: Array of integers indexing outcomes.
    :param numpy.ndarray pr0: Array of shape ``(n_models, n_experiments)``
        describing the probability of obtaining outcome ``0`` from each
        set of model parameters and experiment parameters.
    """
    pr0 = pr0[np.newaxis, ...]
    pr1 = 1 - pr0

    if len(np.shape(outcomes)) == 0:
        outcomes = np.array(outcomes)[None]

    return np.concatenate([
        pr0 if outcomes[idx] == 0 else pr1
        for idx in range(safe_shape(outcomes))
    ])
Assuming a two-outcome measurement with probabilities given by the array ``pr0``, returns an array of the form expected to be returned by ``likelihood`` method. :param numpy.ndarray outcomes: Array of integers indexing outcomes. :param numpy.ndarray pr0: Array of shape ``(n_models, n_experiments)`` describing the probability of obtaining outcome ``0`` from each set of model parameters and experiment parameters.
def search_grouping(stmt, name):
    """Search for a grouping in scope

    First search the hierarchy, then the module and its submodules."""
    mod = stmt.i_orig_module
    while stmt is not None:
        if name in stmt.i_groupings:
            g = stmt.i_groupings[name]
            if (mod is not None and
                    mod != g.i_orig_module and
                    g.i_orig_module.keyword == 'submodule'):
                # make sure this submodule is included
                if mod.search_one('include', g.i_orig_module.arg) is None:
                    return None
            return g
        stmt = stmt.parent
    return None
Search for a grouping in scope First search the hierarchy, then the module and its submodules.
def parse_file_name_starting_position(self):
    """ Returns (latitude, longitude) of lower left point of the file """
    groups = mod_re.findall('([NS])(\d+)([EW])(\d+)\.hgt', self.file_name)

    assert groups and len(groups) == 1 and len(groups[0]) == 4, \
        'Invalid file name {0}'.format(self.file_name)

    groups = groups[0]

    if groups[0] == 'N':
        latitude = float(groups[1])
    else:
        latitude = -float(groups[1])

    if groups[2] == 'E':
        longitude = float(groups[3])
    else:
        longitude = -float(groups[3])

    self.latitude = latitude
    self.longitude = longitude
Returns (latitude, longitude) of lower left point of the file
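A quick illustration of the SRTM-style .hgt naming convention this parser expects; the snippet applies the same regex standalone, so it does not need the class instance (the tile name is hypothetical):

import re

file_name = 'N46E006.hgt'  # 46 degrees north, 6 degrees east
groups = re.findall(r'([NS])(\d+)([EW])(\d+)\.hgt', file_name)[0]
latitude = float(groups[1]) if groups[0] == 'N' else -float(groups[1])
longitude = float(groups[3]) if groups[2] == 'E' else -float(groups[3])
print(latitude, longitude)  # 46.0 6.0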
def connect(servers=None, framed_transport=False, timeout=None, retry_time=60, recycle=None, round_robin=None, max_retries=3): """ Constructs a single ElasticSearch connection. Connects to a randomly chosen server on the list. If the connection fails, it will attempt to connect to each server on the list in turn until one succeeds. If it is unable to find an active server, it will throw a NoServerAvailable exception. Failing servers are kept on a separate list and eventually retried, no sooner than `retry_time` seconds after failure. :keyword servers: [server] List of ES servers with format: "hostname:port" Default: [("127.0.0.1",9500)] :keyword framed_transport: If True, use a TFramedTransport instead of a TBufferedTransport :keyword timeout: Timeout in seconds (e.g. 0.5) Default: None (it will stall forever) :keyword retry_time: Minimum time in seconds until a failed server is reinstated. (e.g. 0.5) Default: 60 :keyword recycle: Max time in seconds before an open connection is closed and returned to the pool. Default: None (Never recycle) :keyword max_retries: Max retry time on connection down :keyword round_robin: *DEPRECATED* :return ES client """ if servers is None: servers = [DEFAULT_SERVER] return ThreadLocalConnection(servers, framed_transport, timeout, retry_time, recycle, max_retries=max_retries)
Constructs a single ElasticSearch connection. Connects to a randomly chosen server on the list. If the connection fails, it will attempt to connect to each server on the list in turn until one succeeds. If it is unable to find an active server, it will throw a NoServerAvailable exception. Failing servers are kept on a separate list and eventually retried, no sooner than `retry_time` seconds after failure. :keyword servers: [server] List of ES servers with format: "hostname:port" Default: [("127.0.0.1",9500)] :keyword framed_transport: If True, use a TFramedTransport instead of a TBufferedTransport :keyword timeout: Timeout in seconds (e.g. 0.5) Default: None (it will stall forever) :keyword retry_time: Minimum time in seconds until a failed server is reinstated. (e.g. 0.5) Default: 60 :keyword recycle: Max time in seconds before an open connection is closed and returned to the pool. Default: None (Never recycle) :keyword max_retries: Max retry time on connection down :keyword round_robin: *DEPRECATED* :return ES client
def assert_credentials_match(self, verifier, authc_token, account):
    """
    :type verifier: authc_abcs.CredentialsVerifier
    :type authc_token: authc_abcs.AuthenticationToken
    :type account: account_abcs.Account
    :returns: account_abcs.Account
    :raises IncorrectCredentialsException: when authentication fails,
        including unix epoch timestamps of recently failed attempts
    """
    cred_type = authc_token.token_info['cred_type']

    try:
        verifier.verify_credentials(authc_token, account['authc_info'])
    except IncorrectCredentialsException:
        updated_account = self.update_failed_attempt(authc_token, account)
        failed_attempts = updated_account['authc_info'][cred_type].\
            get('failed_attempts', [])
        raise IncorrectCredentialsException(failed_attempts)
    except ConsumedTOTPToken:
        account['authc_info'][cred_type]['consumed_token'] = authc_token.credentials
        self.cache_handler.set(domain='authentication:' + self.name,
                               identifier=authc_token.identifier,
                               value=account)
:type verifier: authc_abcs.CredentialsVerifier :type authc_token: authc_abcs.AuthenticationToken :type account: account_abcs.Account :returns: account_abcs.Account :raises IncorrectCredentialsException: when authentication fails, including unix epoch timestamps of recently failed attempts
def console_exec(thread_id, frame_id, expression, dbg): """returns 'False' in case expression is partially correct """ frame = dbg.find_frame(thread_id, frame_id) is_multiline = expression.count('@LINE@') > 1 expression = str(expression.replace('@LINE@', '\n')) # Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 # (Names not resolved in generator expression in method) # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals if IPYTHON: need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg) if not need_more: pydevd_save_locals.save_locals(frame) return need_more interpreter = ConsoleWriter() if not is_multiline: try: code = compile_command(expression) except (OverflowError, SyntaxError, ValueError): # Case 1 interpreter.showsyntaxerror() return False if code is None: # Case 2 return True else: code = expression # Case 3 try: Exec(code, updated_globals, frame.f_locals) except SystemExit: raise except: interpreter.showtraceback() else: pydevd_save_locals.save_locals(frame) return False
returns 'False' in case expression is partially correct
def item_properties(self, handle):
    """Return properties of the item with the given handle."""
    logger.debug("Getting properties for handle: {}".format(handle))
    properties = {
        'size_in_bytes': self.get_size_in_bytes(handle),
        'utc_timestamp': self.get_utc_timestamp(handle),
        'hash': self.get_hash(handle),
        'relpath': self.get_relpath(handle)
    }
    logger.debug("{} properties: {}".format(handle, properties))
    return properties
Return properties of the item with the given handle.
def _set_show_system_info(self, v, load=False): """ Setter method for show_system_info, mapped from YANG variable /brocade_ras_ext_rpc/show_system_info (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_system_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_system_info() directly. YANG Description: Shows the system information MAC etc. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_system_info.show_system_info, is_leaf=True, yang_name="show-system-info", rest_name="show-system-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showSystemInfo'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_system_info must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_system_info.show_system_info, is_leaf=True, yang_name="show-system-info", rest_name="show-system-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showSystemInfo'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)""", }) self.__show_system_info = t if hasattr(self, '_set'): self._set()
Setter method for show_system_info, mapped from YANG variable /brocade_ras_ext_rpc/show_system_info (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_system_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_system_info() directly. YANG Description: Shows the system information MAC etc.
def linspace(self, start, stop, n):
    """ Simple replacement for numpy linspace"""
    if n == 1:
        return [start]
    L = [0.0] * n
    nm1 = n - 1
    nm1inv = 1.0 / nm1
    for i in range(n):
        L[i] = nm1inv * (start*(nm1 - i) + stop*i)
    return L
Simple replacement for numpy linspace
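A quick check of the pure-Python replacement above; `self` is unused in the body, so a hypothetical standalone version behaves identically:

# assuming `linspace` above is available as a standalone function (drop `self`)
print(linspace(0.0, 1.0, 5))  # [0.0, 0.25, 0.5, 0.75, 1.0]
print(linspace(2.0, 2.0, 1))  # [2.0]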
def report(self): """ Create reports of the findings """ # Initialise a variable to store the results data = '' for sample in self.metadata: if sample[self.analysistype].primers != 'NA': # Set the name of the strain-specific report sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir, '{}_{}.csv'.format(sample.name, self.analysistype)) # Populate the strain-specific string with header, and strain name strainspecific = 'Strain,{},\n{},'.format(','.join(sorted(sample[self.analysistype].targets)), sample.name) # Iterate through all the genes in the organism-specific analysis for gene in sorted(sample[self.analysistype].targets): try: # Extract the percent identity percentidentity = sample[self.analysistype].blastresults[gene]['percent_identity'] # If the % identity is greater than the cutoff of 50%, the gene is considered to be present if percentidentity > 50: strainspecific += '{},'.format(percentidentity) else: strainspecific += '-,' # If there are no BLAST results, then the gene is absent except KeyError: strainspecific += '-,' strainspecific += '\n' # Open and write the data to the strain-specific report with open(sample[self.analysistype].report, 'w') as specificreport: specificreport.write(strainspecific) # Add all the data from each strain to the cumulative data string data += strainspecific # Open and write the cumulative data to the cumulative report with open(os.path.join(self.reportdir, '{}.csv'.format(self.analysistype)), 'w') as report: report.write(data)
Create reports of the findings
def distribution_compatible(dist, supported_tags=None):
    """Is this distribution compatible with the given interpreter/platform combination?

    :param supported_tags: A list of tag tuples specifying which tags are supported
        by the platform in question.
    :returns: True if the distribution is compatible, False if it is unrecognized or incompatible.
    """
    if supported_tags is None:
        supported_tags = get_supported()

    package = Package.from_href(dist.location)
    if not package:
        return False

    return package.compatible(supported_tags)
Is this distribution compatible with the given interpreter/platform combination? :param supported_tags: A list of tag tuples specifying which tags are supported by the platform in question. :returns: True if the distribution is compatible, False if it is unrecognized or incompatible.
def add_empty_fields(untl_dict): """Add empty values if UNTL fields don't have values.""" # Iterate the ordered UNTL XML element list to determine # which elements are missing from the untl_dict. for element in UNTL_XML_ORDER: if element not in untl_dict: # Try to create an element with content and qualifier. try: py_object = PYUNTL_DISPATCH[element]( content='', qualifier='', ) except: # Try to create an element with content. try: py_object = PYUNTL_DISPATCH[element](content='') except: # Try to create an element without content. try: py_object = PYUNTL_DISPATCH[element]() except: raise PyuntlException( 'Could not add empty element field.' ) else: untl_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: untl_dict[element] = [{'content': ''}] else: untl_dict[element] = [{'content': {}}] else: # Handle element without children. if not py_object.contained_children: untl_dict[element] = [{'content': '', 'qualifier': ''}] else: untl_dict[element] = [{'content': {}, 'qualifier': ''}] # Add empty contained children. for child in py_object.contained_children: untl_dict[element][0].setdefault('content', {}) untl_dict[element][0]['content'][child] = '' return untl_dict
Add empty values if UNTL fields don't have values.
def parse_task_declaration(self, declaration_subAST):
    '''
    Parses the declaration section of the WDL task AST subtree.

    Examples:

    String my_name
    String your_name
    Int two_chains_i_mean_names = 0

    :param declaration_subAST: Some subAST representing a task declaration
                               like: 'String file_name'
    :return: var_name, var_type, var_value
        Example:
            Input subAST representing:   'String file_name'
            Output: var_name='file_name', var_type='String', var_value=None
    '''
    var_name = self.parse_declaration_name(declaration_subAST.attr("name"))
    var_type = self.parse_declaration_type(declaration_subAST.attr("type"))
    var_expressn = self.parse_declaration_expressn(declaration_subAST.attr("expression"), es='')

    return (var_name, var_type, var_expressn)
Parses the declaration section of the WDL task AST subtree. Examples: String my_name String your_name Int two_chains_i_mean_names = 0 :param declaration_subAST: Some subAST representing a task declaration like: 'String file_name' :return: var_name, var_type, var_value Example: Input subAST representing: 'String file_name' Output: var_name='file_name', var_type='String', var_value=None
def indent(lines, amount, ch=' '):
    """Indent the lines in a string by padding each one with proper number of pad characters"""
    padding = amount * ch
    return padding + ('\n' + padding).join(lines.split('\n'))
Indent the lines in a string by padding each one with proper number of pad characters
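A short usage sketch, assuming the `indent` helper above is in scope:

text = "first line\nsecond line"
print(indent(text, 4))
#     first line
#     second line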
def _gassist_any(self,dg,dt,dt2,name,na=None,nodiag=False,memlimit=-1): """Calculates probability of gene i regulating gene j with genotype data assisted method, with the recommended combination of multiple tests. dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data. Entry dg[i,j] is genotype i's value for sample j. Each value must be among 0,1,...,na. Genotype i must be best (and significant) eQTL of gene i (in dt). dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A Entry dt[i,j] is gene i's expression level for sample j. Genotype i (in dg) must be best (and significant) eQTL of gene i. dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B. dt2 has the same format as dt, and can be identical with, different from, or a superset of dt. When dt2 is a superset of (or identical with) dt, dt2 must be arranged to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and set parameter nodiag = 1. name: actual C function name to call na: Number of alleles the species have. It determintes the maximum number of values each genotype can take. When unspecified, it is automatically determined as the maximum of dg. nodiag: skip diagonal regulations, i.e. regulation A->B for A=B. This should be set to True when A is a subset of B and aligned correspondingly. memlimit: The approximate memory usage limit in bytes for the library. For datasets require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage. Return: dictionary with following keys: ret:0 iff execution succeeded. p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability function from for recommended combination of multiple tests. For more information on tests, see paper. ftype and gtype can be found in auto.py. """ if self.lib is None: raise ValueError("Not initialized.") import numpy as np from .auto import ftype_np,gtype_np from .types import isint if dg.dtype.char!=gtype_np: raise ValueError('Wrong input dtype for genotype data: dg.dtype.char is '+dg.dtype.char+'!='+gtype_np) if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np: raise ValueError('Wrong input dtype for gene expression data') if len(dg.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2: raise ValueError('Wrong input shape') if type(nodiag) is not bool: raise ValueError('Wrong nodiag type') if not isint(memlimit): raise ValueError('Wrong memlimit type') if not (na is None or isint(na)): raise ValueError('Wrong na type') if na is not None and na<=0: raise ValueError('Input requires na>0.') ng=dg.shape[0] nt=dt2.shape[0] ns=dg.shape[1] nvx=na+1 if na else dg.max()+1 nd=1 if nodiag else 0 if nvx<2: raise ValueError('Invalid genotype values') if dt.shape!=dg.shape or dt2.shape[1]!=ns: raise ValueError('Wrong input shape') if np.isnan(dt).sum()+np.isnan(dt2).sum()>0: raise ValueError('NaN found.') func=self.cfunc(name,rettype='int',argtypes=['const MATRIXG*','const MATRIXF*','const MATRIXF*','MATRIXF*','size_t','byte','size_t']) d=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=['A','C','O','W']) dgr=np.require(dg,requirements=['A','C','O','W']) dtr=np.require(dt,requirements=['A','C','O','W']) dt2r=np.require(dt2,requirements=['A','C','O','W']) ret=func(dgr,dtr,dt2r,d,nvx,nd,memlimit) ans={'ret':ret,'p':d} return ans
Calculates probability of gene i regulating gene j with genotype data assisted method, with the recommended combination of multiple tests. dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data. Entry dg[i,j] is genotype i's value for sample j. Each value must be among 0,1,...,na. Genotype i must be best (and significant) eQTL of gene i (in dt). dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A Entry dt[i,j] is gene i's expression level for sample j. Genotype i (in dg) must be best (and significant) eQTL of gene i. dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B. dt2 has the same format as dt, and can be identical with, different from, or a superset of dt. When dt2 is a superset of (or identical with) dt, dt2 must be arranged to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and set parameter nodiag = 1. name: actual C function name to call na: Number of alleles the species have. It determintes the maximum number of values each genotype can take. When unspecified, it is automatically determined as the maximum of dg. nodiag: skip diagonal regulations, i.e. regulation A->B for A=B. This should be set to True when A is a subset of B and aligned correspondingly. memlimit: The approximate memory usage limit in bytes for the library. For datasets require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage. Return: dictionary with following keys: ret:0 iff execution succeeded. p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)). Probability function from for recommended combination of multiple tests. For more information on tests, see paper. ftype and gtype can be found in auto.py.
def main(path_dir, requirements_name):
    """Console script for imports."""
    click.echo("\nWARNING: Uninstall libs it's at your own risk!")
    click.echo('\nREMINDER: After uninstall libs, update your requirements '
               'file.\nUse the `pip freeze > requirements.txt` command.')
    click.echo('\n\nList of installed libs and your dependencies added on '
               'project\nrequirements that are not being used:\n')
    check(path_dir, requirements_name)
Console script for imports.
def NotificationsPost(self, parameters):
    """
    Create a notification on CommonSense.

    If successful the result, including the notification_id, can be obtained
    from getResponse(), and should be a json string.

    @param parameters (dictionary) - Dictionary containing the notification to create.
    @note -
    @return (bool) - Boolean indicating whether NotificationsPost was successful.
    """
    if self.__SenseApiCall__('/notifications.json', 'POST', parameters=parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
Create a notification on CommonSense. If successful the result, including the notification_id, can be obtained from getResponse(), and should be a json string. @param parameters (dictionary) - Dictionary containing the notification to create. @note - @return (bool) - Boolean indicating whether NotificationsPost was successful.
def add_job(self, job):
    """Adds a new job into the cache.

    :param dict job: The job dictionary
    :returns: True
    """
    self.cur.execute("INSERT INTO jobs VALUES(?,?,?,?,?)", (
        job["id"],
        job["description"],
        job["last-run"],
        job["next-run"],
        job["last-run-result"]))
    return True
Adds a new job into the cache. :param dict job: The job dictionary :returns: True
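A sketch of the job dictionary this method expects, assuming a hypothetical `cache` object that exposes `add_job` and is backed by a five-column `jobs` table (keys taken from the body above, values made up):

job = {
    "id": "nightly-backup",
    "description": "Run the nightly backup",
    "last-run": "2019-01-01T02:00:00",
    "next-run": "2019-01-02T02:00:00",
    "last-run-result": 0,
}
cache.add_job(job)  # `cache` is a hypothetical instance of the class above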
def _geolocation_extract(response):
    """
    Mimics the exception handling logic in ``client._get_body``, but
    for geolocation which uses a different response format.
    """
    body = response.json()
    if response.status_code in (200, 404):
        return body

    try:
        error = body["error"]["errors"][0]["reason"]
    except KeyError:
        error = None

    if response.status_code == 403:
        raise exceptions._OverQueryLimit(response.status_code, error)
    else:
        raise exceptions.ApiError(response.status_code, error)
Mimics the exception handling logic in ``client._get_body``, but for geolocation which uses a different response format.
def get_ips_by_equipment_and_environment(self, equip_nome, id_ambiente): """Search Group Equipment from by the identifier. :param id_egroup: Identifier of the Group Equipment. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'equipaments': [{'nome': < name_equipament >, 'grupos': < id_group >, 'mark': {'id': < id_mark >, 'nome': < name_mark >},'modelo': < id_model >, 'tipo_equipamento': < id_type >, 'model': {'nome': , 'id': < id_model >, 'marca': < id_mark >}, 'type': {id': < id_type >, 'tipo_equipamento': < name_type >}, 'id': < id_equipment >}, ... ]} :raise InvalidParameterError: Group Equipment is null and invalid. :raise GrupoEquipamentoNaoExisteError: Group Equipment not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if id_ambiente is None: raise InvalidParameterError( u'The environment id is invalid or was not informed.') url = 'equipment/getipsbyambiente/' + str(equip_nome) + '/' + str(id_ambiente) code, xml = self.submit(None, 'GET', url) return self.response(code, xml)
Search Group Equipment from by the identifier. :param id_egroup: Identifier of the Group Equipment. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'equipaments': [{'nome': < name_equipament >, 'grupos': < id_group >, 'mark': {'id': < id_mark >, 'nome': < name_mark >},'modelo': < id_model >, 'tipo_equipamento': < id_type >, 'model': {'nome': , 'id': < id_model >, 'marca': < id_mark >}, 'type': {id': < id_type >, 'tipo_equipamento': < name_type >}, 'id': < id_equipment >}, ... ]} :raise InvalidParameterError: Group Equipment is null and invalid. :raise GrupoEquipamentoNaoExisteError: Group Equipment not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def dict_to_numpy_array(d):
    """
    Convert a dict of 1d array to a numpy recarray
    """
    return fromarrays(d.values(), np.dtype([(str(k), v.dtype) for k, v in d.items()]))
Convert a dict of 1d array to a numpy recarray
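A minimal usage sketch, assuming the helper above is in scope and that the bare `fromarrays` in its body refers to `numpy.core.records.fromarrays`:

import numpy as np

d = {'x': np.array([1, 2, 3]), 'y': np.array([0.1, 0.2, 0.3])}
rec = dict_to_numpy_array(d)
print(rec['x'], rec['y'])  # [1 2 3] [0.1 0.2 0.3]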
def _set_backreferences(self, context, items, **kwargs): """Set the back references on the linked items This will set an annotation storage on the referenced items which point to the current context. """ # Don't set any references during initialization. # This might cause a recursion error when calling `getRaw` to fetch the # current set UIDs! initializing = kwargs.get('_initializing_', False) if initializing: return # UID of the current object uid = api.get_uid(context) # current set UIDs raw = self.getRaw(context) or [] # handle single reference fields if isinstance(raw, basestring): raw = [raw, ] cur = set(raw) # UIDs to be set new = set(map(api.get_uid, items)) # removed UIDs removed = cur.difference(new) # Unlink removed UIDs from the source for uid in removed: source = api.get_object_by_uid(uid, None) if source is None: logger.warn("UID {} does not exist anymore".format(uid)) continue self.unlink_reference(source, context) # Link backrefs for item in items: self.link_reference(item, context)
Set the back references on the linked items This will set an annotation storage on the referenced items which point to the current context.
def save(self_or_cls, obj, basename, fmt='auto', key={}, info={}, options=None, **kwargs): """ Save a HoloViews object to file, either using an explicitly supplied format or to the appropriate default. """ if info or key: raise Exception('Renderer does not support saving metadata to file.') if isinstance(obj, (Plot, NdWidget)): plot = obj else: with StoreOptions.options(obj, options, **kwargs): plot = self_or_cls.get_plot(obj) if (fmt in list(self_or_cls.widgets.keys())+['auto']) and len(plot) > 1: with StoreOptions.options(obj, options, **kwargs): if isinstance(basename, basestring): basename = basename+'.html' self_or_cls.export_widgets(plot, basename, fmt) return rendered = self_or_cls(plot, fmt) if rendered is None: return (data, info) = rendered encoded = self_or_cls.encode(rendered) prefix = self_or_cls._save_prefix(info['file-ext']) if prefix: encoded = prefix + encoded if isinstance(basename, (BytesIO, StringIO)): basename.write(encoded) basename.seek(0) else: filename ='%s.%s' % (basename, info['file-ext']) with open(filename, 'wb') as f: f.write(encoded)
Save a HoloViews object to file, either using an explicitly supplied format or to the appropriate default.
def validate_args(args):
    """
    Call all required validation functions
    :param args:
    :return:
    """
    if not os.path.isdir(args.directory):
        print "Directory {} does not exist".format(args.directory)
        sys.exit(5)
    return args
Call all required validation functions :param args: :return:
def run(path, code=None, params=None, **meta):
    """pydocstyle code checking.

    :return list: List of errors.
    """
    if 'ignore_decorators' in params:
        ignore_decorators = params['ignore_decorators']
    else:
        ignore_decorators = None
    check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path)
    return [{
        'lnum': e.line,
        # Remove colon after error code ("D403: ..." => "D403 ...").
        'text': (e.message[0:4] + e.message[5:]
                 if e.message[4] == ':' else e.message),
        'type': 'D',
        'number': e.code
    } for e in PyDocChecker().check_source(*check_source_args)]
pydocstyle code checking. :return list: List of errors.
def _variant_po_to_dict(tokens) -> CentralDogma:
    """Convert a PyParsing data dictionary to a central dogma abundance (i.e., Protein, RNA, miRNA, Gene).

    :type tokens: ParseResult
    """
    dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
    if dsl is None:
        raise ValueError('invalid tokens: {}'.format(tokens))

    return dsl(
        namespace=tokens[NAMESPACE],
        name=tokens[NAME],
        variants=[
            _variant_to_dsl_helper(variant_tokens)
            for variant_tokens in tokens[VARIANTS]
        ],
    )
Convert a PyParsing data dictionary to a central dogma abundance (i.e., Protein, RNA, miRNA, Gene). :type tokens: ParseResult
def _filter_insane_successors(self, successors): """ Throw away all successors whose target doesn't make sense This method is called after we resolve an indirect jump using an unreliable method (like, not through one of the indirect jump resolvers, but through either pure concrete execution or backward slicing) to filter out the obviously incorrect successors. :param list successors: A collection of successors. :return: A filtered list of successors :rtype: list """ old_successors = successors[::] successors = [ ] for i, suc in enumerate(old_successors): if suc.solver.symbolic(suc.ip): # It's symbolic. Take it, and hopefully we can resolve it later successors.append(suc) else: ip_int = suc.solver.eval_one(suc.ip) if self._is_address_executable(ip_int) or \ self.project.is_hooked(ip_int) or \ self.project.simos.is_syscall_addr(ip_int): successors.append(suc) else: l.debug('An obviously incorrect successor %d/%d (%#x) is ditched', i + 1, len(old_successors), ip_int ) return successors
Throw away all successors whose target doesn't make sense This method is called after we resolve an indirect jump using an unreliable method (like, not through one of the indirect jump resolvers, but through either pure concrete execution or backward slicing) to filter out the obviously incorrect successors. :param list successors: A collection of successors. :return: A filtered list of successors :rtype: list
def stop_process(self):
    """
    Stop the process (by killing it).
    """
    if self.process is not None:
        self._user_stop = True
        self.process.kill()
        self.setReadOnly(True)
        self._running = False
Stop the process (by killing it).
def csv_row_cleaner(rows):
    """
    Clean row checking:
    - Not empty row.
    - >=1 element different in a row.
    - row already in cleaned row result.
    """
    result = []
    for row in rows:
        # check not empty row
        check_empty = len(exclude_empty_values(row)) > 1
        # check more or eq than 1 unique element in row
        check_set = len(set(exclude_empty_values(row))) > 1
        # check row not already in result cleaned rows
        check_last_already = (result and result[-1] == row)
        if check_empty and check_set and not check_last_already:
            result.append(row)
    return result
Clean row checking: - Not empty row. - >=1 element different in a row. - row already in cleaned row result.
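A small usage sketch, assuming `csv_row_cleaner` and its `exclude_empty_values` helper are importable (the helper is assumed to drop empty cells):

rows = [
    ['a', 'b', 'c'],
    ['', '', ''],     # empty row -> dropped
    ['x', 'x', 'x'],  # no distinct values -> dropped
    ['a', 'b', 'c'],  # repeat of the previously kept row -> dropped
    ['d', 'e', 'f'],
]
print(csv_row_cleaner(rows))  # [['a', 'b', 'c'], ['d', 'e', 'f']]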
def _gather_from_files(self, config): """ gathers from the files in a way that is convienent to use """ command_file = config.get_help_files() cache_path = os.path.join(config.get_config_dir(), 'cache') cols = _get_window_columns() with open(os.path.join(cache_path, command_file), 'r') as help_file: data = json.load(help_file) self.add_exit() commands = data.keys() for command in commands: branch = self.command_tree for word in command.split(): if word not in self.completable: self.completable.append(word) if not branch.has_child(word): branch.add_child(CommandBranch(word)) branch = branch.get_child(word) description = data[command]['help'] self.descrip[command] = add_new_lines(description, line_min=int(cols) - 2 * TOLERANCE) if 'examples' in data[command]: examples = [] for example in data[command]['examples']: examples.append([ add_new_lines(example[0], line_min=int(cols) - 2 * TOLERANCE), add_new_lines(example[1], line_min=int(cols) - 2 * TOLERANCE)]) self.command_example[command] = examples command_params = data[command].get('parameters', {}) for param in command_params: if '==SUPPRESS==' not in command_params[param]['help']: param_aliases = set() for par in command_params[param]['name']: param_aliases.add(par) self.param_descript[command + " " + par] = \ add_new_lines( command_params[param]['required'] + " " + command_params[param]['help'], line_min=int(cols) - 2 * TOLERANCE) if par not in self.completable_param: self.completable_param.append(par) param_doubles = self.command_param_info.get(command, {}) for alias in param_aliases: param_doubles[alias] = param_aliases self.command_param_info[command] = param_doubles
gathers from the files in a way that is convenient to use
def changeTo(self, path):
    '''change value

    Args:
        path (str): the new environment path
    '''
    dictionary = DictSingle(Pair('PATH', StringSingle(path)))
    self.value = [dictionary]
change value Args: path (str): the new environment path
def print_stack_trace(proc_obj, count=None, color='plain', opts={}):
    "Print count entries of the stack trace"
    if count is None:
        n = len(proc_obj.stack)
    else:
        n = min(len(proc_obj.stack), count)
    try:
        for i in range(n):
            print_stack_entry(proc_obj, i, color=color, opts=opts)
    except KeyboardInterrupt:
        pass
    return
Print count entries of the stack trace
def open_project(self, path=None, restart_consoles=True, save_previous_files=True): """Open the project located in `path`""" self.switch_to_plugin() if path is None: basedir = get_home_dir() path = getexistingdirectory(parent=self, caption=_("Open project"), basedir=basedir) path = encoding.to_unicode_from_fs(path) if not self.is_valid_project(path): if path: QMessageBox.critical(self, _('Error'), _("<b>%s</b> is not a Spyder project!") % path) return else: path = encoding.to_unicode_from_fs(path) self.add_to_recent(path) # A project was not open before if self.current_active_project is None: if save_previous_files and self.main.editor is not None: self.main.editor.save_open_files() if self.main.editor is not None: self.main.editor.set_option('last_working_dir', getcwd_or_home()) if self.get_option('visible_if_project_open'): self.show_explorer() else: # We are switching projects if self.main.editor is not None: self.set_project_filenames( self.main.editor.get_open_filenames()) self.current_active_project = EmptyProject(path) self.latest_project = EmptyProject(path) self.set_option('current_project_path', self.get_active_project_path()) self.setup_menu_actions() self.sig_project_loaded.emit(path) self.sig_pythonpath_changed.emit() if restart_consoles: self.restart_consoles()
Open the project located in `path`
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer, parse_record_fn, num_epochs=1, num_gpus=None, examples_per_epoch=None, dtype=tf.float32): """Given a Dataset with raw records, return an iterator over the records. Args: dataset: A Dataset representing raw records is_training: A boolean denoting whether the input is for training. batch_size: The number of samples per batch. shuffle_buffer: The buffer size to use when shuffling records. A larger value results in better randomness, but smaller values reduce startup time and use less memory. parse_record_fn: A function that takes a raw record and returns the corresponding (image, label) pair. num_epochs: The number of epochs to repeat the dataset. num_gpus: The number of gpus used for training. examples_per_epoch: The number of examples in an epoch. dtype: Data type to use for images/features. Returns: Dataset of (image, label) pairs ready for iteration. """ # We prefetch a batch at a time, This can help smooth out the time taken to # load input files as we go through shuffling and processing. dataset = dataset.prefetch(buffer_size=batch_size) if is_training: # Shuffle the records. Note that we shuffle before repeating to ensure # that the shuffling respects epoch boundaries. mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER) dataset = dataset.shuffle(buffer_size=shuffle_buffer) # If we are training over multiple epochs before evaluating, repeat the # dataset for the appropriate number of epochs. dataset = dataset.repeat(num_epochs) # Parse the raw records into images and labels. Testing has shown that setting # num_parallel_batches > 1 produces no improvement in throughput, since # batch_size is almost always much greater than the number of CPU cores. dataset = dataset.apply( tf.contrib.data.map_and_batch( lambda value: parse_record_fn(value, is_training, dtype), batch_size=batch_size, num_parallel_batches=1)) # Operations between the final prefetch and the get_next call to the iterator # will happen synchronously during run time. We prefetch here again to # background all of the above processing work and keep it out of the # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE # allows DistributionStrategies to adjust how many batches to fetch based # on how many devices are present. dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) return dataset
Given a Dataset with raw records, return an iterator over the records. Args: dataset: A Dataset representing raw records is_training: A boolean denoting whether the input is for training. batch_size: The number of samples per batch. shuffle_buffer: The buffer size to use when shuffling records. A larger value results in better randomness, but smaller values reduce startup time and use less memory. parse_record_fn: A function that takes a raw record and returns the corresponding (image, label) pair. num_epochs: The number of epochs to repeat the dataset. num_gpus: The number of gpus used for training. examples_per_epoch: The number of examples in an epoch. dtype: Data type to use for images/features. Returns: Dataset of (image, label) pairs ready for iteration.
def object_info(lcc_server, objectid, db_collection_id): '''This gets information on a single object from the LCC-Server. Returns a dict with all of the available information on an object, including finding charts, comments, object type and variability tags, and period-search results (if available). If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is associated with an LCC-Server user account, objects that are visible to this user will be returned, even if they are not visible to the public. Use this to look up objects that have been marked as 'private' or 'shared'. NOTE: you can pass the result dict returned by this function directly into the `astrobase.checkplot.checkplot_pickle_to_png` function, e.g.:: astrobase.checkplot.checkplot_pickle_to_png(result_dict, 'object-%s-info.png' % result_dict['objectid']) to generate a quick PNG overview of the object information. Parameters ---------- lcc_server : str This is the base URL of the LCC-Server to talk to. objectid : str This is the unique database ID of the object to retrieve info for. This is always returned as the `db_oid` column in LCC-Server search results. db_collection_id : str This is the collection ID which will be searched for the object. This is always returned as the `collection` column in LCC-Server search results. Returns ------- dict A dict containing the object info is returned. Some important items in the result dict: - `objectinfo`: all object magnitude, color, GAIA cross-match, and object type information available for this object - `objectcomments`: comments on the object's variability if available - `varinfo`: variability comments, variability features, type tags, period and epoch information if available - `neighbors`: information on the neighboring objects of this object in its parent light curve collection - `xmatch`: information on any cross-matches to external catalogs (e.g. KIC, EPIC, TIC, APOGEE, etc.) - `finderchart`: a base-64 encoded PNG image of the object's DSS2 RED finder chart. To convert this to an actual PNG, try the function: `astrobase.checkplot.pkl_io._b64_to_file`. - `magseries`: a base-64 encoded PNG image of the object's light curve. To convert this to an actual PNG, try the function: `astrobase.checkplot.pkl_io._b64_to_file`. - `pfmethods`: a list of period-finding methods applied to the object if any. If this list is present, use the keys in it to get to the actual period-finding results for each method. These will contain base-64 encoded PNGs of the periodogram and phased light curves using the best three peaks in the periodogram, as well as period and epoch information. 
''' urlparams = { 'objectid':objectid, 'collection':db_collection_id } urlqs = urlencode(urlparams) url = '%s/api/object?%s' % (lcc_server, urlqs) try: LOGINFO( 'getting info for %s in collection %s from %s' % ( objectid, db_collection_id, lcc_server ) ) # check if we have an API key already have_apikey, apikey, expires = check_existing_apikey(lcc_server) # if not, get a new one if not have_apikey: apikey, expires = get_new_apikey(lcc_server) # if apikey is not None, add it in as an Authorization: Bearer [apikey] # header if apikey: headers = {'Authorization':'Bearer: %s' % apikey} else: headers = {} # hit the server req = Request(url, data=None, headers=headers) resp = urlopen(req) objectinfo = json.loads(resp.read())['result'] return objectinfo except HTTPError as e: if e.code == 404: LOGERROR( 'additional info for object %s not ' 'found in collection: %s' % (objectid, db_collection_id) ) else: LOGERROR('could not retrieve object info, ' 'URL used: %s, error code: %s, reason: %s' % (url, e.code, e.reason)) return None
This gets information on a single object from the LCC-Server. Returns a dict with all of the available information on an object, including finding charts, comments, object type and variability tags, and period-search results (if available). If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is associated with an LCC-Server user account, objects that are visible to this user will be returned, even if they are not visible to the public. Use this to look up objects that have been marked as 'private' or 'shared'. NOTE: you can pass the result dict returned by this function directly into the `astrobase.checkplot.checkplot_pickle_to_png` function, e.g.:: astrobase.checkplot.checkplot_pickle_to_png(result_dict, 'object-%s-info.png' % result_dict['objectid']) to generate a quick PNG overview of the object information. Parameters ---------- lcc_server : str This is the base URL of the LCC-Server to talk to. objectid : str This is the unique database ID of the object to retrieve info for. This is always returned as the `db_oid` column in LCC-Server search results. db_collection_id : str This is the collection ID which will be searched for the object. This is always returned as the `collection` column in LCC-Server search results. Returns ------- dict A dict containing the object info is returned. Some important items in the result dict: - `objectinfo`: all object magnitude, color, GAIA cross-match, and object type information available for this object - `objectcomments`: comments on the object's variability if available - `varinfo`: variability comments, variability features, type tags, period and epoch information if available - `neighbors`: information on the neighboring objects of this object in its parent light curve collection - `xmatch`: information on any cross-matches to external catalogs (e.g. KIC, EPIC, TIC, APOGEE, etc.) - `finderchart`: a base-64 encoded PNG image of the object's DSS2 RED finder chart. To convert this to an actual PNG, try the function: `astrobase.checkplot.pkl_io._b64_to_file`. - `magseries`: a base-64 encoded PNG image of the object's light curve. To convert this to an actual PNG, try the function: `astrobase.checkplot.pkl_io._b64_to_file`. - `pfmethods`: a list of period-finding methods applied to the object if any. If this list is present, use the keys in it to get to the actual period-finding results for each method. These will contain base-64 encoded PNGs of the periodogram and phased light curves using the best three peaks in the periodogram, as well as period and epoch information.
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)
r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response
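This is the familiar module-level GET helper from requests; a minimal call looks like this (the URL is just an example endpoint):

import requests

resp = requests.get('https://httpbin.org/get', params={'q': 'example'})
print(resp.status_code)     # 200 on success
print(resp.json()['args'])  # {'q': 'example'}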
def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] # print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) # rest should be empty return d
Default converter for the cfg:// protocol.
def get(self, key):
    """Get a key-value from storage according to the key name.
    """
    data = r_kv.get(key)
    # data = json.dumps(data) if isinstance(data, str) else data
    # data = json.loads(data) if data else {}
    return build_response(dict(data=data, code=200))
Get a key-value from storage according to the key name.
def compounding(start, stop, compound):
    """Yield an infinite series of compounding values. Each time the
    generator is called, a value is produced by multiplying the previous
    value by the compound rate.

    EXAMPLE:
      >>> sizes = compounding(1., 10., 1.5)
      >>> assert next(sizes) == 1.
      >>> assert next(sizes) == 1 * 1.5
      >>> assert next(sizes) == 1.5 * 1.5
    """
    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)
    curr = float(start)
    while True:
        yield clip(curr)
        curr *= compound
Yield an infinite series of compounding values. Each time the generator is called, a value is produced by multiplying the previous value by the compound rate. EXAMPLE: >>> sizes = compounding(1., 10., 1.5) >>> assert next(sizes) == 1. >>> assert next(sizes) == 1 * 1.5 >>> assert next(sizes) == 1.5 * 1.5
def assert_equals(actual, expected, ignore_order=False, ignore_index=False, all_close=False):
    '''
    Assert 2 series are equal.

    Like ``assert equals(series1, series2, ...)``, but with better hints at
    where the series differ. See `equals` for detailed parameter doc.

    Parameters
    ----------
    actual : ~pandas.Series
    expected : ~pandas.Series
    ignore_order : bool
    ignore_index : bool
    all_close : bool
    '''
    equals_, reason = equals(actual, expected, ignore_order, ignore_index, all_close, _return_reason=True)
    assert equals_, '{}\n\n{}\n\n{}'.format(reason, actual.to_string(), expected.to_string())
Assert 2 series are equal. Like ``assert equals(series1, series2, ...)``, but with better hints at where the series differ. See `equals` for detailed parameter doc. Parameters ---------- actual : ~pandas.Series expected : ~pandas.Series ignore_order : bool ignore_index : bool all_close : bool
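A usage sketch, assuming `assert_equals` (and the `equals` helper it calls) are importable from the library above; the effect of the flags is inferred from their names:

import pandas as pd

s1 = pd.Series([1.0, 2.0, 3.0])
s2 = pd.Series([3.0, 1.0, 2.0])
# Intended to pass when ordering/index differences are ignored; otherwise it
# raises AssertionError with a reason and both series printed.
assert_equals(s1, s2, ignore_order=True, ignore_index=True)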
def collection_choices():
    """Return collection choices."""
    from invenio_collections.models import Collection
    return [(0, _('-None-'))] + [
        (c.id, c.name) for c in Collection.query.all()
    ]
Return collection choices.
def complain(error):
    """Raises in develop; warns in release."""
    if callable(error):
        if DEVELOP:
            raise error()
    elif DEVELOP:
        raise error
    else:
        logger.warn_err(error)
Raises in develop; warns in release.
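A sketch of how a caller might use this, assuming the module-level `DEVELOP` flag and `logger` referenced in the body:

# develop mode: both calls raise; release mode: the instance is logged,
# the callable case is skipped entirely
complain(ValueError("unsupported option"))   # exception instance (not callable)
complain(lambda: RuntimeError("bad state"))  # callable that builds the exception lazily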
def transcode_to_utf8(filename, encoding):
    """
    Convert a file in some other encoding into a temporary file that's in
    UTF-8.
    """
    tmp = tempfile.TemporaryFile()
    for line in io.open(filename, encoding=encoding):
        tmp.write(line.strip('\uFEFF').encode('utf-8'))
    tmp.seek(0)
    return tmp
Convert a file in some other encoding into a temporary file that's in UTF-8.
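A minimal usage sketch, assuming `transcode_to_utf8` above is in scope (it needs `io` and `tempfile` imported in its module); the input file name and encoding are hypothetical:

utf8_file = transcode_to_utf8('words_latin1.txt', 'latin-1')
for raw_line in utf8_file:          # the temporary file is binary, so lines are bytes
    print(raw_line.decode('utf-8'), end='')
utf8_file.close()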
def download(self):
    """
    Walk from the current ledger index to the genesis ledger index,
    and download transactions from rippled.
    """
    self.housekeeping()
    self.rippled_history()
    if self.resampling_frequencies is not None:
        self.find_markets()
        self.resample_time_series()
Walk from the current ledger index to the genesis ledger index, and download transactions from rippled.
def bg_compensate(img, sigma, splinepoints, scale): '''Reads file, subtracts background. Returns [compensated image, background].''' from PIL import Image import pylab from matplotlib.image import pil_to_array from centrosome.filter import canny import matplotlib img = Image.open(img) if img.mode=='I;16': # 16-bit image # deal with the endianness explicitly... I'm not sure # why PIL doesn't get this right. imgdata = np.fromstring(img.tostring(),np.uint8) imgdata.shape=(int(imgdata.shape[0]/2),2) imgdata = imgdata.astype(np.uint16) hi,lo = (0,1) if img.tag.prefix == 'MM' else (1,0) imgdata = imgdata[:,hi]*256 + imgdata[:,lo] img_size = list(img.size) img_size.reverse() new_img = imgdata.reshape(img_size) # The magic # for maximum sample value is 281 if 281 in img.tag: img = new_img.astype(np.float32) / img.tag[281][0] elif np.max(new_img) < 4096: img = new_img.astype(np.float32) / 4095. else: img = new_img.astype(np.float32) / 65535. else: img = pil_to_array(img) pylab.subplot(1,3,1).imshow(img, cmap=matplotlib.cm.Greys_r) pylab.show() if len(img.shape)>2: raise ValueError('Image must be grayscale') ## Create mask that will fix problem when image has black areas outside of well edges = canny(img, np.ones(img.shape, bool), 2, .1, .3) ci = np.cumsum(edges, 0) cj = np.cumsum(edges, 1) i,j = np.mgrid[0:img.shape[0], 0:img.shape[1]] mask = ci > 0 mask = mask & (cj > 0) mask[1:,:] &= (ci[0:-1,:] < ci[-1,j[0:-1,:]]) mask[:,1:] &= (cj[:,0:-1] < cj[i[:,0:-1],-1]) import time t0 = time.clock() bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale) print("Executed in %f sec" % (time.clock() - t0)) bg[~mask] = img[~mask] pylab.subplot(1,3,2).imshow(img - bg, cmap=matplotlib.cm.Greys_r) pylab.subplot(1,3,3).imshow(bg, cmap=matplotlib.cm.Greys_r) pylab.show()
Reads file, subtracts background. Returns [compensated image, background].
def actually_possibly_award(self, **state): """ Does the actual work of possibly awarding a badge. """ user = state["user"] force_timestamp = state.pop("force_timestamp", None) awarded = self.award(**state) if awarded is None: return if awarded.level is None: assert len(self.levels) == 1 awarded.level = 1 # awarded levels are 1 indexed, for convenience awarded = awarded.level - 1 assert awarded < len(self.levels) if ( not self.multiple and BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded) ): return extra_kwargs = {} if force_timestamp is not None: extra_kwargs["awarded_at"] = force_timestamp badge = BadgeAward.objects.create( user=user, slug=self.slug, level=awarded, **extra_kwargs ) self.send_badge_messages(badge) badge_awarded.send(sender=self, badge_award=badge)
Does the actual work of possibly awarding a badge.
def run_qsnp(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Run qSNP calling on paired tumor/normal. """ if utils.file_exists(out_file): return out_file paired = get_paired_bams(align_bams, items) if paired.normal_bam: region_files = [] regions = _clean_regions(items, region) if regions: for region in regions: out_region_file = out_file.replace(".vcf.gz", _to_str(region) + ".vcf.gz") region_file = _run_qsnp_paired(align_bams, items, ref_file, assoc_files, region, out_region_file) region_files.append(region_file) out_file = combine_variant_files(region_files, out_file, ref_file, items[0]["config"]) if not region: out_file = _run_qsnp_paired(align_bams, items, ref_file, assoc_files, region, out_file) return out_file else: raise ValueError("qSNP only works on paired samples")
Run qSNP calling on paired tumor/normal.
def fill_phenotype_calls(self,phenotypes=None,inplace=False): """ Set the phenotype_calls according to the phenotype names """ if phenotypes is None: phenotypes = list(self['phenotype_label'].unique()) def _get_calls(label,phenos): d = dict([(x,0) for x in phenos]) if label!=label: return d # np.nan case d[label] = 1 return d if inplace: self['phenotype_calls'] = self.apply(lambda x: _get_calls(x['phenotype_label'],phenotypes),1) return fixed = self.copy() fixed['phenotype_calls'] = fixed.apply(lambda x: _get_calls(x['phenotype_label'],phenotypes),1) return fixed
Set the phenotype_calls according to the phenotype names
def _export_project_file(project, path, z, include_images, keep_compute_id, allow_all_nodes, temporary_dir): """ Take a project file (.gns3) and patch it for the export We rename the .gns3 project.gns3 to avoid the task to the client to guess the file name :param path: Path of the .gns3 """ # Image file that we need to include in the exported archive images = [] with open(path) as f: topology = json.load(f) if "topology" in topology: if "nodes" in topology["topology"]: for node in topology["topology"]["nodes"]: compute_id = node.get('compute_id', 'local') if node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone"): raise aiohttp.web.HTTPConflict(text="Topology with a linked {} clone could not be exported. Use qemu instead.".format(node["node_type"])) if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware", "cloud"]: raise aiohttp.web.HTTPConflict(text="Topology with a {} could not be exported".format(node["node_type"])) if not keep_compute_id: node["compute_id"] = "local" # To make project portable all node by default run on local if "properties" in node and node["node_type"] != "docker": for prop, value in node["properties"].items(): if node["node_type"] == "iou": if not prop == "path": continue elif not prop.endswith("image"): continue if value is None or value.strip() == '': continue if not keep_compute_id: # If we keep the original compute we can keep the image path node["properties"][prop] = os.path.basename(value) if include_images is True: images.append({ 'compute_id': compute_id, 'image': value, 'image_type': node['node_type'] }) if not keep_compute_id: topology["topology"]["computes"] = [] # Strip compute information because could contain secret info like password local_images = set([i['image'] for i in images if i['compute_id'] == 'local']) for image in local_images: _export_local_images(project, image, z) remote_images = set([ (i['compute_id'], i['image_type'], i['image']) for i in images if i['compute_id'] != 'local']) for compute_id, image_type, image in remote_images: yield from _export_remote_images(project, compute_id, image_type, image, z, temporary_dir) z.writestr("project.gns3", json.dumps(topology).encode()) return images
Take a project file (.gns3) and patch it for the export. The project file is renamed to project.gns3 so the client does not have to guess the file name. :param path: Path of the .gns3
def version(self, path, postmap=None, **params): """ Return the taskforce version. Supports standard options. """ q = httpd.merge_query(path, postmap) ans = { 'taskforce': taskforce_version, 'python': '.'.join(str(x) for x in sys.version_info[:3]), } ans['platform'] = { 'system': platform.system(), } # Add in some extra details if this is a control path. # These might give away too many details on a public # path. # if self._httpd.allow_control: ans['platform']['platform'] = platform.platform() ans['platform']['release'] = platform.release() return self._format(ans, q)
Return the taskforce version. Supports standard options.
def resolve(self, value=None): """ Resolve the current expression against the supplied value """ # If we still have an uninitialized matcher init it now if self.matcher: self._init_matcher() # Evaluate the current set of matchers forming the expression matcher = self.evaluate() try: value = self._transform(value) self._assertion(matcher, value) except AssertionError as ex: # By re-raising here the exception we reset the traceback raise ex finally: # Reset the state of the object so we can use it again if self.deferred: self.reset()
Resolve the current expression against the supplied value
def list_ip(self, instance_id): """Return the private and public IP addresses of the given instance.""" output = self.client.describe_instances(InstanceIds=[instance_id]) output = output.get("Reservations")[0].get("Instances")[0] ips = {} ips['PrivateIp'] = output.get("PrivateIpAddress") ips['PublicIp'] = output.get("PublicIpAddress") return ips
Return the private and public IP addresses of the given instance.
def create_role(self, **kwargs): """Creates and returns a new role from the given parameters.""" role = self.role_model(**kwargs) return self.put(role)
Creates and returns a new role from the given parameters.
def reset_password(self, token): """ View function verify a users reset password token from the email we sent to them. It also handles the form for them to set a new password. Supports html and json requests. """ expired, invalid, user = \ self.security_utils_service.reset_password_token_status(token) if invalid: self.flash( _('flask_unchained.bundles.security:flash.invalid_reset_password_token'), category='error') return self.redirect('SECURITY_INVALID_RESET_TOKEN_REDIRECT') elif expired: self.security_service.send_reset_password_instructions(user) self.flash(_('flask_unchained.bundles.security:flash.password_reset_expired', email=user.email, within=app.config.SECURITY_RESET_PASSWORD_WITHIN), category='error') return self.redirect('SECURITY_EXPIRED_RESET_TOKEN_REDIRECT') spa_redirect = app.config.SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT if request.method == 'GET' and spa_redirect: return self.redirect(spa_redirect, token=token, _external=True) form = self._get_form('SECURITY_RESET_PASSWORD_FORM') if form.validate_on_submit(): self.security_service.reset_password(user, form.password.data) self.security_service.login_user(user) self.after_this_request(self._commit) self.flash(_('flask_unchained.bundles.security:flash.password_reset'), category='success') if request.is_json: return self.jsonify({'token': user.get_auth_token(), 'user': user}) return self.redirect('SECURITY_POST_RESET_REDIRECT_ENDPOINT', 'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT') elif form.errors and request.is_json: return self.errors(form.errors) return self.render('reset_password', reset_password_form=form, reset_password_token=token, **self.security.run_ctx_processor('reset_password'))
View function to verify a user's reset password token from the email we sent to them. It also handles the form for them to set a new password. Supports html and json requests.
def element_abund_marco(i_decay, stable_isotope_list, stable_isotope_identifier, mass_fractions_array_not_decayed, mass_fractions_array_decayed): ''' Given an array of isotopic abundances not decayed and a similar array of isotopic abundances not decayed, here elements abundances, and production factors for elements are calculated ''' # this way is done in a really simple way. May be done better for sure, in a couple of loops. # I keep this, since I have only to copy over old script. Falk will probably redo it. #import numpy as np #from NuGridPy import utils as u global elem_abund elem_abund = np.zeros(z_bismuth) global elem_abund_decayed elem_abund_decayed = np.zeros(z_bismuth) global elem_prod_fac elem_prod_fac = np.zeros(z_bismuth) global elem_prod_fac_decayed elem_prod_fac_decayed = np.zeros(z_bismuth) # notice that elem_abund include all contribution, both from stables and unstables in # that moment. for i in range(z_bismuth): dummy = 0. for j in range(len(spe)): if znum_int[j] == i+1 and stable_isotope_identifier[j] > 0.5: dummy = dummy + float(mass_fractions_array_not_decayed[j]) elem_abund[i] = dummy for i in range(z_bismuth): if index_stable[i] == 1: elem_prod_fac[i] = float(old_div(elem_abund[i],solar_elem_abund[i])) elif index_stable[i] == 0: elem_prod_fac[i] = 0. if i_decay == 2: for i in range(z_bismuth): dummy = 0. for j in range(len(mass_fractions_array_decayed)): if znum_int[cl[stable_isotope_list[j].capitalize()]] == i+1: #print znum_int[cl[stable[j].capitalize()]],cl[stable[j].capitalize()],stable[j] dummy = dummy + float(mass_fractions_array_decayed[j]) elem_abund_decayed[i] = dummy for i in range(z_bismuth): if index_stable[i] == 1: elem_prod_fac_decayed[i] = float(old_div(elem_abund_decayed[i],solar_elem_abund[i])) elif index_stable[i] == 0: elem_prod_fac_decayed[i] = 0.
Given an array of isotopic abundances not decayed and a similar array of decayed isotopic abundances, elemental abundances and production factors for elements are calculated here
def _grow(growth, walls, target, i, j, steps, new_steps, res): ''' fills [res] with [distance to next position where target == 1, x coord., y coord. of that position in target] using region growth i,j -> pixel position growth -> a work array, needed to measure the distance steps, new_steps -> current and last positions of the region growth steps using this instead of looking for the right step position in [growth] should speed up the process ''' # clean array: growth[:] = 0 if target[i, j]: # pixel is in target res[0] = 1 res[1] = i res[2] = j return step = 1 s0, s1 = growth.shape step_len = 1 new_step_ind = 0 steps[new_step_ind, 0] = i steps[new_step_ind, 1] = j growth[i, j] = 1 while True: for n in range(step_len): i, j = steps[n] for ii, jj in DIRECT_NEIGHBOURS: pi = i + ii pj = j + jj # if in image: if 0 <= pi < s0 and 0 <= pj < s1: # is growth array is empty and there are no walls: # fill growth with current step if growth[pi, pj] == 0 and not walls[pi, pj]: growth[pi, pj] = step if target[pi, pj]: # found destination res[0] = 1 res[1] = pi res[2] = pj return new_steps[new_step_ind, 0] = pi new_steps[new_step_ind, 1] = pj new_step_ind += 1 if new_step_ind == 0: # couldn't populate any more because growth is full # and all possible steps are gone res[0] = 0 return step += 1 steps, new_steps = new_steps, steps step_len = new_step_ind new_step_ind = 0
Fills [res] with [distance to next position where target == 1, x coord., y coord. of that position in target] using region growth. i, j -> pixel position. growth -> a work array, needed to measure the distance. steps, new_steps -> current and last positions of the region growth steps; using these instead of looking for the right step position in [growth] should speed up the process.
def lchisqprob(chisq,df): """ Returns the (1-tailed) probability value associated with the provided chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat. Usage: lchisqprob(chisq,df) """ BIG = 20.0 def ex(x): BIG = 20.0 if x < -BIG: return 0.0 else: return math.exp(x) if chisq <=0 or df < 1: return 1.0 a = 0.5 * chisq if df%2 == 0: even = 1 else: even = 0 if df > 1: y = ex(-a) if even: s = y else: s = 2.0 * zprob(-math.sqrt(chisq)) if (df > 2): chisq = 0.5 * (df - 1.0) if even: z = 1.0 else: z = 0.5 if a > BIG: if even: e = 0.0 else: e = math.log(math.sqrt(math.pi)) c = math.log(a) while (z <= chisq): e = math.log(z) + e s = s + ex(c*z-a-e) z = z + 1.0 return s else: if even: e = 1.0 else: e = 1.0 / math.sqrt(math.pi) / math.sqrt(a) c = 0.0 while (z <= chisq): e = e * (a/float(z)) c = c + e z = z + 1.0 return (c*y+s) else: return s
Returns the (1-tailed) probability value associated with the provided chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat. Usage: lchisqprob(chisq,df)
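As a sanity check, the value this routine computes is the upper-tail (survival) probability of the chi-square distribution, so it can be compared against scipy; the sketch below assumes `lchisqprob` from above is available in the current namespace (an even `df` is used so the `zprob` helper it relies on for odd degrees of freedom is not needed):

import math
from scipy import stats

chisq, df = 9.488, 4                      # ~95th percentile of chi2(4)
p_manual = lchisqprob(chisq, df)
p_scipy = stats.chi2.sf(chisq, df)
print(p_manual, p_scipy)                  # both close to 0.05
assert math.isclose(p_manual, p_scipy, rel_tol=1e-3)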
def drop_constant_column_levels(df): """ Drop the levels of a multi-level column dataframe which are constant. Operates in place. """ columns = df.columns constant_levels = [i for i, level in enumerate(columns.levels) if len(level) <= 1] constant_levels.reverse() for i in constant_levels: columns = columns.droplevel(i) df.columns = columns
Drop the levels of a multi-level column dataframe which are constant. Operates in place.
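A short illustration, assuming the function above is in scope; it builds a two-level column index in which the first level holds a single value and is therefore dropped:

import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_product([['run1'], ['a', 'b', 'c']])  # first level is constant
df = pd.DataFrame(np.arange(6).reshape(2, 3), columns=cols)
drop_constant_column_levels(df)
print(df.columns.tolist())  # ['a', 'b', 'c']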
def bridge_to_vlan(br): ''' Returns the VLAN ID of a bridge. Args: br: A string - bridge name Returns: VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake bridge. If the bridge does not exist, False is returned. CLI Example: .. code-block:: bash salt '*' openvswitch.bridge_to_vlan br0 ''' cmd = 'ovs-vsctl br-to-vlan {0}'.format(br) result = __salt__['cmd.run_all'](cmd) if result['retcode'] != 0: return False return int(result['stdout'])
Returns the VLAN ID of a bridge. Args: br: A string - bridge name Returns: VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake bridge. If the bridge does not exist, False is returned. CLI Example: .. code-block:: bash salt '*' openvswitch.bridge_to_vlan br0
def start_action(logger=None, action_type="", _serializers=None, **fields): """ Create a child L{Action}, figuring out the parent L{Action} from execution context, and log the start message. You can use the result as a Python context manager, or use the L{Action.finish} API to explicitly finish it. with start_action(logger, "yourapp:subsystem:dosomething", entry=x) as action: do(x) result = something(x * 2) action.addSuccessFields(result=result) Or alternatively: action = start_action(logger, "yourapp:subsystem:dosomething", entry=x) with action.context(): do(x) result = something(x * 2) action.addSuccessFields(result=result) action.finish() @param logger: The L{eliot.ILogger} to which to write messages, or C{None} to use the default one. @param action_type: The type of this action, e.g. C{"yourapp:subsystem:dosomething"}. @param _serializers: Either a L{eliot._validation._ActionSerializers} instance or C{None}. In the latter case no validation or serialization will be done for messages generated by the L{Action}. @param fields: Additional fields to add to the start message. @return: A new L{Action}. """ parent = current_action() if parent is None: return startTask(logger, action_type, _serializers, **fields) else: action = parent.child(logger, action_type, _serializers) action._start(fields) return action
Create a child L{Action}, figuring out the parent L{Action} from execution context, and log the start message. You can use the result as a Python context manager, or use the L{Action.finish} API to explicitly finish it. with start_action(logger, "yourapp:subsystem:dosomething", entry=x) as action: do(x) result = something(x * 2) action.addSuccessFields(result=result) Or alternatively: action = start_action(logger, "yourapp:subsystem:dosomething", entry=x) with action.context(): do(x) result = something(x * 2) action.addSuccessFields(result=result) action.finish() @param logger: The L{eliot.ILogger} to which to write messages, or C{None} to use the default one. @param action_type: The type of this action, e.g. C{"yourapp:subsystem:dosomething"}. @param _serializers: Either a L{eliot._validation._ActionSerializers} instance or C{None}. In the latter case no validation or serialization will be done for messages generated by the L{Action}. @param fields: Additional fields to add to the start message. @return: A new L{Action}.
async def _async_get_sshable_ips(self, ip_addresses): """Return list of all IP address that could be pinged.""" async def _async_ping(ip_address): try: reader, writer = await asyncio.wait_for( asyncio.open_connection(ip_address, 22), timeout=5) except (OSError, TimeoutError): return None try: line = await reader.readline() finally: writer.close() if line.startswith(b'SSH-'): return ip_address ssh_ips = await asyncio.gather(*[ _async_ping(ip_address) for ip_address in ip_addresses ]) return [ ip_address for ip_address in ssh_ips if ip_address is not None ]
Return list of all IP address that could be pinged.
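The core pattern here — concurrent TCP probes with a timeout, keeping only the hosts whose banner starts with SSH- — can be sketched standalone; the candidate addresses below are placeholders from the TEST-NET range:

import asyncio

async def probe_ssh(ip_address, timeout=5):
    # Open a TCP connection to port 22 and read the first banner line.
    try:
        reader, writer = await asyncio.wait_for(
            asyncio.open_connection(ip_address, 22), timeout=timeout)
    except (OSError, asyncio.TimeoutError):
        return None
    try:
        line = await reader.readline()
    finally:
        writer.close()
    return ip_address if line.startswith(b'SSH-') else None

async def main(candidates):
    results = await asyncio.gather(*(probe_ssh(ip) for ip in candidates))
    return [ip for ip in results if ip is not None]

print(asyncio.run(main(['192.0.2.10', '192.0.2.11'])))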
def get_section_by_rva(self, rva): """Get the section containing the given address.""" for section in self.sections: if section.contains_rva(rva): return section return None
Get the section containing the given address.
def _resolve_plt(self, addr, irsb, indir_jump): """ Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to resolve the jump target. :param int addr: Address of the block. :param irsb: The basic block. :param IndirectJump indir_jump: The IndirectJump instance. :return: True if the IRSB represents a PLT stub and we successfully resolved the target. False otherwise. :rtype: bool """ # is the address identified by CLE as a PLT stub? if self.project.loader.all_elf_objects: # restrict this heuristics to ELF files only if not any([ addr in obj.reverse_plt for obj in self.project.loader.all_elf_objects ]): return False # Make sure the IRSB has statements if not irsb.has_statements: irsb = self.project.factory.block(irsb.addr, size=irsb.size).vex # try to resolve the jump target simsucc = self.project.engines.default_engine.process(self._initial_state, irsb, force_addr=addr) if len(simsucc.successors) == 1: ip = simsucc.successors[0].ip if ip._model_concrete is not ip: target_addr = ip._model_concrete.value if (self.project.loader.find_object_containing(target_addr, membership_check=False) is not self.project.loader.main_object) \ or self.project.is_hooked(target_addr): # resolved! # Fill the IndirectJump object indir_jump.resolved_targets.add(target_addr) l.debug("Address %#x is resolved as a PLT entry, jumping to %#x", addr, target_addr) return True return False
Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to resolve the jump target. :param int addr: Address of the block. :param irsb: The basic block. :param IndirectJump indir_jump: The IndirectJump instance. :return: True if the IRSB represents a PLT stub and we successfully resolved the target. False otherwise. :rtype: bool
def set_orthogonal_selection(self, selection, value, fields=None): """Modify data via a selection for each dimension of the array. Parameters ---------- selection : tuple A selection for each dimension of the array. May be any combination of int, slice, integer array or Boolean array. value : scalar or array-like Value to be stored into the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> z = zarr.zeros((5, 5), dtype=int) Set data for a selection of rows:: >>> z.set_orthogonal_selection(([1, 4], slice(None)), 1) >>> z[...] array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]) Set data for a selection of columns:: >>> z.set_orthogonal_selection((slice(None), [1, 4]), 2) >>> z[...] array([[0, 2, 0, 0, 2], [1, 2, 1, 1, 2], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 2, 1, 1, 2]]) Set data for a selection of rows and columns:: >>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3) >>> z[...] array([[0, 2, 0, 0, 2], [1, 3, 1, 1, 3], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 3, 1, 1, 3]]) For convenience, this functionality is also available via the `oindex` property. E.g.:: >>> z.oindex[[1, 4], [1, 4]] = 4 >>> z[...] array([[0, 2, 0, 0, 2], [1, 4, 1, 1, 4], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 4, 1, 1, 4]]) Notes ----- Orthogonal indexing is also known as outer indexing. Slices with step > 1 are supported, but slices with negative step are not. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, vindex, oindex, __getitem__, __setitem__ """ # guard conditions if self._read_only: err_read_only() # refresh metadata if not self._cache_metadata: self._load_metadata_nosync() # setup indexer indexer = OrthogonalIndexer(selection, self) self._set_selection(indexer, value, fields=fields)
Modify data via a selection for each dimension of the array. Parameters ---------- selection : tuple A selection for each dimension of the array. May be any combination of int, slice, integer array or Boolean array. value : scalar or array-like Value to be stored into the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> z = zarr.zeros((5, 5), dtype=int) Set data for a selection of rows:: >>> z.set_orthogonal_selection(([1, 4], slice(None)), 1) >>> z[...] array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]) Set data for a selection of columns:: >>> z.set_orthogonal_selection((slice(None), [1, 4]), 2) >>> z[...] array([[0, 2, 0, 0, 2], [1, 2, 1, 1, 2], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 2, 1, 1, 2]]) Set data for a selection of rows and columns:: >>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3) >>> z[...] array([[0, 2, 0, 0, 2], [1, 3, 1, 1, 3], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 3, 1, 1, 3]]) For convenience, this functionality is also available via the `oindex` property. E.g.:: >>> z.oindex[[1, 4], [1, 4]] = 4 >>> z[...] array([[0, 2, 0, 0, 2], [1, 4, 1, 1, 4], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 4, 1, 1, 4]]) Notes ----- Orthogonal indexing is also known as outer indexing. Slices with step > 1 are supported, but slices with negative step are not. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, vindex, oindex, __getitem__, __setitem__
def ParseFileObject(self, parser_mediator, file_object): """Parses a NTFS $UsnJrnl metadata file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. """ volume = pyfsntfs.volume() try: volume.open_file_object(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open NTFS volume with error: {0!s}'.format(exception)) try: usn_change_journal = volume.get_usn_change_journal() self._ParseUSNChangeJournal(parser_mediator, usn_change_journal) finally: volume.close()
Parses a NTFS $UsnJrnl metadata file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
def get_category(self, id, **data): """ GET /categories/:id/ Gets a :format:`category` by ID as ``category``. """ return self.get("/categories/{0}/".format(id), data=data)
GET /categories/:id/ Gets a :format:`category` by ID as ``category``.
def log_response(response: str, trim_log_values: bool = False, **kwargs: Any) -> None: """Log a response""" return log_(response, response_logger, logging.INFO, trim=trim_log_values, **kwargs)
Log a response
def on_reset_compat_defaults_clicked(self, bnt): """Reset default values to compat_{backspace,delete} dconf keys. The default values are retrieved from the guake.schemas file. """ self.settings.general.reset('compat-backspace') self.settings.general.reset('compat-delete') self.reload_erase_combos()
Reset default values to compat_{backspace,delete} dconf keys. The default values are retrieved from the guake.schemas file.
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): """ Override the default naming scheme: scalar (many-to-one) relationships are named after the referred class, with a '_ref' suffix. """ name = referred_cls.__name__.lower() + "_ref" return name
Override the default naming scheme: scalar (many-to-one) relationships are named after the referred class, with a '_ref' suffix.
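This signature matches the naming-override hooks of SQLAlchemy's automap extension, so a plausible (hedged) way to wire it up looks like the following; the database URL is a placeholder and the `reflect=True` form is the pre-2.0 calling convention:

from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base

Base = automap_base()
engine = create_engine('sqlite:///example.db')  # placeholder URL

# Many-to-one relationships will be named '<referred_class>_ref'
# instead of the default '<referred_class>'.
Base.prepare(engine, reflect=True,
             name_for_scalar_relationship=name_for_scalar_relationship)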
def credit_note(request, note_id, access_code=None): ''' Displays a credit note. If ``request`` is a ``POST`` request, forms for applying or refunding a credit note will be processed. This view requires a login, and the logged in user must be staff. Arguments: note_id (castable to int): The ID of the credit note to view. Returns: render or redirect: If the "apply to invoice" form is correctly processed, redirect to that invoice, otherwise, render ``registration/credit_note.html`` with the following data:: { "credit_note": models.commerce.CreditNote(), "apply_form": form, # A form for applying credit note # to an invoice. "refund_form": form, # A form for applying a *manual* # refund of the credit note. "cancellation_fee_form" : form, # A form for generating an # invoice with a # cancellation fee } ''' note_id = int(note_id) current_note = CreditNoteController.for_id_or_404(note_id) apply_form = forms.ApplyCreditNoteForm( current_note.credit_note.invoice.user, request.POST or None, prefix="apply_note" ) refund_form = forms.ManualCreditNoteRefundForm( request.POST or None, prefix="refund_note" ) cancellation_fee_form = forms.CancellationFeeForm( request.POST or None, prefix="cancellation_fee" ) if request.POST and apply_form.is_valid(): inv_id = apply_form.cleaned_data["invoice"] invoice = commerce.Invoice.objects.get(pk=inv_id) current_note.apply_to_invoice(invoice) messages.success( request, "Applied credit note %d to invoice." % note_id, ) return redirect("invoice", invoice.id) elif request.POST and refund_form.is_valid(): refund_form.instance.entered_by = request.user refund_form.instance.parent = current_note.credit_note refund_form.save() messages.success( request, "Applied manual refund to credit note." ) refund_form = forms.ManualCreditNoteRefundForm( prefix="refund_note", ) elif request.POST and cancellation_fee_form.is_valid(): percentage = cancellation_fee_form.cleaned_data["percentage"] invoice = current_note.cancellation_fee(percentage) messages.success( request, "Generated cancellation fee for credit note %d." % note_id, ) return redirect("invoice", invoice.invoice.id) data = { "credit_note": current_note.credit_note, "apply_form": apply_form, "refund_form": refund_form, "cancellation_fee_form": cancellation_fee_form, } return render(request, "registrasion/credit_note.html", data)
Displays a credit note. If ``request`` is a ``POST`` request, forms for applying or refunding a credit note will be processed. This view requires a login, and the logged in user must be staff. Arguments: note_id (castable to int): The ID of the credit note to view. Returns: render or redirect: If the "apply to invoice" form is correctly processed, redirect to that invoice, otherwise, render ``registration/credit_note.html`` with the following data:: { "credit_note": models.commerce.CreditNote(), "apply_form": form, # A form for applying credit note # to an invoice. "refund_form": form, # A form for applying a *manual* # refund of the credit note. "cancellation_fee_form" : form, # A form for generating an # invoice with a # cancellation fee }
def n_members(self): """ Returns the number of members in the domain if it `is_finite`, otherwise, returns `np.inf`. :type: ``int`` or ``np.inf`` """ if self.is_finite: return reduce(mul, [domain.n_members for domain in self._domains], 1) else: return np.inf
Returns the number of members in the domain if it `is_finite`, otherwise, returns `np.inf`. :type: ``int`` or ``np.inf``
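The finite case is just the product of the member counts of the sub-domains, which can be written directly; the sketch below assumes each element of `domain_sizes` stands in for a sub-domain's integer `n_members`:

from functools import reduce
from operator import mul

domain_sizes = [2, 3, 4]               # stand-ins for domain.n_members
total = reduce(mul, domain_sizes, 1)   # 24 members in the product domain
print(total)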
def inter_event_time_distribution(self, u=None, v=None): """Return the distribution of inter event time. If u and v are None the dynamic graph intere event distribution is returned. If u is specified the inter event time distribution of interactions involving u is returned. If u and v are specified the inter event time distribution of (u, v) interactions is returned Parameters ---------- u : node id v : node id Returns ------- nd : dictionary A dictionary from inter event time to number of occurrences """ dist = {} if u is None: # global inter event first = True delta = None for ext in self.stream_interactions(): if first: delta = ext first = False continue disp = ext[-1] - delta[-1] delta = ext if disp in dist: dist[disp] += 1 else: dist[disp] = 1 elif u is not None and v is None: # node inter event delta = (0, 0, 0, 0) flag = False for ext in self.stream_interactions(): if ext[0] == u or ext[1] == u: if flag: disp = ext[-1] - delta[-1] delta = ext if disp in dist: dist[disp] += 1 else: dist[disp] = 1 else: delta = ext flag = True else: # interaction inter event evt = self._adj[u][v]['t'] delta = [] for i in evt: if i[0] != i[1]: for j in [0, 1]: delta.append(i[j]) else: delta.append(i[0]) if len(delta) == 2 and delta[0] == delta[1]: return {} for i in range(0, len(delta) - 1): e = delta[i + 1] - delta[i] if e not in dist: dist[e] = 1 else: dist[e] += 1 return dist
Return the distribution of inter event time. If u and v are None the dynamic graph inter event time distribution is returned. If u is specified the inter event time distribution of interactions involving u is returned. If u and v are specified the inter event time distribution of (u, v) interactions is returned. Parameters ---------- u : node id v : node id Returns ------- nd : dictionary A dictionary from inter event time to number of occurrences
def create(self, to, from_, method=values.unset, fallback_url=values.unset, fallback_method=values.unset, status_callback=values.unset, status_callback_event=values.unset, status_callback_method=values.unset, send_digits=values.unset, timeout=values.unset, record=values.unset, recording_channels=values.unset, recording_status_callback=values.unset, recording_status_callback_method=values.unset, sip_auth_username=values.unset, sip_auth_password=values.unset, machine_detection=values.unset, machine_detection_timeout=values.unset, recording_status_callback_event=values.unset, trim=values.unset, caller_id=values.unset, machine_detection_speech_threshold=values.unset, machine_detection_speech_end_threshold=values.unset, machine_detection_silence_timeout=values.unset, url=values.unset, application_sid=values.unset): """ Create a new CallInstance :param unicode to: Phone number, SIP address, or client identifier to call :param unicode from_: Twilio number from which to originate the call :param unicode method: HTTP method to use to fetch TwiML :param unicode fallback_url: Fallback URL in case of error :param unicode fallback_method: HTTP Method to use with fallback_url :param unicode status_callback: The URL we should call to send status information to your application :param unicode status_callback_event: The call progress events that we send to the `status_callback` URL. :param unicode status_callback_method: HTTP Method to use with status_callback :param unicode send_digits: The digits to dial after connecting to the number :param unicode timeout: Number of seconds to wait for an answer :param bool record: Whether or not to record the call :param unicode recording_channels: The number of channels in the final recording :param unicode recording_status_callback: The URL that we call when the recording is available to be accessed :param unicode recording_status_callback_method: The HTTP method we should use when calling the `recording_status_callback` URL :param unicode sip_auth_username: The username used to authenticate the caller making a SIP call :param unicode sip_auth_password: The password required to authenticate the user account specified in `sip_auth_username`. :param unicode machine_detection: Enable machine detection or end of greeting detection :param unicode machine_detection_timeout: Number of seconds to wait for machine detection :param unicode recording_status_callback_event: The recording status events that will trigger calls to the URL specified in `recording_status_callback` :param unicode trim: Set this parameter to control trimming of silence on the recording. :param unicode caller_id: The phone number, SIP address, or Client identifier that made this call. Phone numbers are in E.164 format (e.g., +16175551212). SIP addresses are formatted as `[email protected]`. 
:param unicode machine_detection_speech_threshold: Number of milliseconds for measuring stick for the length of the speech activity :param unicode machine_detection_speech_end_threshold: Number of milliseconds of silence after speech activity :param unicode machine_detection_silence_timeout: Number of milliseconds of initial silence :param unicode url: The absolute URL that returns TwiML for this call :param unicode application_sid: The SID of the Application resource that will handle the call :returns: Newly created CallInstance :rtype: twilio.rest.api.v2010.account.call.CallInstance """ data = values.of({ 'To': to, 'From': from_, 'Url': url, 'ApplicationSid': application_sid, 'Method': method, 'FallbackUrl': fallback_url, 'FallbackMethod': fallback_method, 'StatusCallback': status_callback, 'StatusCallbackEvent': serialize.map(status_callback_event, lambda e: e), 'StatusCallbackMethod': status_callback_method, 'SendDigits': send_digits, 'Timeout': timeout, 'Record': record, 'RecordingChannels': recording_channels, 'RecordingStatusCallback': recording_status_callback, 'RecordingStatusCallbackMethod': recording_status_callback_method, 'SipAuthUsername': sip_auth_username, 'SipAuthPassword': sip_auth_password, 'MachineDetection': machine_detection, 'MachineDetectionTimeout': machine_detection_timeout, 'RecordingStatusCallbackEvent': serialize.map(recording_status_callback_event, lambda e: e), 'Trim': trim, 'CallerId': caller_id, 'MachineDetectionSpeechThreshold': machine_detection_speech_threshold, 'MachineDetectionSpeechEndThreshold': machine_detection_speech_end_threshold, 'MachineDetectionSilenceTimeout': machine_detection_silence_timeout, }) payload = self._version.create( 'POST', self._uri, data=data, ) return CallInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Create a new CallInstance :param unicode to: Phone number, SIP address, or client identifier to call :param unicode from_: Twilio number from which to originate the call :param unicode method: HTTP method to use to fetch TwiML :param unicode fallback_url: Fallback URL in case of error :param unicode fallback_method: HTTP Method to use with fallback_url :param unicode status_callback: The URL we should call to send status information to your application :param unicode status_callback_event: The call progress events that we send to the `status_callback` URL. :param unicode status_callback_method: HTTP Method to use with status_callback :param unicode send_digits: The digits to dial after connecting to the number :param unicode timeout: Number of seconds to wait for an answer :param bool record: Whether or not to record the call :param unicode recording_channels: The number of channels in the final recording :param unicode recording_status_callback: The URL that we call when the recording is available to be accessed :param unicode recording_status_callback_method: The HTTP method we should use when calling the `recording_status_callback` URL :param unicode sip_auth_username: The username used to authenticate the caller making a SIP call :param unicode sip_auth_password: The password required to authenticate the user account specified in `sip_auth_username`. :param unicode machine_detection: Enable machine detection or end of greeting detection :param unicode machine_detection_timeout: Number of seconds to wait for machine detection :param unicode recording_status_callback_event: The recording status events that will trigger calls to the URL specified in `recording_status_callback` :param unicode trim: Set this parameter to control trimming of silence on the recording. :param unicode caller_id: The phone number, SIP address, or Client identifier that made this call. Phone numbers are in E.164 format (e.g., +16175551212). SIP addresses are formatted as `[email protected]`. :param unicode machine_detection_speech_threshold: Number of milliseconds for measuring stick for the length of the speech activity :param unicode machine_detection_speech_end_threshold: Number of milliseconds of silence after speech activity :param unicode machine_detection_silence_timeout: Number of milliseconds of initial silence :param unicode url: The absolute URL that returns TwiML for this call :param unicode application_sid: The SID of the Application resource that will handle the call :returns: Newly created CallInstance :rtype: twilio.rest.api.v2010.account.call.CallInstance
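For context, this endpoint is normally reached through the Twilio helper library's documented quickstart; a hedged usage sketch (credentials, phone numbers, and the demo TwiML URL are placeholders) looks like:

from twilio.rest import Client

# Placeholder credentials and phone numbers.
client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')

call = client.calls.create(
    to='+15558675310',
    from_='+15017122661',
    url='http://demo.twilio.com/docs/voice.xml',
)
print(call.sid)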
def dataset_path(cache=None, cachefile="~/.io3d_cache.yaml", get_root=False): """Get dataset path. :param cache: CacheFile object :param cachefile: cachefile path, default '~/.io3d_cache.yaml' :return: path to dataset """ local_data_dir = local_dir if cachefile is not None: cache = cachef.CacheFile(cachefile) # cache.update('local_dataset_dir', head) if cache is not None: local_data_dir = cache.get_or_save_default("local_dataset_dir", local_dir) if not get_root: local_data_dir = op.join(local_data_dir, "medical", "orig") return op.expanduser(local_data_dir)
Get dataset path. :param cache: CacheFile object :param cachefile: cachefile path, default '~/.io3d_cache.yaml' :return: path to dataset
def is_dsub_operation(cls, op): """Determine if a pipelines operation is a dsub request. We don't have a rigorous way to identify an operation as being submitted by dsub. Our best option is to check for certain fields that have always been part of dsub operations. - labels: job-id, job-name, and user-id have always existed - envs: _SCRIPT has always existed. In order to keep a simple heuristic this test only uses labels. Args: op: a pipelines operation. Returns: Boolean, true if the pipeline run was generated by dsub. """ if not cls.is_pipelines_operation(op): return False for name in ['job-id', 'job-name', 'user-id']: if not cls.get_operation_label(op, name): return False return True
Determine if a pipelines operation is a dsub request. We don't have a rigorous way to identify an operation as being submitted by dsub. Our best option is to check for certain fields that have always been part of dsub operations. - labels: job-id, job-name, and user-id have always existed - envs: _SCRIPT has always existed. In order to keep a simple heuristic this test only uses labels. Args: op: a pipelines operation. Returns: Boolean, true if the pipeline run was generated by dsub.
def run_nested(self, nlive_init=500, maxiter_init=None, maxcall_init=None, dlogz_init=0.01, logl_max_init=np.inf, nlive_batch=500, wt_function=None, wt_kwargs=None, maxiter_batch=None, maxcall_batch=None, maxiter=None, maxcall=None, maxbatch=None, stop_function=None, stop_kwargs=None, use_stop=True, save_bounds=True, print_progress=True, print_func=None, live_points=None): """ **The main dynamic nested sampling loop.** After an initial "baseline" run using a constant number of live points, dynamically allocates additional (nested) samples to optimize a specified weight function until a specified stopping criterion is reached. Parameters ---------- nlive_init : int, optional The number of live points used during the initial ("baseline") nested sampling run. Default is `500`. maxiter_init : int, optional Maximum number of iterations for the initial baseline nested sampling run. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxcall_init : int, optional Maximum number of likelihood evaluations for the initial baseline nested sampling run. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). dlogz_init : float, optional The baseline run will stop when the estimated contribution of the remaining prior volume to the total evidence falls below this threshold. Explicitly, the stopping criterion is `ln(z + z_est) - ln(z) < dlogz`, where `z` is the current evidence from all saved samples and `z_est` is the estimated contribution from the remaining volume. The default is `0.01`. logl_max_init : float, optional The baseline run will stop when the sampled ln(likelihood) exceeds this threshold. Default is no bound (`np.inf`). nlive_batch : int, optional The number of live points used when adding additional samples from a nested sampling run within each batch. Default is `500`. wt_function : func, optional A cost function that takes a :class:`Results` instance and returns a log-likelihood range over which a new batch of samples should be generated. The default function simply computes a weighted average of the posterior and evidence information content as:: weight = pfrac * pweight + (1. - pfrac) * zweight wt_kwargs : dict, optional Extra arguments to be passed to the weight function. maxiter_batch : int, optional Maximum number of iterations for the nested sampling run within each batch. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxcall_batch : int, optional Maximum number of likelihood evaluations for the nested sampling run within each batch. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxiter : int, optional Maximum number of iterations allowed. Default is `sys.maxsize` (no limit). maxcall : int, optional Maximum number of likelihood evaluations allowed. Default is `sys.maxsize` (no limit). maxbatch : int, optional Maximum number of batches allowed. Default is `sys.maxsize` (no limit). stop_function : func, optional A function that takes a :class:`Results` instance and returns a boolean indicating that we should terminate the run because we've collected enough samples. stop_kwargs : float, optional Extra arguments to be passed to the stopping function. use_stop : bool, optional Whether to evaluate our stopping function after each batch. Disabling this can improve performance if other stopping criteria such as :data:`maxcall` are already specified. Default is `True`. 
save_bounds : bool, optional Whether or not to save distributions used to bound the live points internally during dynamic live point allocation. Default is `True`. print_progress : bool, optional Whether to output a simple summary of the current run that updates each iteration. Default is `True`. print_func : function, optional A function that prints out the current state of the sampler. If not provided, the default :meth:`results.print_fn` is used. live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim) A set of live points used to initialize the nested sampling run. Contains `live_u`, the coordinates on the unit cube, `live_v`, the transformed variables, and `live_logl`, the associated loglikelihoods. By default, if these are not provided the initial set of live points will be drawn from the unit `npdim`-cube. **WARNING: It is crucial that the initial set of live points have been sampled from the prior. Failure to provide a set of valid live points will result in biased results.** """ # Initialize values. if maxcall is None: maxcall = sys.maxsize if maxiter is None: maxiter = sys.maxsize if maxiter_batch is None: maxiter_batch = sys.maxsize if maxcall_batch is None: maxcall_batch = sys.maxsize if maxbatch is None: maxbatch = sys.maxsize if maxiter_init is None: maxiter_init = sys.maxsize if maxcall_init is None: maxcall_init = sys.maxsize if wt_function is None: wt_function = weight_function if wt_kwargs is None: wt_kwargs = dict() if stop_function is None: stop_function = stopping_function if stop_kwargs is None: stop_kwargs = dict() if print_func is None: print_func = print_fn # Run the main dynamic nested sampling loop. ncall = self.ncall niter = self.it - 1 logl_bounds = (-np.inf, np.inf) maxcall_init = min(maxcall_init, maxcall) # set max calls maxiter_init = min(maxiter_init, maxiter) # set max iterations # Baseline run. if not self.base: for results in self.sample_initial(nlive=nlive_init, dlogz=dlogz_init, maxcall=maxcall_init, maxiter=maxiter_init, logl_max=logl_max_init, live_points=live_points): (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h, nc, worst_it, boundidx, bounditer, eff, delta_logz) = results ncall += nc niter += 1 # Print progress. if print_progress: print_func(results, niter, ncall, nbatch=0, dlogz=dlogz_init, logl_max=logl_max_init) # Add points in batches. for n in range(self.batch, maxbatch): # Update stopping criteria. res = self.results mcall = min(maxcall - ncall, maxcall_batch) miter = min(maxiter - niter, maxiter_batch) if mcall > 0 and miter > 0 and use_stop: if self.use_pool_stopfn: M = self.M else: M = map stop, stop_vals = stop_function(res, stop_kwargs, rstate=self.rstate, M=M, return_vals=True) stop_post, stop_evid, stop_val = stop_vals else: stop = False stop_val = np.NaN # If we have either likelihood calls or iterations remaining, # run our batch. if mcall > 0 and miter > 0 and not stop: # Compute our sampling bounds using the provided # weight function. passback = self.add_batch(nlive=nlive_batch, wt_function=wt_function, wt_kwargs=wt_kwargs, maxiter=miter, maxcall=mcall, save_bounds=save_bounds, print_progress=print_progress, print_func=print_func, stop_val=stop_val) ncall, niter, logl_bounds, results = passback elif logl_bounds[1] != np.inf: # We ran at least one batch and now we're done! if print_progress: print_func(results, niter, ncall, nbatch=n, stop_val=stop_val, logl_min=logl_bounds[0], logl_max=logl_bounds[1]) break else: # We didn't run a single batch but now we're done! 
break if print_progress: sys.stderr.write("\n")
**The main dynamic nested sampling loop.** After an initial "baseline" run using a constant number of live points, dynamically allocates additional (nested) samples to optimize a specified weight function until a specified stopping criterion is reached. Parameters ---------- nlive_init : int, optional The number of live points used during the initial ("baseline") nested sampling run. Default is `500`. maxiter_init : int, optional Maximum number of iterations for the initial baseline nested sampling run. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxcall_init : int, optional Maximum number of likelihood evaluations for the initial baseline nested sampling run. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). dlogz_init : float, optional The baseline run will stop when the estimated contribution of the remaining prior volume to the total evidence falls below this threshold. Explicitly, the stopping criterion is `ln(z + z_est) - ln(z) < dlogz`, where `z` is the current evidence from all saved samples and `z_est` is the estimated contribution from the remaining volume. The default is `0.01`. logl_max_init : float, optional The baseline run will stop when the sampled ln(likelihood) exceeds this threshold. Default is no bound (`np.inf`). nlive_batch : int, optional The number of live points used when adding additional samples from a nested sampling run within each batch. Default is `500`. wt_function : func, optional A cost function that takes a :class:`Results` instance and returns a log-likelihood range over which a new batch of samples should be generated. The default function simply computes a weighted average of the posterior and evidence information content as:: weight = pfrac * pweight + (1. - pfrac) * zweight wt_kwargs : dict, optional Extra arguments to be passed to the weight function. maxiter_batch : int, optional Maximum number of iterations for the nested sampling run within each batch. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxcall_batch : int, optional Maximum number of likelihood evaluations for the nested sampling run within each batch. Iteration may stop earlier if the termination condition is reached. Default is `sys.maxsize` (no limit). maxiter : int, optional Maximum number of iterations allowed. Default is `sys.maxsize` (no limit). maxcall : int, optional Maximum number of likelihood evaluations allowed. Default is `sys.maxsize` (no limit). maxbatch : int, optional Maximum number of batches allowed. Default is `sys.maxsize` (no limit). stop_function : func, optional A function that takes a :class:`Results` instance and returns a boolean indicating that we should terminate the run because we've collected enough samples. stop_kwargs : float, optional Extra arguments to be passed to the stopping function. use_stop : bool, optional Whether to evaluate our stopping function after each batch. Disabling this can improve performance if other stopping criteria such as :data:`maxcall` are already specified. Default is `True`. save_bounds : bool, optional Whether or not to save distributions used to bound the live points internally during dynamic live point allocation. Default is `True`. print_progress : bool, optional Whether to output a simple summary of the current run that updates each iteration. Default is `True`. 
print_func : function, optional A function that prints out the current state of the sampler. If not provided, the default :meth:`results.print_fn` is used. live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim) A set of live points used to initialize the nested sampling run. Contains `live_u`, the coordinates on the unit cube, `live_v`, the transformed variables, and `live_logl`, the associated loglikelihoods. By default, if these are not provided the initial set of live points will be drawn from the unit `npdim`-cube. **WARNING: It is crucial that the initial set of live points have been sampled from the prior. Failure to provide a set of valid live points will result in biased results.**
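A hedged end-to-end sketch of driving this loop through dynesty's public interface, using a trivial 3-D Gaussian likelihood; all names below follow the dynesty documentation rather than anything specific to this file:

import numpy as np
import dynesty

ndim = 3

def loglike(x):
    # Standard-normal log-likelihood (up to an additive constant).
    return -0.5 * np.sum(x**2)

def prior_transform(u):
    # Map the unit cube to a uniform prior on [-10, 10] in each dimension.
    return 20.0 * u - 10.0

dsampler = dynesty.DynamicNestedSampler(loglike, prior_transform, ndim)
dsampler.run_nested(nlive_init=250, nlive_batch=250, dlogz_init=0.05, maxbatch=5)
results = dsampler.results
results.summary()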
def get_objective_bank_admin_session(self, proxy, *args, **kwargs): """Gets the OsidSession associated with the objective bank administration service. :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: an ``ObjectiveBankAdminSession`` :rtype: ``osid.learning.ObjectiveBankAdminSession`` :raise: ``NullArgument`` -- ``proxy`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_objective_bank_admin() is false`` *compliance: optional -- This method must be implemented if ``supports_objective_bank_admin()`` is true.* """ if not self.supports_objective_bank_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.ObjectiveBankAdminSession(proxy=proxy, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
Gets the OsidSession associated with the objective bank administration service. :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: an ``ObjectiveBankAdminSession`` :rtype: ``osid.learning.ObjectiveBankAdminSession`` :raise: ``NullArgument`` -- ``proxy`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_objective_bank_admin() is false`` *compliance: optional -- This method must be implemented if ``supports_objective_bank_admin()`` is true.*
def _find_max_lag(x, rho_limit=0.05, maxmaxlag=20000, verbose=0): """Automatically find an appropriate maximum lag to calculate IAT""" # Fetch autocovariance matrix acv = autocov(x) # Calculate rho rho = acv[0, 1] / acv[0, 0] lam = -1. / np.log(abs(rho)) # Initial guess at 3 times lambda (i.e. 3 times mean life) maxlag = int(np.floor(3. * lam)) + 1 # Jump forward 1% of lambda to look for rholimit threshold jump = int(np.ceil(0.01 * lam)) + 1 T = len(x) while ((abs(rho) > rho_limit) & (maxlag < min(T / 2, maxmaxlag))): acv = autocov(x, maxlag) rho = acv[0, 1] / acv[0, 0] maxlag += jump # Add 30% for good measure maxlag = int(np.floor(1.3 * maxlag)) if maxlag >= min(T / 2, maxmaxlag): maxlag = min(min(T / 2, maxlag), maxmaxlag) print_("maxlag fixed to %d" % maxlag) return maxlag if maxlag <= 1: print_("maxlag = %d, fixing value to 10" % maxlag) return 10 if verbose: print_("maxlag = %d" % maxlag) return maxlag
Automatically find an appropriate maximum lag to calculate IAT
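For reference, the `autocov` helper this relies on (defined elsewhere in the same statistics module) returns a 2x2 covariance matrix between the series and a lagged copy of itself; a minimal stand-in consistent with how it is used above, plus a usage example on a strongly autocorrelated AR(1) trace, might look like:

import numpy as np

def autocov(x, lag=1):
    # Stand-in: covariance matrix of the series against itself shifted by
    # `lag`; [0, 0] is the variance, [0, 1] the lag-`lag` autocovariance.
    x = np.asarray(x)
    if not lag:
        return np.cov(x, x, bias=1)
    return np.cov(x[:-lag], x[lag:], bias=1)

rng = np.random.RandomState(0)
trace = np.zeros(5000)
for t in range(1, trace.size):
    trace[t] = 0.95 * trace[t - 1] + rng.randn()
print(_find_max_lag(trace))   # a maximum lag of a few dozen steps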
def run_ipython_notebook(notebook_str): """ References: https://github.com/paulgb/runipy >>> from utool.util_ipynb import * # NOQA """ from runipy.notebook_runner import NotebookRunner import nbformat import logging log_format = '%(asctime)s %(levelname)s: %(message)s' log_datefmt = '%m/%d/%Y %I:%M:%S %p' logging.basicConfig( level=logging.INFO, format=log_format, datefmt=log_datefmt ) #fpath = 'tmp.ipynb' #notebook_str = ut.readfrom(fpath) #nb3 = IPython.nbformat.reads(notebook_str, 3) #cell = nb4.cells[1] #self = runner #runner = NotebookRunner(nb3, mpl_inline=True) print('Executing IPython notebook') nb4 = nbformat.reads(notebook_str, 4) runner = NotebookRunner(nb4) runner.run_notebook(skip_exceptions=False) run_nb = runner.nb return run_nb
References: https://github.com/paulgb/runipy >>> from utool.util_ipynb import * # NOQA
def _calc(self, y, w): '''Helper to estimate spatial lag conditioned Markov transition probability matrices based on maximum likelihood techniques. ''' if self.discrete: self.lclass_ids = weights.lag_categorical(w, self.class_ids, ties="tryself") else: ly = weights.lag_spatial(w, y) self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify( ly, self.m, self.lag_cutoffs) self.lclasses = np.arange(self.m) T = np.zeros((self.m, self.k, self.k)) n, t = y.shape for t1 in range(t - 1): t2 = t1 + 1 for i in range(n): T[self.lclass_ids[i, t1], self.class_ids[i, t1], self.class_ids[i, t2]] += 1 P = np.zeros_like(T) for i, mat in enumerate(T): row_sum = mat.sum(axis=1) row_sum = row_sum + (row_sum == 0) p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat)) P[i] = p_i return T, P
Helper to estimate spatial lag conditioned Markov transition probability matrices based on maximum likelihood techniques.
def Ctrl_C(self, delay=0): """Ctrl + C shortcut. """ self._delay(delay) self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1))) self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.C, 1))) self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
Ctrl + C shortcut.
def move_dirty_lock_file(dirty_lock_file, sm_path): """ Move the dirty_lock file to the sm_path so that it is no longer found by the auto recovery of backups """ if dirty_lock_file is not None \ and not dirty_lock_file == os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]): logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}" "".format(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]))) os.rename(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]))
Move the dirty_lock file to the sm_path so that it is no longer found by the auto recovery of backups
def marv(ctx, config, loglevel, logfilter, verbosity): """Manage a Marv site""" if config is None: cwd = os.path.abspath(os.path.curdir) while cwd != os.path.sep: config = os.path.join(cwd, 'marv.conf') if os.path.exists(config): break cwd = os.path.dirname(cwd) else: config = '/etc/marv/marv.conf' if not os.path.exists(config): config = None ctx.obj = config setup_logging(loglevel, verbosity, logfilter)
Manage a Marv site
def network_info(host=None, admin_username=None, admin_password=None, module=None): ''' Return Network Configuration CLI Example: .. code-block:: bash salt dell dracr.network_info ''' inv = inventory(host=host, admin_username=admin_username, admin_password=admin_password) if inv is None: cmd = {} cmd['retcode'] = -1 cmd['stdout'] = 'Problem getting switch inventory' return cmd if module not in inv.get('switch') and module not in inv.get('server'): cmd = {} cmd['retcode'] = -1 cmd['stdout'] = 'No module {0} found.'.format(module) return cmd cmd = __execute_ret('getniccfg', host=host, admin_username=admin_username, admin_password=admin_password, module=module) if cmd['retcode'] != 0: log.warning('racadm returned an exit code of %s', cmd['retcode']) cmd['stdout'] = 'Network:\n' + 'Device = ' + module + '\n' + \ cmd['stdout'] return __parse_drac(cmd['stdout'])
Return Network Configuration CLI Example: .. code-block:: bash salt dell dracr.network_info
def guess_mime_type(url): """Use the mimetypes module to lookup the type for an extension. This function also adds some extensions required for HTML5 """ (mimetype, _mimeencoding) = mimetypes.guess_type(url) if not mimetype: ext = os.path.splitext(url)[1] mimetype = _MIME_TYPES.get(ext) _logger.debug("mimetype({}): {}".format(url, mimetype)) if not mimetype: mimetype = "application/octet-stream" return mimetype
Use the mimetypes module to lookup the type for an extension. This function also adds some extensions required for HTML5
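A quick sketch of the lookup guess_mime_type wraps; the _MIME_TYPES fallback table lives at module level and is not shown here, so this sketch only exercises the standard-library part.

import mimetypes

for url in ('index.html', 'video.webm', 'archive.unknown'):
    mimetype, _ = mimetypes.guess_type(url)
    # guess_mime_type additionally consults _MIME_TYPES and finally falls
    # back to 'application/octet-stream'.
    print(url, '->', mimetype or 'application/octet-stream')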
def generate_confirmation_token(self, user): """ Generates a unique confirmation token for the specified user. :param user: The user to work with """ data = [str(user.id), self.hash_data(user.email)] return self.security.confirm_serializer.dumps(data)
Generates a unique confirmation token for the specified user. :param user: The user to work with
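Conceptually the token is just a signed, timestamped dump of [str(user.id), hash_data(user.email)]. The sketch below shows that round trip with itsdangerous directly; the secret, salt, and data values are hypothetical stand-ins for what the security extension actually configures.

from itsdangerous import URLSafeTimedSerializer

# Hypothetical stand-ins for the security extension's serializer setup.
confirm_serializer = URLSafeTimedSerializer('not-a-real-secret', salt='confirm')

data = ['42', 'hash-of-user-email']        # [str(user.id), hash_data(user.email)]
token = confirm_serializer.dumps(data)

# The confirmation endpoint later reverses it, optionally enforcing an age limit.
user_id, email_hash = confirm_serializer.loads(token, max_age=3600)
print(user_id, email_hash)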
def plot_slab(slab, ax, scale=0.8, repeat=5, window=1.5,
              draw_unit_cell=True, decay=0.2, adsorption_sites=True):
    """
    Function that helps visualize the slab in a 2-D plot, for
    convenient viewing of output of AdsorbateSiteFinder.

    Args:
        slab (slab): Slab object to be visualized
        ax (axes): matplotlib axes with which to visualize
        scale (float): radius scaling for sites
        repeat (int): number of repeating unit cells to visualize
        window (float): window for setting the axes limits, is essentially
            a fraction of the unit cell limits
        draw_unit_cell (bool): flag indicating whether or not to draw cell
        decay (float): how the alpha-value decays along the z-axis
        adsorption_sites (bool): flag indicating whether to plot the
            adsorption sites found by AdsorbateSiteFinder
    """
    orig_slab = slab.copy()
    slab = reorient_z(slab)
    orig_cell = slab.lattice.matrix.copy()
    if repeat:
        slab.make_supercell([repeat, repeat, 1])
    coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
    sites = sorted(slab.sites, key=lambda x: x.coords[2])
    alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2])
    alphas = alphas.clip(min=0)
    corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]]
    corner = slab.lattice.get_cartesian_coords(corner)[:2]
    verts = orig_cell[:2, :2]
    lattsum = verts[0] + verts[1]
    # Draw circles at sites and stack them accordingly
    for n, coord in enumerate(coords):
        r = sites[n].specie.atomic_radius * scale
        ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2),
                                    r, color='w', zorder=2 * n))
        color = color_dict[sites[n].species_string]
        ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r,
                                    facecolor=color, alpha=alphas[n],
                                    edgecolor='k', lw=0.3, zorder=2 * n + 1))
    # Adsorption sites
    if adsorption_sites:
        asf = AdsorbateSiteFinder(orig_slab)
        ads_sites = asf.find_adsorption_sites()['all']
        sop = get_rot(orig_slab)
        ads_sites = [sop.operate(ads_site)[:2].tolist()
                     for ads_site in ads_sites]
        ax.plot(*zip(*ads_sites), color='k', marker='x',
                markersize=10, mew=1, linestyle='', zorder=10000)
    # Draw unit cell
    if draw_unit_cell:
        verts = np.insert(verts, 1, lattsum, axis=0).tolist()
        verts += [[0., 0.]]
        verts = [[0., 0.]] + verts
        codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
                 Path.LINETO, Path.CLOSEPOLY]
        verts = [(np.array(vert) + corner).tolist() for vert in verts]
        path = Path(verts, codes)
        patch = patches.PathPatch(path, facecolor='none', lw=2,
                                  alpha=0.5, zorder=2 * n + 2)
        ax.add_patch(patch)
    ax.set_aspect("equal")
    center = corner + lattsum / 2.
    extent = np.max(lattsum)
    lim_array = [center - extent * window, center + extent * window]
    x_lim = [ele[0] for ele in lim_array]
    y_lim = [ele[1] for ele in lim_array]
    ax.set_xlim(x_lim)
    ax.set_ylim(y_lim)
    return ax
Function that helps visualize the slab in a 2-D plot, for
convenient viewing of output of AdsorbateSiteFinder.

Args:
    slab (slab): Slab object to be visualized
    ax (axes): matplotlib axes with which to visualize
    scale (float): radius scaling for sites
    repeat (int): number of repeating unit cells to visualize
    window (float): window for setting the axes limits, is essentially
        a fraction of the unit cell limits
    draw_unit_cell (bool): flag indicating whether or not to draw cell
    decay (float): how the alpha-value decays along the z-axis
    adsorption_sites (bool): flag indicating whether to plot the
        adsorption sites found by AdsorbateSiteFinder
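A hedged usage sketch, assuming pymatgen and matplotlib are available; 'Ni.cif' is a placeholder structure file, the SlabGenerator parameters are arbitrary, and the Structure import path may differ on older pymatgen versions.

import matplotlib.pyplot as plt
from pymatgen.core import Structure
from pymatgen.core.surface import SlabGenerator

# Placeholder input; any bulk structure readable by pymatgen will do.
bulk = Structure.from_file('Ni.cif')
slab = SlabGenerator(bulk, (1, 1, 1), 10, 10).get_slab()

fig, ax = plt.subplots()
plot_slab(slab, ax, repeat=3, adsorption_sites=True)
plt.show()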
def _setup_cgroups(self, my_cpus, memlimit, memory_nodes, cgroup_values): """ This method creates the CGroups for the following execution. @param my_cpus: None or a list of the CPU cores to use @param memlimit: None or memory limit in bytes @param memory_nodes: None or a list of memory nodes of a NUMA system to use @param cgroup_values: dict of additional values to set @return cgroups: a map of all the necessary cgroups for the following execution. Please add the process of the following execution to all those cgroups! """ logging.debug("Setting up cgroups for run.") # Setup cgroups, need a single call to create_cgroup() for all subsystems subsystems = [BLKIO, CPUACCT, FREEZER, MEMORY] + self._cgroup_subsystems if my_cpus is not None or memory_nodes is not None: subsystems.append(CPUSET) subsystems = [s for s in subsystems if s in self.cgroups] cgroups = self.cgroups.create_fresh_child_cgroup(*subsystems) logging.debug("Created cgroups %s.", cgroups) # First, set user-specified values such that they get overridden by our settings if necessary. for ((subsystem, option), value) in cgroup_values.items(): try: cgroups.set_value(subsystem, option, value) except EnvironmentError as e: cgroups.remove() sys.exit('{} for setting cgroup option {}.{} to "{}" (error code {}).' .format(e.strerror, subsystem, option, value, e.errno)) logging.debug('Cgroup value %s.%s was set to "%s", new value is now "%s".', subsystem, option, value, cgroups.get_value(subsystem, option)) # Setup cpuset cgroup if necessary to limit the CPU cores/memory nodes to be used. if my_cpus is not None: my_cpus_str = ','.join(map(str, my_cpus)) cgroups.set_value(CPUSET, 'cpus', my_cpus_str) my_cpus_str = cgroups.get_value(CPUSET, 'cpus') logging.debug('Using cpu cores [%s].', my_cpus_str) if memory_nodes is not None: cgroups.set_value(CPUSET, 'mems', ','.join(map(str, memory_nodes))) memory_nodesStr = cgroups.get_value(CPUSET, 'mems') logging.debug('Using memory nodes [%s].', memory_nodesStr) # Setup memory limit if memlimit is not None: limit = 'limit_in_bytes' cgroups.set_value(MEMORY, limit, memlimit) swap_limit = 'memsw.limit_in_bytes' # We need swap limit because otherwise the kernel just starts swapping # out our process if the limit is reached. # Some kernels might not have this feature, # which is ok if there is actually no swap. if not cgroups.has_value(MEMORY, swap_limit): if systeminfo.has_swap(): sys.exit('Kernel misses feature for accounting swap memory, but machine has swap. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".') else: try: cgroups.set_value(MEMORY, swap_limit, memlimit) except IOError as e: if e.errno == errno.ENOTSUP: # kernel responds with operation unsupported if this is disabled sys.exit('Memory limit specified, but kernel does not allow limiting swap memory. Please set swapaccount=1 on your kernel command line or disable swap with "sudo swapoff -a".') raise e memlimit = cgroups.get_value(MEMORY, limit) logging.debug('Effective memory limit is %s bytes.', memlimit) if MEMORY in cgroups: try: # Note that this disables swapping completely according to # https://www.kernel.org/doc/Documentation/cgroups/memory.txt # (unlike setting the global swappiness to 0). # Our process might get killed because of this. cgroups.set_value(MEMORY, 'swappiness', '0') except IOError as e: logging.warning('Could not disable swapping for benchmarked process: %s', e) return cgroups
This method creates the CGroups for the following execution. @param my_cpus: None or a list of the CPU cores to use @param memlimit: None or memory limit in bytes @param memory_nodes: None or a list of memory nodes of a NUMA system to use @param cgroup_values: dict of additional values to set @return cgroups: a map of all the necessary cgroups for the following execution. Please add the process of the following execution to all those cgroups!
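For orientation, this is roughly the shape of the arguments the method expects; the values are hypothetical, the subsystem/option names in cgroup_values are merely examples of cgroup-v1 knobs, and `executor` stands in for an instance of the surrounding class.

# Illustrative argument shapes for _setup_cgroups (hypothetical values).
my_cpus = [0, 1, 2, 3]                  # pin the run to these CPU cores
memlimit = 8 * 1024 ** 3                # 8 GiB, in bytes
memory_nodes = [0]                      # NUMA memory node(s) for the cpuset
cgroup_values = {
    ('cpu', 'shares'): '512',           # (subsystem, option) -> value
    ('blkio', 'weight'): '500',
}
# cgroups = executor._setup_cgroups(my_cpus, memlimit, memory_nodes, cgroup_values)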
def query_extensions(self, extension_query, account_token=None, account_token_header=None): """QueryExtensions. [Preview API] :param :class:`<ExtensionQuery> <azure.devops.v5_1.gallery.models.ExtensionQuery>` extension_query: :param str account_token: :param String account_token_header: Header to pass the account token :rtype: :class:`<ExtensionQueryResult> <azure.devops.v5_1.gallery.models.ExtensionQueryResult>` """ query_parameters = {} if account_token is not None: query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str') content = self._serialize.body(extension_query, 'ExtensionQuery') response = self._send(http_method='POST', location_id='eb9d5ee1-6d43-456b-b80e-8a96fbc014b6', version='5.1-preview.1', query_parameters=query_parameters, content=content) return self._deserialize('ExtensionQueryResult', response)
QueryExtensions. [Preview API] :param :class:`<ExtensionQuery> <azure.devops.v5_1.gallery.models.ExtensionQuery>` extension_query: :param str account_token: :param String account_token_header: Header to pass the account token :rtype: :class:`<ExtensionQueryResult> <azure.devops.v5_1.gallery.models.ExtensionQueryResult>`
def splits(self):
    ''' Splits aggregate for the book, created lazily and cached. '''
    if not self.__splits_aggregate:
        self.__splits_aggregate = SplitsAggregate(self.book)
    return self.__splits_aggregate
Splits aggregate for the book, created lazily and cached.
def encode(self): """Encode the packet's buffer from the instance variables.""" tftpassert(self.filename, "filename required in initial packet") tftpassert(self.mode, "mode required in initial packet") # Make sure filename and mode are bytestrings. filename = self.filename mode = self.mode if not isinstance(filename, bytes): filename = filename.encode('ascii') if not isinstance(self.mode, bytes): mode = mode.encode('ascii') ptype = None if self.opcode == 1: ptype = "RRQ" else: ptype = "WRQ" log.debug("Encoding %s packet, filename = %s, mode = %s", ptype, filename, mode) for key in self.options: log.debug(" Option %s = %s", key, self.options[key]) fmt = b"!H" fmt += b"%dsx" % len(filename) if mode == b"octet": fmt += b"5sx" else: raise AssertionError("Unsupported mode: %s" % mode) # Add options. Note that the options list must be bytes. options_list = [] if len(list(self.options.keys())) > 0: log.debug("there are options to encode") for key in self.options: # Populate the option name name = key if not isinstance(name, bytes): name = name.encode('ascii') options_list.append(name) fmt += b"%dsx" % len(name) # Populate the option value value = self.options[key] # Work with all strings. if isinstance(value, int): value = str(value) if not isinstance(value, bytes): value = value.encode('ascii') options_list.append(value) fmt += b"%dsx" % len(value) log.debug("fmt is %s", fmt) log.debug("options_list is %s", options_list) log.debug("size of struct is %d", struct.calcsize(fmt)) self.buffer = struct.pack(fmt, self.opcode, filename, mode, *options_list) log.debug("buffer is %s", repr(self.buffer)) return self
Encode the packet's buffer from the instance variables.
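The format string built above is easier to see on a concrete packet; the standalone sketch below encodes a hypothetical RRQ for 'remote.bin' in octet mode with one blksize option and prints the resulting bytes.

import struct

opcode = 1                                  # 1 = RRQ, 2 = WRQ
filename = b'remote.bin'
mode = b'octet'
options = [(b'blksize', b'1428')]           # hypothetical negotiated option

# Mirror the format construction in encode(): opcode, then NUL-terminated
# filename, mode, and option name/value pairs.
fmt = b'!H' + b'%dsx' % len(filename) + b'5sx'
flat_options = []
for name, value in options:
    fmt += b'%dsx' % len(name) + b'%dsx' % len(value)
    flat_options.extend([name, value])

packet = struct.pack(fmt, opcode, filename, mode, *flat_options)
print(repr(packet))
# b'\x00\x01remote.bin\x00octet\x00blksize\x001428\x00'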
def iMath(image, operation, *args):
    """
    Perform various (often mathematical) operations on the input image(s).
    Additional parameters should be specific for each operation.
    See the full iMath in ANTs, on which this function is based.

    ANTsR function: `iMath`

    Arguments
    ---------
    image : ANTsImage
        input object, usually antsImage
    operation
        a string e.g. "GetLargestComponent" ... the special case of "GetOperations"
        or "GetOperationsFull" will return a list of operations and brief
        description. Some operations may not be valid (WIP), but most are.
    *args : non-keyword arguments
        additional parameters specific to the operation

    Example
    -------
    >>> import ants
    >>> img = ants.image_read(ants.get_ants_data('r16'))
    >>> img2 = ants.iMath(img, 'Canny', 1, 5, 12)
    """
    if operation not in _iMathOps:
        raise ValueError('Operation not recognized')
    imagedim = image.dimension
    outimage = image.clone()
    args = [imagedim, outimage, operation, image] + [a for a in args]
    processed_args = _int_antsProcessArguments(args)
    libfn = utils.get_lib_fn('iMath')
    libfn(processed_args)
    return outimage
Perform various (often mathematical) operations on the input image(s).
Additional parameters should be specific for each operation.
See the full iMath in ANTs, on which this function is based.

ANTsR function: `iMath`

Arguments
---------
image : ANTsImage
    input object, usually antsImage
operation
    a string e.g. "GetLargestComponent" ... the special case of "GetOperations"
    or "GetOperationsFull" will return a list of operations and brief
    description. Some operations may not be valid (WIP), but most are.
*args : non-keyword arguments
    additional parameters specific to the operation

Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.iMath(img, 'Canny', 1, 5, 12)
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str: """ Return a single block URI after validating that it is the *only* URI in all_blockchain_uris that matches the w3 instance. """ matching_uris = [ uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri) ] if not matching_uris: raise ValidationError("Package has no matching URIs on chain.") elif len(matching_uris) != 1: raise ValidationError( f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}." ) return matching_uris[0]
Return a single block URI after validating that it is the *only* URI in all_blockchain_uris that matches the w3 instance.
def _uniqualize(d):
    '''
        remove entries whose value already appeared under an earlier key,
        keeping only the first key seen for each distinct value

        d = {1:'a',2:'b',3:'c',4:'b'}
        _uniqualize(d)
    '''
    pt = copy.deepcopy(d)
    seen_values = set({})
    npt = {}
    for k in pt:
        v = pt[k]
        if(v in seen_values):
            # a later key repeats an already-seen value; drop it
            pass
        else:
            seen_values.add(v)
            npt[k] = v
    return(npt)
remove entries whose value already appeared under an earlier key,
keeping only the first key seen for each distinct value

d = {1:'a',2:'b',3:'c',4:'b'}
_uniqualize(d)
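A quick demonstration of the behaviour, assuming Python 3.7+ insertion-ordered dicts so that "earlier key" is well defined:

d = {1: 'a', 2: 'b', 3: 'c', 4: 'b'}
print(_uniqualize(d))   # {1: 'a', 2: 'b', 3: 'c'} -- key 4 repeats 'b' and is dropped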
def _onDocstring( self, docstr, line ): " Memorizes a function/class/module docstring " if self.objectsStack: self.objectsStack[ -1 ].docstring = \ Docstring( trim_docstring( docstr ), line ) return self.docstring = Docstring( trim_docstring( docstr ), line ) return
Memorizes a function/class/module docstring