code : string (lengths 75 to 104k)
docstring : string (lengths 1 to 46.9k)
def check_file_for_tabs(filename, verbose=True):
    """identifies if the file contains tabs and returns True if it does.
    It also prints the location of the lines and columns. If verbose is
    set to False, the location is not printed.

    :param verbose: if true prints information about issues
    :param filename: the filename
    :rtype: True if there are tabs in the file
    """
    file_contains_tabs = False
    with open(filename) as f:
        lines = f.read().split("\n")

    line_no = 1
    for line in lines:
        if "\t" in line:
            file_contains_tabs = True
            location = [
                i for i in range(len(line)) if line.startswith('\t', i)]
            if verbose:
                Console.error("Tab found in line {} and column(s) {}"
                              .format(line_no,
                                      str(location).replace("[", "").replace("]", "")),
                              traceflag=False)
        line_no += 1
    return file_contains_tabs
identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints information about issues :param filename: the filename :rtype: True if there are tabs in the file
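A minimal usage sketch; the filename is a placeholder and `Console` is whatever logging helper the surrounding project already provides:

# Hypothetical filename; check_file_for_tabs prints tab locations via Console.error.
if check_file_for_tabs("example.py"):
    print("Please convert tabs to spaces before committing.")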
def new_filename(data, file_kind, ext):
    """Returns an available filename.

    :param file_kind: Name under which numbering is recorded, such as
                      'img' or 'table'.
    :type file_kind: str
    :param ext: Filename extension.
    :type ext: str
    :returns: (filename, rel_filepath) where filename is a path in the
              filesystem and rel_filepath is the path to be used in the
              tex code.
    """
    nb_key = file_kind + "number"
    if nb_key not in data.keys():
        data[nb_key] = -1

    if not data["override externals"]:
        # Make sure not to overwrite anything.
        file_exists = True
        while file_exists:
            data[nb_key] = data[nb_key] + 1
            filename, name = _gen_filename(data, nb_key, ext)
            file_exists = os.path.isfile(filename)
    else:
        data[nb_key] = data[nb_key] + 1
        filename, name = _gen_filename(data, nb_key, ext)

    if data["rel data path"]:
        rel_filepath = posixpath.join(data["rel data path"], name)
    else:
        rel_filepath = name

    return filename, rel_filepath
Returns an available filename. :param file_kind: Name under which numbering is recorded, such as 'img' or 'table'. :type file_kind: str :param ext: Filename extension. :type ext: str :returns: (filename, rel_filepath) where filename is a path in the filesystem and rel_filepath is the path to be used in the tex code.
def indent(text: str, num: int = 2) -> str:
    """Indent a piece of text."""
    lines = text.splitlines()
    return "\n".join(indent_iterable(lines, num=num))
Indent a piece of text.
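`indent` delegates to an `indent_iterable` helper that is not shown in this row; a plausible minimal version (an assumption, not the project's actual code) and a quick check:

def indent_iterable(lines, num=2):
    # Prefix every line with `num` spaces.
    return [" " * num + line for line in lines]

print(indent("a\nb"))  # prints "  a" and "  b", each indented by two spaces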
def ligolw_add(xmldoc, urls, non_lsc_tables_ok = False, verbose = False, contenthandler = DefaultContentHandler):
    """
    An implementation of the LIGO LW add algorithm. urls is a list of
    URLs (or filenames) to load, xmldoc is the XML document tree to
    which they should be added.
    """
    # Input
    for n, url in enumerate(urls):
        if verbose:
            print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)),
        utils.load_url(url, verbose = verbose, xmldoc = xmldoc, contenthandler = contenthandler)

    # ID reassignment
    if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc):
        raise ValueError("non-LSC tables found. Use --non-lsc-tables-ok to force")
    reassign_ids(xmldoc, verbose = verbose)

    # Document merge
    if verbose:
        print >>sys.stderr, "merging elements ..."
    merge_ligolws(xmldoc)
    merge_compatible_tables(xmldoc)

    return xmldoc
An implementation of the LIGO LW add algorithm. urls is a list of URLs (or filenames) to load, xmldoc is the XML document tree to which they should be added.
def load_extra_emacs_page_navigation_bindings():
    """
    Key bindings for scrolling up and down through pages.

    These are separate bindings, because GNU readline doesn't have them.
    """
    registry = ConditionalRegistry(Registry(), EmacsMode())
    handle = registry.add_binding

    handle(Keys.ControlV)(scroll_page_down)
    handle(Keys.PageDown)(scroll_page_down)
    handle(Keys.Escape, 'v')(scroll_page_up)
    handle(Keys.PageUp)(scroll_page_up)

    return registry
Key bindings for scrolling up and down through pages. These are separate bindings, because GNU readline doesn't have them.
def terminate_bits(self, payload):
    """This method adds zeros to the end of the encoded data so that the
    encoded data is of the correct length. It returns a binary string
    containing the bits to be added.
    """
    data_capacity = tables.data_capacity[self.version][self.error][0]

    if len(payload) > data_capacity:
        raise ValueError('The supplied data will not fit '
                         'within this version of a QR code.')

    # We must add up to 4 zeros to make up for any shortfall in the
    # length of the data field.
    if len(payload) == data_capacity:
        return None
    elif len(payload) <= data_capacity - 4:
        bits = self.binary_string(0, 4)
    else:
        # Make up any shortfall needed with fewer than 4 zeros.
        bits = self.binary_string(0, data_capacity - len(payload))

    return bits
This method adds zeros to the end of the encoded data so that the encoded data is of the correct length. It returns a binary string containing the bits to be added.
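`terminate_bits` relies on a `binary_string(data, length)` method of the same class; judging from how it is called here, it zero-pads a value to a fixed number of bits, roughly as in this sketch (an assumed helper, not the library's verbatim source):

def binary_string(data, length):
    # Return `data` as a bit string left-padded with zeros to `length` bits.
    return '{{0:0{0}b}}'.format(length).format(int(data))

print(binary_string(0, 4))  # '0000' - the terminator appended after the payload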
def status(self):
    """
    Collects the instances state and returns a list.

    .. important::

        Molecule assumes all instances were created successfully by
        Ansible, otherwise Ansible would return an error on create.
        This may prove to be a bad assumption. However, configuring
        Molecule's driver to match the options passed to the playbook
        may prove difficult. Especially in cases where the user is
        provisioning instances off localhost.

    :returns: list
    """
    status_list = []
    for platform in self._config.platforms.instances:
        instance_name = platform['name']
        driver_name = self.name
        provisioner_name = self._config.provisioner.name
        scenario_name = self._config.scenario.name

        status_list.append(
            Status(
                instance_name=instance_name,
                driver_name=driver_name,
                provisioner_name=provisioner_name,
                scenario_name=scenario_name,
                created=self._created(),
                converged=self._converged(),
            ))

    return status_list
Collects the instances state and returns a list. .. important:: Molecule assumes all instances were created successfully by Ansible, otherwise Ansible would return an error on create. This may prove to be a bad assumption. However, configuring Molecule's driver to match the options passed to the playbook may prove difficult. Especially in cases where the user is provisioning instances off localhost. :returns: list
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory"""
    walker = sorted_walk(egg_dir)
    base, dirs, files = next(walker)
    if 'EGG-INFO' in dirs:
        dirs.remove('EGG-INFO')
    yield base, dirs, files
    for bdf in walker:
        yield bdf
Walk an unpacked egg's contents, skipping the metadata directory
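Usage mirrors `os.walk`; the egg directory below is a placeholder:

import os

for base, dirs, files in walk_egg("build/example.egg"):  # placeholder path
    for fname in files:
        print(os.path.join(base, fname))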
def _downloaded_filename(self): """Download the package's archive if necessary, and return its filename. --no-deps is implied, as we have reimplemented the bits that would ordinarily do dependency resolution. """ # Peep doesn't support requirements that don't come down as a single # file, because it can't hash them. Thus, it doesn't support editable # requirements, because pip itself doesn't support editable # requirements except for "local projects or a VCS url". Nor does it # support VCS requirements yet, because we haven't yet come up with a # portable, deterministic way to hash them. In summary, all we support # is == requirements and tarballs/zips/etc. # TODO: Stop on reqs that are editable or aren't ==. # If the requirement isn't already specified as a URL, get a URL # from an index: link = self._link() or self._finder.find_requirement(self._req, upgrade=False) if link: lower_scheme = link.scheme.lower() # pip lower()s it for some reason. if lower_scheme == 'http' or lower_scheme == 'https': file_path = self._download(link) return basename(file_path) elif lower_scheme == 'file': # The following is inspired by pip's unpack_file_url(): link_path = url_to_path(link.url_without_fragment) if isdir(link_path): raise UnsupportedRequirementError( "%s: %s is a directory. So that it can compute " "a hash, peep supports only filesystem paths which " "point to files" % (self._req, link.url_without_fragment)) else: copy(link_path, self._temp_path) return basename(link_path) else: raise UnsupportedRequirementError( "%s: The download link, %s, would not result in a file " "that can be hashed. Peep supports only == requirements, " "file:// URLs pointing to files (not folders), and " "http:// and https:// URLs pointing to tarballs, zips, " "etc." % (self._req, link.url)) else: raise UnsupportedRequirementError( "%s: couldn't determine where to download this requirement from." % (self._req,))
Download the package's archive if necessary, and return its filename. --no-deps is implied, as we have reimplemented the bits that would ordinarily do dependency resolution.
def operator(func=None, *, pipable=False): """Create a stream operator from an asynchronous generator (or any function returning an asynchronous iterable). Decorator usage:: @operator async def random(offset=0., width=1.): while True: yield offset + width * random.random() Decorator usage for pipable operators:: @operator(pipable=True) async def multiply(source, factor): async with streamcontext(source) as streamer: async for item in streamer: yield factor * item In the case of pipable operators, the first argument is expected to be the asynchronous iteratable used for piping. The return value is a dynamically created class. It has the same name, module and doc as the original function. A new stream is created by simply instanciating the operator:: xs = random() ys = multiply(xs, 2) The original function is called at instanciation to check that signature match. In the case of pipable operators, the source is also checked for asynchronous iteration. The operator also have a pipe class method that can be used along with the piping synthax:: xs = random() ys = xs | multiply.pipe(2) This is strictly equivalent to the previous example. Other methods are available: - `original`: the original function as a static method - `raw`: same as original but add extra checking The raw method is useful to create new operators from existing ones:: @operator(pipable=True) def double(source): return multiply.raw(source, 2) """ def decorator(func): """Inner decorator for stream operator.""" # Gather data bases = (Stream,) name = func.__name__ module = func.__module__ extra_doc = func.__doc__ doc = extra_doc or f'Regular {name} stream operator.' # Extract signature signature = inspect.signature(func) parameters = list(signature.parameters.values()) if parameters and parameters[0].name in ('self', 'cls'): raise ValueError( 'An operator cannot be created from a method, ' 'since the decorated function becomes an operator class') # Injected parameters self_parameter = inspect.Parameter( 'self', inspect.Parameter.POSITIONAL_OR_KEYWORD) cls_parameter = inspect.Parameter( 'cls', inspect.Parameter.POSITIONAL_OR_KEYWORD) # Wrapped static method original = func original.__qualname__ = name + '.original' # Raw static method raw = func raw.__qualname__ = name + '.raw' # Init method def init(self, *args, **kwargs): if pipable and args: assert_async_iterable(args[0]) factory = functools.partial(self.raw, *args, **kwargs) return Stream.__init__(self, factory) # Customize init signature new_parameters = [self_parameter] + parameters init.__signature__ = signature.replace(parameters=new_parameters) # Customize init method init.__qualname__ = name + '.__init__' init.__name__ = '__init__' init.__module__ = module init.__doc__ = f'Initialize the {name} stream.' 
if pipable: # Raw static method def raw(*args, **kwargs): if args: assert_async_iterable(args[0]) return func(*args, **kwargs) # Custonize raw method raw.__signature__ = signature raw.__qualname__ = name + '.raw' raw.__module__ = module raw.__doc__ = doc # Pipe class method def pipe(cls, *args, **kwargs): return lambda source: cls(source, *args, **kwargs) # Customize pipe signature if parameters and parameters[0].kind in ( inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): new_parameters = [cls_parameter] + parameters[1:] else: new_parameters = [cls_parameter] + parameters pipe.__signature__ = signature.replace(parameters=new_parameters) # Customize pipe method pipe.__qualname__ = name + '.pipe' pipe.__module__ = module pipe.__doc__ = f'Pipable "{name}" stream operator.' if extra_doc: pipe.__doc__ += "\n\n " + extra_doc # Gather attributes attrs = { '__init__': init, '__module__': module, '__doc__': doc, 'raw': staticmethod(raw), 'original': staticmethod(original), 'pipe': classmethod(pipe) if pipable else None} # Create operator class return type(name, bases, attrs) return decorator if func is None else decorator(func)
Create a stream operator from an asynchronous generator (or any function returning an asynchronous iterable). Decorator usage:: @operator async def random(offset=0., width=1.): while True: yield offset + width * random.random() Decorator usage for pipable operators:: @operator(pipable=True) async def multiply(source, factor): async with streamcontext(source) as streamer: async for item in streamer: yield factor * item In the case of pipable operators, the first argument is expected to be the asynchronous iteratable used for piping. The return value is a dynamically created class. It has the same name, module and doc as the original function. A new stream is created by simply instanciating the operator:: xs = random() ys = multiply(xs, 2) The original function is called at instanciation to check that signature match. In the case of pipable operators, the source is also checked for asynchronous iteration. The operator also have a pipe class method that can be used along with the piping synthax:: xs = random() ys = xs | multiply.pipe(2) This is strictly equivalent to the previous example. Other methods are available: - `original`: the original function as a static method - `raw`: same as original but add extra checking The raw method is useful to create new operators from existing ones:: @operator(pipable=True) def double(source): return multiply.raw(source, 2)
def list_runids(s3_client, full_path):
    """Return list of all run ids inside S3 folder.
    It does not respect S3 pagination (`MaxKeys`) and returns **all** keys
    from bucket and won't list any prefixes with object archived to AWS Glacier

    Arguments:
    s3_client - boto3 S3 client (not service)
    full_path - full valid S3 path to events (such as enriched-archive)
                example: s3://acme-events-bucket/main-pipeline/enriched-archive
    """
    listing_finished = False    # last response was not truncated
    run_ids_buffer = []
    last_continuation_token = None

    (bucket, prefix) = split_full_path(full_path)

    while not listing_finished:
        options = clean_dict({
            'Bucket': bucket,
            'Prefix': prefix,
            'Delimiter': '/',
            'ContinuationToken': last_continuation_token
        })
        response = s3_client.list_objects_v2(**options)
        keys = [extract_run_id(key['Prefix']) for key in response.get('CommonPrefixes', [])]
        run_ids_buffer.extend([key for key in keys if key is not None])
        last_continuation_token = response.get('NextContinuationToken', None)
        if not response['IsTruncated']:
            listing_finished = True

    non_archived_run_ids = [run_id for run_id in run_ids_buffer
                            if not is_glacier(s3_client, bucket, run_id)]

    return non_archived_run_ids
Return list of all run ids inside S3 folder. It does not respect S3 pagination (`MaxKeys`) and returns **all** keys from bucket and won't list any prefixes with object archived to AWS Glacier Arguments: s3_client - boto3 S3 client (not service) full_path - full valid S3 path to events (such as enriched-archive) example: s3://acme-events-bucket/main-pipeline/enriched-archive
def configure_settings(settings, environment_settings=True):
    '''
    Given a settings object, run automatic configuration of all
    the apps in INSTALLED_APPS.
    '''
    changes = 1
    iterations = 0
    while changes:
        changes = 0
        app_names = ['django_autoconfig'] + list(settings['INSTALLED_APPS'])
        if environment_settings:
            app_names.append('django_autoconfig.environment_settings')
        for app_name in app_names:
            import django_autoconfig.contrib
            if autoconfig_module_exists(app_name):
                module = importlib.import_module("%s.autoconfig" % (app_name,))
            elif app_name in django_autoconfig.contrib.CONTRIB_CONFIGS:
                module = django_autoconfig.contrib.CONTRIB_CONFIGS[app_name]
            else:
                continue
            changes += merge_dictionaries(
                settings,
                getattr(module, 'SETTINGS', {}),
                template_special_case=True,
            )
            changes += merge_dictionaries(
                settings,
                getattr(module, 'DEFAULT_SETTINGS', {}),
                only_defaults=True,
            )
            for relationship in getattr(module, 'RELATIONSHIPS', []):
                changes += relationship.apply_changes(settings)
        if iterations >= MAX_ITERATIONS:
            raise ImproperlyConfigured(
                'Autoconfiguration could not reach a consistent state'
            )
        iterations += 1
    LOGGER.debug("Autoconfiguration took %d iterations.", iterations)
Given a settings object, run automatic configuration of all the apps in INSTALLED_APPS.
def _do_refresh_session(self):
    """:returns: `!True` if it had to create new session"""
    if self._session and self._last_session_refresh + self._loop_wait > time.time():
        return False

    if self._session:
        try:
            self._client.session.renew(self._session)
        except NotFound:
            self._session = None
    ret = not self._session
    if ret:
        try:
            self._session = self._client.session.create(name=self._scope + '-' + self._name,
                                                         checks=self.__session_checks,
                                                         lock_delay=0.001, behavior='delete')
        except InvalidSessionTTL:
            logger.exception('session.create')
            self.adjust_ttl()
            raise

    self._last_session_refresh = time.time()
    return ret
:returns: `!True` if it had to create new session
def precedence(item):
    """Returns the precedence of a given object."""
    try:
        mro = item.__class__.__mro__
    except AttributeError:
        return PRECEDENCE["Atom"]
    for i in mro:
        n = i.__name__
        if n in PRECEDENCE_FUNCTIONS:
            return PRECEDENCE_FUNCTIONS[n](item)
        elif n in PRECEDENCE_VALUES:
            return PRECEDENCE_VALUES[n]
    return PRECEDENCE["Atom"]
Returns the precedence of a given object.
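For illustration, a rough sketch of the lookup tables `precedence` assumes; the class names and numeric values here are placeholders, not the library's actual entries:

# Hypothetical precedence tables; real names and values depend on the library.
PRECEDENCE = {"Atom": 1000, "Pow": 60, "Mul": 50, "Add": 40}
PRECEDENCE_VALUES = {"Add": PRECEDENCE["Add"], "Mul": PRECEDENCE["Mul"]}
PRECEDENCE_FUNCTIONS = {
    # A callable lets precedence depend on the instance, e.g. a negative
    # number printing with a leading minus sign.
    "Rational": lambda expr: PRECEDENCE["Add"] if expr < 0 else PRECEDENCE["Atom"],
}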
def flatten(nested_iterable):
    """
    Flattens arbitrarily nested lists/tuples.

    Code partially taken from https://stackoverflow.com/a/10824420.

    Parameters
    ----------
    nested_iterable
        A list or tuple of arbitrarily nested values.

    Yields
    ------
    any
        Non-list and non-tuple values in `nested_iterable`.
    """
    # don't just check if something is iterable here, because then strings
    # and arrays will be split into their characters and components
    if not isinstance(nested_iterable, (list, tuple)):
        yield nested_iterable
    else:
        for i in nested_iterable:
            if isinstance(i, (list, tuple)):
                for j in flatten(i):
                    yield j
            else:
                yield i
Flattens arbitrarily nested lists/tuples. Code partially taken from https://stackoverflow.com/a/10824420. Parameters ---------- nested_iterable A list or tuple of arbitrarily nested values. Yields ------ any Non-list and non-tuple values in `nested_iterable`.
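A quick usage example (plain Python, nothing assumed beyond the function above):

nested = [1, [2, (3, [4])], "ab"]
print(list(flatten(nested)))  # [1, 2, 3, 4, 'ab'] - strings are yielded whole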
def remove_override(self, key):
    """Remove a setting override, if one exists."""
    keys = key.split('.')
    if len(keys) > 1:
        raise NotImplementedError
    elif key in self.overrides:
        del self.overrides[key]
        self._uncache(key)
Remove a setting override, if one exists.
def spielman_wr(self, norm=True): """Returns a list of site-specific omega values calculated from the `ExpCM`. Args: `norm` (bool) If `True`, normalize the `omega_r` values by the ExpCM gene-wide `omega`. Returns: `wr` (list) list of `omega_r` values of length `nsites` Following `Spielman and Wilke, MBE, 32:1097-1108 <https://doi.org/10.1093/molbev/msv003>`_, we can predict the `dN/dS` value for each site `r`, :math:`\\rm{spielman}\\omega_r`, from the `ExpCM`. When `norm` is `False`, the `omega_r` values are defined as :math:`\\rm{spielman}\\omega_r = \\frac{\\sum_x \\sum_{y \\in N_x}p_{r,x}\ P_{r,xy}}{\\sum_x \\sum_{y \\in Nx}p_{r,x}Q_{xy}}`, where `r,x,y`, :math:`p_{r,x}`, :math:`P_{r,xy}`, and :math:`Q_{x,y}` have the same definitions as in the main `ExpCM` doc string and :math:`N_{x}` is the set of codons which are non-synonymous to codon `x` and differ from `x` by one nucleotide. When `norm` is `True`, the `omega_r` values above are divided by the ExpCM `omega` value.""" wr = [] for r in range(self.nsites): num = 0 den = 0 for i in range(N_CODON): j = scipy.intersect1d(scipy.where(CODON_SINGLEMUT[i]==True)[0], scipy.where(CODON_NONSYN[i]==True)[0]) p_i = self.stationarystate[r][i] P_xy = self.Prxy[r][i][j].sum() if norm: P_xy = P_xy/self.omega Q_xy = self.Qxy[i][j].sum() num += (p_i * P_xy) den += (p_i * Q_xy) result = num/den wr.append(result) return wr
Returns a list of site-specific omega values calculated from the `ExpCM`. Args: `norm` (bool) If `True`, normalize the `omega_r` values by the ExpCM gene-wide `omega`. Returns: `wr` (list) list of `omega_r` values of length `nsites` Following `Spielman and Wilke, MBE, 32:1097-1108 <https://doi.org/10.1093/molbev/msv003>`_, we can predict the `dN/dS` value for each site `r`, :math:`\\rm{spielman}\\omega_r`, from the `ExpCM`. When `norm` is `False`, the `omega_r` values are defined as :math:`\\rm{spielman}\\omega_r = \\frac{\\sum_x \\sum_{y \\in N_x}p_{r,x}\ P_{r,xy}}{\\sum_x \\sum_{y \\in Nx}p_{r,x}Q_{xy}}`, where `r,x,y`, :math:`p_{r,x}`, :math:`P_{r,xy}`, and :math:`Q_{x,y}` have the same definitions as in the main `ExpCM` doc string and :math:`N_{x}` is the set of codons which are non-synonymous to codon `x` and differ from `x` by one nucleotide. When `norm` is `True`, the `omega_r` values above are divided by the ExpCM `omega` value.
def _match_line(self, city_name, lines):
    """
    The lookup is case insensitive and returns the first matching line,
    stripped.

    :param city_name: str
    :param lines: list of str
    :return: str
    """
    for line in lines:
        toponym = line.split(',')[0]
        if toponym.lower() == city_name.lower():
            return line.strip()
    return None
The lookup is case insensitive and returns the first matching line, stripped. :param city_name: str :param lines: list of str :return: str
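A small illustration with made-up input lines; `self` is unused in the body, so passing None works when the method is exercised standalone:

lines = ["London,GB,51.50853,-0.12574", "Paris,FR,48.85341,2.3488"]
print(_match_line(None, "paris", lines))   # 'Paris,FR,48.85341,2.3488'
print(_match_line(None, "Berlin", lines))  # None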
def get_default_config(self):
    """
    Return the default config for the handler
    """
    config = super(rmqHandler, self).get_default_config()

    config.update({
        'server': '127.0.0.1',
        'rmq_exchange': 'diamond',
    })

    return config
Return the default config for the handler
def setImage(self, img, autoRange=True, useAutoLevels=None, levels=None, axes=None, pos=None, scale=None, transform=None, ): """ Set the image to be displayed in the widget. ================== =========================================================================== **Arguments:** img (numpy array) the image to be displayed. See :func:`ImageItem.setImage` and *notes* below. xvals (numpy array) 1D array of z-axis values corresponding to the third axis in a 3D image. For video, this array should contain the time of each frame. autoRange (bool) whether to scale/pan the view to fit the image. useAutoLevels (bool) whether to update the white/black levels to fit the image. levels (min, max); the white and black level values to use. axes Dictionary indicating the interpretation for each axis. This is only needed to override the default guess. Format is:: {'t':0, 'x':1, 'y':2, 'c':3}; pos Change the position of the displayed image scale Change the scale of the displayed image transform Set the transform of the displayed image. This option overrides *pos* and *scale*. autoHistogramRange If True, the histogram y-range is automatically scaled to fit the image data. ================== =========================================================================== **Notes:** For backward compatibility, image data is assumed to be in column-major order (column, row). However, most image data is stored in row-major order (row, column) and will need to be transposed before calling setImage():: imageview.setImage(imagedata.T) This requirement can be changed by the ``imageAxisOrder`` :ref:`global configuration option <apiref_config>`. """ if hasattr(img, 'implements') and img.implements('MetaArray'): img = img.asarray() if not isinstance(img, np.ndarray): required = ['dtype', 'max', 'min', 'ndim', 'shape', 'size'] if not all([hasattr(img, attr) for attr in required]): raise TypeError("Image must be NumPy array or any object " "that provides compatible attributes/methods:\n" " %s" % str(required)) self.image = img self.imageDisp = None if axes is None: x, y = (0, 1) if self.imageItem.axisOrder == 'col-major' else (1, 0) if img.ndim == 2: self.axes = {'t': None, 'x': x, 'y': y, 'c': None} elif img.ndim == 3: # Ambiguous case; make a guess if img.shape[2] <= 4: self.axes = {'t': None, 'x': x, 'y': y, 'c': 2} else: self.axes = {'t': 0, 'x': x+ 1, 'y': y+ 1, 'c': None} elif img.ndim == 4: # Even more ambiguous; just assume the default self.axes = {'t': 0, 'x': x+ 1, 'y': y+ 1, 'c': 3} else: raise Exception( "Can not interpret image with dimensions %s" % (str(img.shape))) elif isinstance(axes, dict): self.axes = axes.copy() elif isinstance(axes, list) or isinstance(axes, tuple): self.axes = {} for i in range(len(axes)): self.axes[axes[i]] = i else: raise Exception( "Can not interpret axis specification %s. " "Must be like {'t': 2, 'x': 0, 'y': 1} or " "('t', 'x', 'y', 'c')" % (str(axes))) for x in ['t', 'x', 'y', 'c']: self.axes[x] = self.axes.get(x, None) axes = self.axes self.currentIndex = 0 if levels is None and useAutoLevels: self._useAutoLevels= useAutoLevels if levels is not None: # this does nothing since getProcessedImage sets these values again. self.setLevels(*levels) self._updateImage() self._updateLabelInfo() self.imageItem.resetTransform() if scale is not None: self.imageItem.scale(*scale) if pos is not None: self.imageItem.setPos(*pos) if transform is not None: self.imageItem.setTransform(transform)
Set the image to be displayed in the widget. ================== =========================================================================== **Arguments:** img (numpy array) the image to be displayed. See :func:`ImageItem.setImage` and *notes* below. xvals (numpy array) 1D array of z-axis values corresponding to the third axis in a 3D image. For video, this array should contain the time of each frame. autoRange (bool) whether to scale/pan the view to fit the image. useAutoLevels (bool) whether to update the white/black levels to fit the image. levels (min, max); the white and black level values to use. axes Dictionary indicating the interpretation for each axis. This is only needed to override the default guess. Format is:: {'t':0, 'x':1, 'y':2, 'c':3}; pos Change the position of the displayed image scale Change the scale of the displayed image transform Set the transform of the displayed image. This option overrides *pos* and *scale*. autoHistogramRange If True, the histogram y-range is automatically scaled to fit the image data. ================== =========================================================================== **Notes:** For backward compatibility, image data is assumed to be in column-major order (column, row). However, most image data is stored in row-major order (row, column) and will need to be transposed before calling setImage():: imageview.setImage(imagedata.T) This requirement can be changed by the ``imageAxisOrder`` :ref:`global configuration option <apiref_config>`.
def regularizer(name, regularization_fn, name_filter='weights'):
    """Wraps a regularizer in a parameter-function.

    Args:
      name: The name scope for this regularizer.
      regularization_fn: A function with signature:
        fn(variable) -> loss `Tensor` or `None`.
      name_filter: A regex that will be used to filter variables by name.
    Returns:
      A parameter modification function that adds the loss to the
      REGULARIZATION_LOSSES graph key.
    """
    regex = re.compile(name_filter)

    def fn(var_name, variable, phase):
        if phase is pt.Phase.train and regex.search(var_name):
            with tf.name_scope(None, name, [variable]):
                loss = regularization_fn(variable)
                if loss is not None:
                    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss)
        return variable
    return fn
Wraps a regularizer in a parameter-function. Args: name: The name scope for this regularizer. regularization_fn: A function with signature: fn(variable) -> loss `Tensor` or `None`. name_filter: A regex that will be used to filter variables by name. Returns: A parameter modification function that adds the loss to the REGULARIZATION_LOSSES graph key.
def concat_batch_variantcalls(items, region_block=True, skip_jointcheck=False):
    """CWL entry point: combine variant calls from regions into single VCF.
    """
    items = [utils.to_single_data(x) for x in items]
    batch_name = _get_batch_name(items, skip_jointcheck)
    variantcaller = _get_batch_variantcaller(items)
    # Pre-called input variant files
    if not variantcaller and all(d.get("vrn_file") for d in items):
        return {"vrn_file": items[0]["vrn_file"]}
    out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller,
                            "%s.vcf.gz" % (batch_name))
    utils.safe_makedir(os.path.dirname(out_file))
    if region_block:
        regions = [_region_to_coords(rs[0]) for rs in items[0]["region_block"]]
    else:
        regions = [_region_to_coords(r) for r in items[0]["region"]]
    vrn_file_regions = items[0]["vrn_file_region"]
    out_file = vcfutils.concat_variant_files(vrn_file_regions, out_file, regions,
                                             dd.get_ref_file(items[0]), items[0]["config"])
    return {"vrn_file": out_file}
CWL entry point: combine variant calls from regions into single VCF.
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS[imt]
    C_PGA = self.COEFFS[PGA()]
    imt_per = 0 if imt.name == 'PGV' else imt.period
    pga_rock = self._get_pga_on_rock(C_PGA, rup, dists)
    mean = (self._get_magnitude_scaling_term(C, rup) +
            self._get_path_scaling(C, dists, rup.mag) +
            self._get_site_scaling(C, pga_rock, sites, imt_per, dists.rjb))
    stddevs = self._get_stddevs(C, rup, dists, sites, stddev_types)
    return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def process_large_file(self, local_file, parent):
    """
    Upload a single file using multiple processes to upload multiple chunks
    at the same time. Updates local_file with its remote_id when done.

    :param local_file: LocalFile: file we are uploading
    :param parent: LocalFolder/LocalProject: parent of the file
    """
    file_content_sender = FileUploader(self.settings.config,
                                       self.settings.data_service,
                                       local_file,
                                       self.settings.watcher,
                                       self.settings.file_upload_post_processor)
    remote_id = file_content_sender.upload(self.settings.project_id,
                                           parent.kind,
                                           parent.remote_id)
    local_file.set_remote_id_after_send(remote_id)
Upload a single file using multiple processes to upload multiple chunks at the same time. Updates local_file with its remote_id when done. :param local_file: LocalFile: file we are uploading :param parent: LocalFolder/LocalProject: parent of the file
def get_variant_type(variant_source):
    """Try to find out what type of variants that exists in a variant source

        Args:
            variant_source (str): Path to variant source
            source_mode (str): 'vcf' or 'gemini'

        Returns:
            variant_type (str): 'sv' or 'snv'
    """
    file_type = get_file_type(variant_source)
    variant_type = 'sv'

    if file_type == 'vcf':
        variants = VCF(variant_source)
    elif file_type == 'gemini':
        variants = GeminiQuery(variant_source)
        gemini_query = "SELECT * from variants"
        variants.run(gemini_query)

    # Check 1000 first variants, if anyone is a snv we set the variant_type
    # to 'snv'
    for i, variant in enumerate(variants):
        if file_type == 'vcf':
            if variant.is_snp:
                variant_type = 'snv'
        elif file_type == 'gemini':
            if variant['type'] == 'snp':
                variant_type = 'snv'
        if i > 1000:
            break

    return variant_type
Try to find out what type of variants that exists in a variant source Args: variant_source (str): Path to variant source source_mode (str): 'vcf' or 'gemini' Returns: variant_type (str): 'sv' or 'snv'
def twoQ_gates(self):
    """Get list of 2-qubit gates. Ignore snapshot, barriers, and the like."""
    two_q_gates = []
    for node in self.gate_nodes():
        if len(node.qargs) == 2:
            two_q_gates.append(node)
    return two_q_gates
Get list of 2-qubit gates. Ignore snapshot, barriers, and the like.
def get_build_controllers(self, name=None):
    """GetBuildControllers.
    Gets controller, optionally filtered by name
    :param str name:
    :rtype: [BuildController]
    """
    query_parameters = {}
    if name is not None:
        query_parameters['name'] = self._serialize.query('name', name, 'str')
    response = self._send(http_method='GET',
                          location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
                          version='5.0',
                          query_parameters=query_parameters)
    return self._deserialize('[BuildController]', self._unwrap_collection(response))
GetBuildControllers. Gets controller, optionally filtered by name :param str name: :rtype: [BuildController]
def _assert_path_is_rw(self):
    """
    Make sure that `self.path` exists, is a directory and is readable/writeable.

    Raises:
        IOError: In case that any of the assumptions failed.
        ValueError: In case that `self.path` is not set.
    """
    if not self.path:
        raise ValueError("`path` argument must be set!")

    if not os.path.exists(self.path):
        raise IOError("`%s` not found." % self.path)

    if not os.path.isdir(self.path):
        raise IOError("`%s` is not a directory!" % self.path)

    # Combine the mode flags with a bitwise OR; `os.R_OK or os.W_OK` would
    # only ever test readability.
    if not os.access(self.path, os.R_OK | os.W_OK):
        raise IOError(
            "Can't access `%s`, please check permissions." % self.path
        )
Make sure that `self.path` exists, is a directory and is readable/writeable. Raises: IOError: In case that any of the assumptions failed. ValueError: In case that `self.path` is not set.
def clone(self, name=None):
    """Creates a new MLP with the same structure.

    Args:
      name: Optional string specifying the name of the new module. The
          default name is constructed by appending "_clone" to the
          original name.

    Returns:
      A cloned `MLP` module.
    """
    if name is None:
        name = self.module_name + "_clone"

    return MLP(
        name=name,
        output_sizes=self.output_sizes,
        activation=self.activation,
        activate_final=self.activate_final,
        initializers=self.initializers,
        partitioners=self.partitioners,
        regularizers=self.regularizers,
        use_bias=self.use_bias,
        use_dropout=self.use_dropout)
Creates a new MLP with the same structure. Args: name: Optional string specifying the name of the new module. The default name is constructed by appending "_clone" to the original name. Returns: A cloned `MLP` module.
def _GetShowID(self, showName):
    """
    Get epguides show id for a given show name.

    Attempts to match the given show name against a show title in
    self._showTitleList and, if found, returns the corresponding index
    in self._showIDList.

    Parameters
    ----------
    showName : string
        Show name to get show ID for.

    Returns
    ----------
    int or None
        If a show id is found this will be returned, otherwise None is
        returned.
    """
    self._GetTitleList()
    self._GetIDList()

    for index, showTitle in enumerate(self._showTitleList):
        if showName == showTitle:
            return self._showIDList[index]
    return None
Get epguides show id for a given show name. Attempts to match the given show name against a show title in self._showTitleList and, if found, returns the corresponding index in self._showIDList. Parameters ---------- showName : string Show name to get show ID for. Returns ---------- int or None If a show id is found this will be returned, otherwise None is returned.
def writePIDFile(self):
    """
    Write the pid of this process to a file in the jobstore.

    Overwriting the current contents of pid.log is a feature, not a bug
    of this method. Other methods will rely on always having the most
    current pid available. So far there is no reason to store any old pids.
    """
    with self._jobStore.writeSharedFileStream('pid.log') as f:
        f.write(str(os.getpid()).encode('utf-8'))
Write the pid of this process to a file in the jobstore. Overwriting the current contents of pid.log is a feature, not a bug of this method. Other methods will rely on always having the most current pid available. So far there is no reason to store any old pids.
def proximity_metric(self, a, b):
    """Return the weight of the dependency from a to b. Higher weights
    usually have shorter straighter edges. Return 1 if it has normal
    weight. A value of 4 is usually good for ensuring that a related
    pair of modules are drawn next to each other.

    Returns an int between 1 (unknown, default), and 4 (very related).
    """
    # if self._is_pylib(a) and self._is_pylib(b):
    #     return 1

    res = 1
    for ap, bp, n in zip(a.path_parts, b.path_parts, list(range(4))):
        res += ap == bp
        if n >= 3:
            break
    return res
Return the weight of the dependency from a to b. Higher weights usually have shorter straighter edges. Return 1 if it has normal weight. A value of 4 is usually good for ensuring that a related pair of modules are drawn next to each other. Returns an int between 1 (unknown, default), and 4 (very related).
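A quick worked example of the scoring with made-up module paths, replicating the loop above: two shared leading components give 1 + 1 + 1 = 3.

a_parts = ['pkg', 'sub', 'mod_a']
b_parts = ['pkg', 'sub', 'mod_b']
res = 1
for ap, bp, n in zip(a_parts, b_parts, range(4)):
    res += ap == bp  # True counts as 1
print(res)  # 3 - 'pkg' and 'sub' match, the module names differ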
def taskfileinfo_descriptor_data(tfi, role):
    """Return the data for descriptor

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the descriptor
    :rtype: depending on role
    :raises: None
    """
    if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
        return tfi.descriptor
Return the data for descriptor :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the descriptor :rtype: depending on role :raises: None
def import_apps_submodule(submodule):
    """
    Look for a submodule in a series of packages, e.g. ".pagetype_plugins"
    in all INSTALLED_APPS.
    """
    found_apps = []
    for appconfig in apps.get_app_configs():
        app = appconfig.name
        if import_module_or_none('{0}.{1}'.format(app, submodule)) is not None:
            found_apps.append(app)

    return found_apps
Look for a submodule in a series of packages, e.g. ".pagetype_plugins" in all INSTALLED_APPS.
def promote(self, content):
    """
    Promote (replace) the content.data with the first attribute of the
    current content.data that is a I{list}.  Note: the content.data may
    be empty or contain only _x attributes.  In either case, the
    content.data is assigned an empty list.

    @param content: An array content.
    @type content: L{Content}
    """
    for n, v in content.data:
        if isinstance(v, list):
            content.data = v
            return
    content.data = []
Promote (replace) the content.data with the first attribute of the current content.data that is a I{list}. Note: the content.data may be empty or contain only _x attributes. In either case, the content.data is assigned an empty list. @param content: An array content. @type content: L{Content}
def _set_pos(self, pos):
    """
    Set current position for scroll bar.
    """
    if self._canvas.height < self._max_height:
        pos *= self._max_height - self._canvas.height + 1
        pos = int(round(max(0, pos), 0))
        self._canvas.scroll_to(pos)
Set current position for scroll bar.
def match(tgt, opts=None): ''' Runs the compound target check ''' if not opts: opts = __opts__ nodegroups = opts.get('nodegroups', {}) matchers = salt.loader.matchers(opts) if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): log.error('Compound target received that is neither string, list nor tuple') return False log.debug('compound_match: %s ? %s', opts['id'], tgt) ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', 'J': 'pillar_pcre', 'L': 'list', 'N': None, # Nodegroups should already be expanded 'S': 'ipcidr', 'E': 'pcre'} if HAS_RANGE: ref['R'] = 'range' results = [] opers = ['and', 'or', 'not', '(', ')'] if isinstance(tgt, six.string_types): words = tgt.split() else: # we make a shallow copy in order to not affect the passed in arg words = tgt[:] while words: word = words.pop(0) target_info = salt.utils.minions.parse_target(word) # Easy check first if word in opers: if results: if results[-1] == '(' and word in ('and', 'or'): log.error('Invalid beginning operator after "(": %s', word) return False if word == 'not': if not results[-1] in ('and', 'or', '('): results.append('and') results.append(word) else: # seq start with binary oper, fail if word not in ['(', 'not']: log.error('Invalid beginning operator: %s', word) return False results.append(word) elif target_info and target_info['engine']: if 'N' == target_info['engine']: # if we encounter a node group, just evaluate it in-place decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups) if decomposed: words = decomposed + words continue engine = ref.get(target_info['engine']) if not engine: # If an unknown engine is called at any time, fail out log.error( 'Unrecognized target engine "%s" for target ' 'expression "%s"', target_info['engine'], word ) return False engine_args = [target_info['pattern']] engine_kwargs = {} if target_info['delimiter']: engine_kwargs['delimiter'] = target_info['delimiter'] results.append( six.text_type(matchers['{0}_match.match'.format(engine)](*engine_args, **engine_kwargs)) ) else: # The match is not explicitly defined, evaluate it as a glob results.append(six.text_type(matchers['glob_match.match'](word))) results = ' '.join(results) log.debug('compound_match %s ? "%s" => "%s"', opts['id'], tgt, results) try: return eval(results) # pylint: disable=W0123 except Exception: log.error( 'Invalid compound target: %s for results: %s', tgt, results) return False return False
Runs the compound target check
def autofit(ts, maxp=5, maxd=2, maxq=5, sc=None): """ Utility function to help in fitting an automatically selected ARIMA model based on approximate Akaike Information Criterion (AIC) values. The model search is based on the heuristic developed by Hyndman and Khandakar (2008) and described in [[http://www.jstatsoft .org/v27/i03/paper]]. In contrast to the algorithm in the paper, we use an approximation to the AIC, rather than an exact value. Note that if the maximum differencing order provided does not suffice to induce stationarity, the function returns a failure, with the appropriate message. Additionally, note that the heuristic only considers models that have parameters satisfying the stationarity/invertibility constraints. Finally, note that our algorithm is slightly more lenient than the original heuristic. For example, the original heuristic rejects models with parameters "close" to violating stationarity/invertibility. We only reject those that actually violate it. This functionality is even less mature than some of the other model fitting functions here, so use it with caution. Parameters ---------- ts: time series to which to automatically fit an ARIMA model as a Numpy array maxP: limit for the AR order maxD: limit for differencing order maxQ: limit for the MA order sc: The SparkContext, required. returns an ARIMAModel """ assert sc != None, "Missing SparkContext" jmodel = sc._jvm.com.cloudera.sparkts.models.ARIMA.autoFit(_py2java(sc, Vectors.dense(ts)), maxp, maxd, maxq) return ARIMAModel(jmodel=jmodel, sc=sc)
Utility function to help in fitting an automatically selected ARIMA model based on approximate Akaike Information Criterion (AIC) values. The model search is based on the heuristic developed by Hyndman and Khandakar (2008) and described in [[http://www.jstatsoft .org/v27/i03/paper]]. In contrast to the algorithm in the paper, we use an approximation to the AIC, rather than an exact value. Note that if the maximum differencing order provided does not suffice to induce stationarity, the function returns a failure, with the appropriate message. Additionally, note that the heuristic only considers models that have parameters satisfying the stationarity/invertibility constraints. Finally, note that our algorithm is slightly more lenient than the original heuristic. For example, the original heuristic rejects models with parameters "close" to violating stationarity/invertibility. We only reject those that actually violate it. This functionality is even less mature than some of the other model fitting functions here, so use it with caution. Parameters ---------- ts: time series to which to automatically fit an ARIMA model as a Numpy array maxP: limit for the AR order maxD: limit for differencing order maxQ: limit for the MA order sc: The SparkContext, required. returns an ARIMAModel
def _record_field_to_json(fields, row_value):
    """Convert a record/struct field to its JSON representation.

    Args:
        fields ( \
            Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`], \
        ):
            The :class:`~google.cloud.bigquery.schema.SchemaField`s of the
            record's subfields to use for type conversion and field names.
        row_value (Union[Tuple[Any], Mapping[str, Any]):
            A tuple or dictionary to convert to JSON-serializable values.

    Returns:
        Mapping[str, any]: A JSON-serializable dictionary.
    """
    record = {}
    isdict = isinstance(row_value, dict)

    for subindex, subfield in enumerate(fields):
        subname = subfield.name
        if isdict:
            subvalue = row_value.get(subname)
        else:
            subvalue = row_value[subindex]
        record[subname] = _field_to_json(subfield, subvalue)
    return record
Convert a record/struct field to its JSON representation. Args: fields ( \ Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`], \ ): The :class:`~google.cloud.bigquery.schema.SchemaField`s of the record's subfields to use for type conversion and field names. row_value (Union[Tuple[Any], Mapping[str, Any]): A tuple or dictionary to convert to JSON-serializable values. Returns: Mapping[str, any]: A JSON-serializable dictionary.
async def handle_user_exception(self, error: Exception) -> Response:
    """Handle an exception that has been raised.

    This should forward :class:`~quart.exception.HTTPException` to
    :meth:`handle_http_exception`, then attempt to handle the error. If
    it cannot it should reraise the error.
    """
    if isinstance(error, HTTPException) and not self.trap_http_exception(error):
        return await self.handle_http_exception(error)

    handler = self._find_exception_handler(error)
    if handler is None:
        raise error
    return await handler(error)
Handle an exception that has been raised. This should forward :class:`~quart.exception.HTTPException` to :meth:`handle_http_exception`, then attempt to handle the error. If it cannot it should reraise the error.
def excepthook (self, etype, evalue, etb):
    """Handle an uncaught exception.

    We always forward the exception on to whatever `sys.excepthook` was
    present upon setup. However, if the exception is a KeyboardInterrupt,
    we additionally kill ourselves with an uncaught SIGINT, so that
    invoking programs know what happened.
    """
    self.inner_excepthook (etype, evalue, etb)

    if issubclass (etype, KeyboardInterrupt):
        # Don't try this at home, kids. On some systems os.kill (0, ...)
        # signals our entire process group, which is not what we want,
        # so we use os.getpid ().
        signal.signal (signal.SIGINT, signal.SIG_DFL)
        os.kill (os.getpid (), signal.SIGINT)
Handle an uncaught exception. We always forward the exception on to whatever `sys.excepthook` was present upon setup. However, if the exception is a KeyboardInterrupt, we additionally kill ourselves with an uncaught SIGINT, so that invoking programs know what happened.
def _maybe_parse_configurable_reference(self):
    """Try to parse a configurable reference (@[scope/name/]fn_name[()])."""
    if self._current_token.value != '@':
        return False, None

    location = self._current_location()
    self._advance_one_token()
    scoped_name = self._parse_selector(allow_periods_in_scope=True)

    evaluate = False
    if self._current_token.value == '(':
        evaluate = True
        self._advance()
        if self._current_token.value != ')':
            self._raise_syntax_error("Expected ')'.")
        self._advance_one_token()
    self._skip_whitespace_and_comments()

    with utils.try_with_location(location):
        reference = self._delegate.configurable_reference(scoped_name, evaluate)

    return True, reference
Try to parse a configurable reference (@[scope/name/]fn_name[()]).
def size(dtype):
    """Returns the number of bytes to represent this `dtype`."""
    dtype = tf.as_dtype(dtype)
    if hasattr(dtype, 'size'):
        return dtype.size
    return np.dtype(dtype).itemsize
Returns the number of bytes to represent this `dtype`.
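A quick usage sketch, assuming TensorFlow and NumPy are importable (the function itself already depends on both as `tf` and `np`):

import numpy as np
import tensorflow as tf

print(size(tf.float32))  # 4 bytes
print(size(np.int64))    # 8 bytes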
def process_nxml_str(nxml_str, citation=None, offline=False, output_fname=default_output_fname): """Return a ReachProcessor by processing the given NXML string. NXML is the format used by PubmedCentral for papers in the open access subset. Parameters ---------- nxml_str : str The NXML string to be processed. citation : Optional[str] A PubMed ID passed to be used in the evidence for the extracted INDRA Statements. Default: None offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements. """ if offline: if not try_offline: logger.error('Offline reading is not available.') return None try: api_ruler = reach_reader.get_api_ruler() except ReachOfflineReadingError as e: logger.error(e) logger.error('Cannot read offline because the REACH ApiRuler ' 'could not be instantiated.') return None try: result_map = api_ruler.annotateNxml(nxml_str, 'fries') except JavaException as e: logger.error('Could not process NXML.') logger.error(e) return None # REACH version < 1.3.3 json_str = result_map.get('resultJson') if not json_str: # REACH version >= 1.3.3 json_str = result_map.get('result') if json_str is None: logger.warning('No results retrieved') return None if isinstance(json_str, bytes): json_str = json_str.decode('utf-8') return process_json_str(json_str, citation) else: data = {'nxml': nxml_str} try: res = requests.post(reach_nxml_url, data) except requests.exceptions.RequestException as e: logger.error('Could not connect to REACH service:') logger.error(e) return None if res.status_code != 200: logger.error('Could not process NXML via REACH service.' + 'Status code: %d' % res.status_code) return None json_str = res.text with open(output_fname, 'wb') as fh: fh.write(json_str.encode('utf-8')) return process_json_str(json_str, citation)
Return a ReachProcessor by processing the given NXML string. NXML is the format used by PubmedCentral for papers in the open access subset. Parameters ---------- nxml_str : str The NXML string to be processed. citation : Optional[str] A PubMed ID passed to be used in the evidence for the extracted INDRA Statements. Default: None offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements.
def loudest_triggers_from_cli(opts, coinc_parameters=None, sngl_parameters=None, bank_parameters=None): """ Parses the CLI options related to find the loudest coincident or single detector triggers. Parameters ---------- opts : object Result of parsing the CLI with OptionParser. coinc_parameters : list List of datasets in statmap file to retrieve. sngl_parameters : list List of datasets in single-detector trigger files to retrieve. bank_parameters : list List of datasets in template bank file to retrieve. Results ------- bin_names : dict A list of bin names. bin_results : dict A list of dict holding trigger data data. """ # list to hold trigger data bin_results = [] # list of IFOs ifos = opts.sngl_trigger_files.keys() # get indices of bins in template bank bins_idx, bank_data = bank_bins_from_cli(opts) bin_names = bins_idx.keys() # if taking triggers from statmap file if opts.statmap_file and opts.bank_file and opts.sngl_trigger_files: # loop over each bin for bin_name in bin_names: data = {} # get template has and detection statistic for coincident events statmap = hdf.ForegroundTriggers( opts.statmap_file, opts.bank_file, sngl_files=opts.sngl_trigger_files.values(), n_loudest=opts.search_n_loudest, group=opts.statmap_group) template_hash = statmap.get_bankfile_array("template_hash") stat = statmap.get_coincfile_array("stat") # get indices of triggers in bin bin_idx = numpy.in1d(template_hash, bank_data["template_hash"][bins_idx[bin_name]]) # get indices for sorted detection statistic in bin sorting = stat[bin_idx].argsort()[::-1] # get variables for n-th loudest triggers for p in coinc_parameters: arr = statmap.get_coincfile_array(p) data[p] = arr[bin_idx][sorting][:opts.n_loudest] for p in sngl_parameters: for ifo in ifos: key = "/".join([ifo, p]) arr = statmap.get_snglfile_array_dict(p)[ifo] data[key] = arr[bin_idx][sorting][:opts.n_loudest] for p in bank_parameters: arr = statmap.get_bankfile_array(p) data[p] = arr[bin_idx][sorting][:opts.n_loudest] # append results bin_results.append(data) # if taking triggers from single detector file elif opts.bank_file and opts.sngl_trigger_files: # loop over each bin for bin_name in bin_names: data = {} # only use one IFO if len(opts.sngl_trigger_files.keys()) == 1: ifo = opts.sngl_trigger_files.keys()[0] else: raise ValueError("Too many IFOs") # get newSNR as statistic from single detector files sngls = hdf.SingleDetTriggers(opts.sngl_trigger_files[ifo], opts.bank_file, opts.veto_file, opts.veto_segment_name, None, ifo) # cluster n_loudest = opts.search_n_loudest \ if opts.search_n_loudest else len(sngls.template_id) sngls.mask_to_n_loudest_clustered_events(n_loudest=n_loudest) template_hash = \ sngls.bank["template_hash"][:][sngls.template_id] # get indices of triggers in bin bin_idx = numpy.in1d(template_hash, bank_data["template_hash"][bins_idx[bin_name]]) # sort by detection statistic stats = sngls.stat sorting = stats[bin_idx].argsort()[::-1] # get indices for sorted detection statistic in bin for p in sngl_parameters: key = "/".join([ifo, p]) arr = sngls.get_column(p) data[key] = arr[bin_idx][sorting][:opts.n_loudest] for p in bank_parameters: arr = sngls.bank[p][:] data[p] = \ arr[sngls.template_id][bin_idx][sorting][:opts.n_loudest] # append results bin_results.append(data) # else did not supply enough command line options else: raise ValueError("Must have --bank-file and --sngl-trigger-files") return bin_names, bin_results
Parses the CLI options related to find the loudest coincident or single detector triggers. Parameters ---------- opts : object Result of parsing the CLI with OptionParser. coinc_parameters : list List of datasets in statmap file to retrieve. sngl_parameters : list List of datasets in single-detector trigger files to retrieve. bank_parameters : list List of datasets in template bank file to retrieve. Results ------- bin_names : dict A list of bin names. bin_results : dict A list of dict holding trigger data data.
def do_alarm_definition_patch(mc, args):
    '''Patch the alarm definition.'''
    fields = {}
    fields['alarm_id'] = args.id
    if args.name:
        fields['name'] = args.name
    if args.description:
        fields['description'] = args.description
    if args.expression:
        fields['expression'] = args.expression
    if args.alarm_actions:
        fields['alarm_actions'] = _arg_split_patch_update(args.alarm_actions,
                                                          patch=True)
    if args.ok_actions:
        fields['ok_actions'] = _arg_split_patch_update(args.ok_actions,
                                                       patch=True)
    if args.undetermined_actions:
        fields['undetermined_actions'] = _arg_split_patch_update(args.undetermined_actions,
                                                                 patch=True)
    if args.actions_enabled:
        if args.actions_enabled not in enabled_types:
            errmsg = ('Invalid value, not one of [' +
                      ', '.join(enabled_types) + ']')
            print(errmsg)
            return
        fields['actions_enabled'] = args.actions_enabled in ['true', 'True']
    if args.severity:
        if not _validate_severity(args.severity):
            return
        fields['severity'] = args.severity
    try:
        alarm = mc.alarm_definitions.patch(**fields)
    except (osc_exc.ClientException, k_exc.HttpError) as he:
        raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
    else:
        print(jsonutils.dumps(alarm, indent=2))
Patch the alarm definition.
def _connect(self):
    "Create a Unix domain socket connection"
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.settimeout(self.socket_timeout)
    sock.connect(self.path)
    return sock
Create a Unix domain socket connection
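For context, a self-contained sketch of the same pattern outside the class; the socket path and timeout are placeholders:

import socket

SOCKET_PATH = "/tmp/example.sock"  # placeholder; AF_UNIX sockets exist only on Unix-like systems

sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(5.0)        # the class reads this from self.socket_timeout
sock.connect(SOCKET_PATH)   # raises if nothing is listening on this path
sock.close()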
def der_cert(der_data):
    """
    Load a DER encoded certificate

    :param der_data: DER-encoded certificate
    :return: A cryptography.x509.certificate instance
    """
    if isinstance(der_data, str):
        der_data = bytes(der_data, 'utf-8')
    return x509.load_der_x509_certificate(der_data, default_backend())
Load a DER encoded certificate :param der_data: DER-encoded certificate :return: A cryptography.x509.certificate instance
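A usage sketch with a placeholder certificate file; `x509` and `default_backend` come from the `cryptography` package, as in the function above:

from cryptography import x509
from cryptography.hazmat.backends import default_backend

with open("server.der", "rb") as fh:  # placeholder filename
    cert = x509.load_der_x509_certificate(fh.read(), default_backend())
print(cert.subject)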
def shutil_rmtree_onerror(func: Callable[[str], None],
                          path: str,
                          exc_info: EXC_INFO_TYPE) -> None:
    """
    Error handler for ``shutil.rmtree``.

    If the error is due to an access error (read only file)
    it attempts to add write permission and then retries.

    If the error is for another reason it re-raises the error.

    Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``

    See
    https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
    """  # noqa
    if not os.access(path, os.W_OK):
        # Is the error an access error ?
        os.chmod(path, stat.S_IWUSR)
        func(path)
    else:
        exc = exc_info[1]
        raise exc
Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)`` See https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
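A small usage sketch (most relevant on Windows, where read-only files block deletion): create a temporary tree containing a read-only file and remove it with the handler above.

import os
import shutil
import stat
import tempfile

tmp_dir = tempfile.mkdtemp()
locked = os.path.join(tmp_dir, "readonly.txt")
with open(locked, "w") as f:
    f.write("cannot be deleted without write permission")
os.chmod(locked, stat.S_IRUSR)  # strip write permission

# The onerror handler re-adds write permission and retries the failed call.
shutil.rmtree(tmp_dir, onerror=shutil_rmtree_onerror)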
def output(self,pin,value): """Set the specified pin the provided high/low value. Value should be either 1 (ON or HIGH), or 0 (OFF or LOW) or a boolean. """ self.mraa_gpio.Gpio.write(self.mraa_gpio.Gpio(pin), value)
Set the specified pin the provided high/low value. Value should be either 1 (ON or HIGH), or 0 (OFF or LOW) or a boolean.
def console_get_default_background(con: tcod.console.Console) -> Color: """Return this consoles default background color. .. deprecated:: 8.5 Use :any:`Console.default_bg` instead. """ return Color._new_from_cdata( lib.TCOD_console_get_default_background(_console(con)) )
Return this consoles default background color. .. deprecated:: 8.5 Use :any:`Console.default_bg` instead.
def check_for_rerun_user_task(self): """ Checks that the user task needs to re-run. If necessary, current task and pre task's states are changed and re-run. If wf_meta not in data(there is no user interaction from pre-task) and last completed task type is user task and current step is not EndEvent and there is no lane change, this user task is rerun. """ data = self.current.input if 'wf_meta' in data: return current_task = self.workflow.get_tasks(Task.READY)[0] current_task_type = current_task.task_spec.__class__.__name__ pre_task = current_task.parent pre_task_type = pre_task.task_spec.__class__.__name__ if pre_task_type != 'UserTask': return if current_task_type == 'EndEvent': return pre_lane = pre_task.task_spec.lane current_lane = current_task.task_spec.lane if pre_lane == current_lane: pre_task._set_state(Task.READY) current_task._set_state(Task.MAYBE)
Checks whether the user task needs to be re-run. If necessary, the states of
the current task and the preceding task are changed so that they are re-run.

The user task is re-run when 'wf_meta' is not in the data (i.e. there was no
user interaction from the preceding task), the last completed task is a
UserTask, the current step is not an EndEvent, and there is no lane change.
def mag_cal_progress_encode(self, compass_id, cal_mask, cal_status, attempt, completion_pct, completion_mask, direction_x, direction_y, direction_z): ''' Reports progress of compass calibration. compass_id : Compass being calibrated (uint8_t) cal_mask : Bitmask of compasses being calibrated (uint8_t) cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t) attempt : Attempt number (uint8_t) completion_pct : Completion percentage (uint8_t) completion_mask : Bitmask of sphere sections (see http://en.wikipedia.org/wiki/Geodesic_grid) (uint8_t) direction_x : Body frame direction vector for display (float) direction_y : Body frame direction vector for display (float) direction_z : Body frame direction vector for display (float) ''' return MAVLink_mag_cal_progress_message(compass_id, cal_mask, cal_status, attempt, completion_pct, completion_mask, direction_x, direction_y, direction_z)
Reports progress of compass calibration. compass_id : Compass being calibrated (uint8_t) cal_mask : Bitmask of compasses being calibrated (uint8_t) cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t) attempt : Attempt number (uint8_t) completion_pct : Completion percentage (uint8_t) completion_mask : Bitmask of sphere sections (see http://en.wikipedia.org/wiki/Geodesic_grid) (uint8_t) direction_x : Body frame direction vector for display (float) direction_y : Body frame direction vector for display (float) direction_z : Body frame direction vector for display (float)
def middleware_in_executor(middleware): '''Use this middleware to run a synchronous middleware in the event loop executor. Useful when using synchronous web-frameworks such as :django:`django <>`. ''' @wraps(middleware) def _(environ, start_response): loop = get_event_loop() return loop.run_in_executor(None, middleware, environ, start_response) return _
Use this middleware to run a synchronous middleware in the event loop executor. Useful when using synchronous web-frameworks such as :django:`django <>`.
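A hedged sketch of wrapping a plain synchronous WSGI callable so that requests run in the event-loop executor; the app below is a stand-in for a real Django/WSGI application.

# Stand-in synchronous WSGI app (a real Django application object would be wrapped the same way).
def sync_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from a worker thread']

# Each request is now dispatched to the loop's default executor.
async_app = middleware_in_executor(sync_app)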
def and_terms(*args): """ Connect given term strings or list(s) of term strings with an AND operator for querying. Args: An arbitrary number of either strings or lists of strings representing query terms. Returns A query string consisting of argument terms and'ed together. """ args = [arg if not isinstance(arg, list) else ' '.join(arg) for arg in args] return '({0})'.format(' '.join(args))
Connect given term strings or list(s) of term strings with an AND operator for querying. Args: An arbitrary number of either strings or lists of strings representing query terms. Returns A query string consisting of argument terms and'ed together.
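A quick worked example of the expected output, assuming the function is importable from its module (the import path is not shown in the snippet):

# Plain string arguments are joined directly.
and_terms('title:fish', 'year:2020')        # -> '(title:fish year:2020)'

# List arguments are flattened with spaces before joining.
and_terms(['title:fish', 'author:smith'])   # -> '(title:fish author:smith)'
and_terms('a', ['b', 'c'])                  # -> '(a b c)'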
def filter_by_pattern(self, pattern): """Filter the Data Collection based on a list of booleans. Args: pattern: A list of True/False values. Typically, this is a list with a length matching the length of the Data Collections values but it can also be a pattern to be repeated over the Data Collection. Return: A new Data Collection with filtered data """ _filt_values, _filt_datetimes = self._filter_by_pattern(pattern) if self._enumeration is None: self._get_mutable_enumeration() col_obj = self._enumeration['mutable'][self._collection_type] collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes) collection._validated_a_period = self._validated_a_period return collection
Filter the Data Collection based on a list of booleans. Args: pattern: A list of True/False values. Typically, this is a list with a length matching the length of the Data Collections values but it can also be a pattern to be repeated over the Data Collection. Return: A new Data Collection with filtered data
def openXmlDocument(path=None, file_=None, data=None, url=None, mime_type=None): """**Factory function** Will guess what document type is best suited and return the appropriate document type. User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter. :param path: file path in the local filesystem to a document. :param file_: a file (like) object to a document (must be opened in 'rb' mode') :param data: the binary data of a document :param url: the URL of a document :param mime_type: mime type if known. One of the known MIME types from :mod:`openxmllib.contenttypes`. Note that ``mime_tyype`` parameter **must** be provided if you provide the Open XML document through the ``data`` parameter. Otherwise, if you don't provide one, we'll try to guess which is the most appropriate using the file extension. :return: A subclass of :class:`openxmllib.document.Document`. """ if path is not None: file_ = open(path, 'rb') elif file_ is not None: assert hasattr(file_, 'read') elif url is not None: file_ = urllib2.urlopen(url) if mime_type is None: mime_type = file_.headers.gettype() elif data is not None: file_ = cStringIO.StringIO(data) assert mime_type is not None else: raise ValueError("Either path, file_, data, or url should be provided") # Mime type based document if mime_type is not None: for class_ in _document_classes: if class_.canProcessMime(mime_type): return class_(file_, mime_type=mime_type) raise ValueError("%s MIME type is unknown." % mime_type) else: assert hasattr(file_, 'name') for class_ in _document_classes: if class_.canProcessFilename(file_.name): return class_(file_, mime_type=mime_type) raise ValueError("Can't guess mime_type. You should set the mime_type param") return
**Factory function**

Will guess what document type is best suited and return the appropriate
document type.

User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter.

:param path: file path in the local filesystem to a document.
:param file_: a file (like) object to a document (must be opened in 'rb' mode')
:param data: the binary data of a document
:param url: the URL of a document
:param mime_type: mime type if known. One of the known MIME types from
    :mod:`openxmllib.contenttypes`. Note that the ``mime_type`` parameter
    **must** be provided if you provide the Open XML document through the
    ``data`` parameter. Otherwise, if you don't provide one, we'll try to
    guess which is the most appropriate using the file extension.
:return: A subclass of :class:`openxmllib.document.Document`.
def _compute_base_term(self, C, rup, dists): """ Compute and return base model term, that is the first term in equation 1, page 74. The calculation of this term is explained in paragraph 'Base Model', page 75. """ c1 = self.CONSTS['c1'] R = np.sqrt(dists.rrup ** 2 + self.CONSTS['c4'] ** 2) base_term = (C['a1'] + C['a8'] * ((8.5 - rup.mag) ** 2) + (C['a2'] + self.CONSTS['a3'] * (rup.mag - c1)) * np.log(R)) if rup.mag <= c1: return base_term + self.CONSTS['a4'] * (rup.mag - c1) else: return base_term + self.CONSTS['a5'] * (rup.mag - c1)
Compute and return base model term, that is the first term in equation 1, page 74. The calculation of this term is explained in paragraph 'Base Model', page 75.
def p_generate_if_woelse(self, p): 'generate_if : IF LPAREN cond RPAREN gif_true_item' p[0] = IfStatement(p[3], p[5], None, lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
generate_if : IF LPAREN cond RPAREN gif_true_item
def add_x509_key_descriptors(metadata, cert=None, add_encryption=True): """ Adds the x509 descriptors (sign/encryption) to the metadata The same cert will be used for sign/encrypt :param metadata: SAML Metadata XML :type metadata: string :param cert: x509 cert :type cert: string :param add_encryption: Determines if the KeyDescriptor[use="encryption"] should be added. :type add_encryption: boolean :returns: Metadata with KeyDescriptors :rtype: string """ if cert is None or cert == '': return metadata try: root = OneLogin_Saml2_XML.to_etree(metadata) except Exception as e: raise Exception('Error parsing metadata. ' + str(e)) assert root.tag == '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD try: sp_sso_descriptor = next(root.iterfind('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP)) except StopIteration: raise Exception('Malformed metadata.') if add_encryption: OneLogin_Saml2_Metadata.__add_x509_key_descriptors(sp_sso_descriptor, cert, False) OneLogin_Saml2_Metadata.__add_x509_key_descriptors(sp_sso_descriptor, cert, True) return OneLogin_Saml2_XML.to_string(root)
Adds the x509 descriptors (sign/encryption) to the metadata The same cert will be used for sign/encrypt :param metadata: SAML Metadata XML :type metadata: string :param cert: x509 cert :type cert: string :param add_encryption: Determines if the KeyDescriptor[use="encryption"] should be added. :type add_encryption: boolean :returns: Metadata with KeyDescriptors :rtype: string
def __getitem_slice(self, slce): """Return a range which represents the requested slce of the sequence represented by this range. """ scaled_indices = (self._step * n for n in slce.indices(self._len)) start_offset, stop_offset, new_step = scaled_indices return newrange(self._start + start_offset, self._start + stop_offset, new_step)
Return a range which represents the requested slce of the sequence represented by this range.
def validate_config(cls, config): """ Validates a config dictionary parsed from a cluster config file. Checks that a discovery method is defined and that at least one of the balancers in the config are installed and available. """ if "discovery" not in config: raise ValueError("No discovery method defined.") installed_balancers = Balancer.get_installed_classes().keys() if not any([balancer in config for balancer in installed_balancers]): raise ValueError("No available balancer configs defined.")
Validates a config dictionary parsed from a cluster config file. Checks that a discovery method is defined and that at least one of the balancers in the config are installed and available.
def start(token, control=False, trigger='!', groups=None, groups_pillar_name=None, fire_all=False, tag='salt/engines/slack'): ''' Listen to slack events and forward them to salt, new version ''' if (not token) or (not token.startswith('xoxb')): time.sleep(2) # don't respawn too quickly log.error('Slack bot token not found, bailing...') raise UserWarning('Slack Engine bot token not configured') try: client = SlackClient(token=token) message_generator = client.generate_triggered_messages(token, trigger, groups, groups_pillar_name) client.run_commands_from_slack_async(message_generator, fire_all, tag, control) except Exception: raise Exception('{}'.format(traceback.format_exc()))
Listen to slack events and forward them to salt, new version
def comparable(self): """str: comparable representation of the path specification.""" string_parts = [] if self.location is not None: string_parts.append('location: {0:s}'.format(self.location)) if self.store_index is not None: string_parts.append('store index: {0:d}'.format(self.store_index)) return self._GetComparable(sub_comparable_string=', '.join(string_parts))
str: comparable representation of the path specification.
def set_value(self, dry_wet: LeakSensorState): """Set the state to wet or dry.""" if dry_wet == LeakSensorState.DRY: self._update_subscribers(0x11) else: self._update_subscribers(0x13)
Set the state to wet or dry.
def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs): """ replace scale of the specified Deployment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) return data
replace scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
         If the method is called asynchronously,
         returns the request thread.
def set_encode_key_value(self, value, store_type=PUBLIC_KEY_STORE_TYPE_BASE64): """Set the value based on the type of encoding supported by RSA.""" if store_type == PUBLIC_KEY_STORE_TYPE_PEM: PublicKeyBase.set_encode_key_value(self, value.exportKey('PEM').decode(), store_type) else: PublicKeyBase.set_encode_key_value(self, value.exportKey('DER'), store_type)
Set the value based on the type of encoding supported by RSA.
def _segment_index(self, recarr, existing_index, start, new_segments): """ Generate index of datetime64 -> item offset. Parameters: ----------- new_data: new data being written (or appended) existing_index: index field from the versions document of the previous version start: first (0-based) offset of the new data segments: list of offsets. Each offset is the row index of the the last row of a particular chunk relative to the start of the _original_ item. array(new_data) - segments = array(offsets in item) Returns: -------- Binary(compress(array([(index, datetime)])) Where index is the 0-based index of the datetime in the DataFrame """ # find the index of the first datetime64 column idx_col = self._datetime64_index(recarr) # if one exists let's create the index on it if idx_col is not None: new_segments = np.array(new_segments, dtype='i8') last_rows = recarr[new_segments - start] # create numpy index index = np.core.records.fromarrays([last_rows[idx_col]] + [new_segments, ], dtype=INDEX_DTYPE) # append to existing index if exists if existing_index: # existing_index_arr is read-only but it's never written to existing_index_arr = np.frombuffer(decompress(existing_index), dtype=INDEX_DTYPE) if start > 0: existing_index_arr = existing_index_arr[existing_index_arr['index'] < start] index = np.concatenate((existing_index_arr, index)) return Binary(compress(index.tostring())) elif existing_index: raise ArcticException("Could not find datetime64 index in item but existing data contains one") return None
Generate index of datetime64 -> item offset.

Parameters:
-----------
new_data: new data being written (or appended)
existing_index: index field from the versions document of the previous version
start: first (0-based) offset of the new data
segments: list of offsets. Each offset is the row index of the last row of a
          particular chunk relative to the start of the _original_ item.
          array(new_data) - segments = array(offsets in item)

Returns:
--------
Binary(compress(array([(index, datetime)])))
Where index is the 0-based index of the datetime in the DataFrame
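To make the index layout concrete, here is a minimal standalone sketch of the (datetime, offset) record array the method builds before compressing it. INDEX_DTYPE below is an assumed stand-in pairing a datetime64 with an unsigned offset; the library's actual dtype may differ.

import numpy as np

# Assumed stand-in for the library's INDEX_DTYPE.
INDEX_DTYPE = [('datetime', '<M8[ns]'), ('index', '<u8')]

# Last datetime of each chunk and the 0-based row offset at which that chunk ends.
chunk_end_dates = np.array(['2020-01-31', '2020-02-29'], dtype='M8[ns]')
chunk_end_offsets = np.array([999, 1999], dtype='u8')

index = np.core.records.fromarrays([chunk_end_dates, chunk_end_offsets],
                                   dtype=INDEX_DTYPE)
print(index['datetime'], index['index'])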
def parse_py(s, **kwargs): """Parse a string into a (nbformat, string) tuple.""" nbf = current_nbformat nbm = current_nbformat_minor pattern = r'# <nbformat>(?P<nbformat>\d+[\.\d+]*)</nbformat>' m = re.search(pattern,s) if m is not None: digits = m.group('nbformat').split('.') nbf = int(digits[0]) if len(digits) > 1: nbm = int(digits[1]) return nbf, nbm, s
Parse a string into a (nbformat, string) tuple.
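A worked example of the version tag the regex looks for; with this input the function would return (3, 0, s).

import re

s = "# <nbformat>3.0</nbformat>\n\nprint('hello')\n"
m = re.search(r'# <nbformat>(?P<nbformat>\d+[\.\d+]*)</nbformat>', s)
# The pattern captures '3.0'; major/minor are split on '.'
major, minor = (int(d) for d in m.group('nbformat').split('.'))
print(major, minor)   # 3 0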
def next_page(self, max_=None): """ Return a query set which requests the page after this response. :param max_: Maximum number of items to return. :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request the next page. Must be called on a result set which has :attr:`last` set. """ result = type(self)() result.after = After(self.last.value) result.max_ = max_ return result
Return a query set which requests the page after this response. :param max_: Maximum number of items to return. :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request the next page. Must be called on a result set which has :attr:`last` set.
def create(self, friendly_name=values.unset, sync_service_sid=values.unset): """ Create a new DeploymentInstance :param unicode friendly_name: A human readable description for this Deployment. :param unicode sync_service_sid: The unique identifier of the Sync service instance. :returns: Newly created DeploymentInstance :rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentInstance """ data = values.of({'FriendlyName': friendly_name, 'SyncServiceSid': sync_service_sid, }) payload = self._version.create( 'POST', self._uri, data=data, ) return DeploymentInstance(self._version, payload, fleet_sid=self._solution['fleet_sid'], )
Create a new DeploymentInstance :param unicode friendly_name: A human readable description for this Deployment. :param unicode sync_service_sid: The unique identifier of the Sync service instance. :returns: Newly created DeploymentInstance :rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentInstance
def migrate_database(adapter): """Migrate an old loqusdb instance to 1.0 Args: adapter Returns: nr_updated(int): Number of variants that where updated """ all_variants = adapter.get_variants() nr_variants = all_variants.count() nr_updated = 0 with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar: for variant in bar: # Do not update if the variants have the correct format if 'chrom' in variant: continue nr_updated += 1 splitted_id = variant['_id'].split('_') chrom = splitted_id[0] start = int(splitted_id[1]) ref = splitted_id[2] alt = splitted_id[3] # Calculate end end = start + (max(len(ref), len(alt)) - 1) adapter.db.variant.find_one_and_update( {'_id': variant['_id']}, { '$set': { 'chrom': chrom, 'start': start, 'end': end } } ) return nr_updated
Migrate an old loqusdb instance to 1.0

Args:
    adapter

Returns:
    nr_updated(int): Number of variants that were updated
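A worked example of the `_id` parsing step, which is the core of the migration; the identifier below is hypothetical but follows the `chrom_start_ref_alt` layout assumed by the code.

variant_id = "1_880086_T_TA"          # hypothetical pre-1.0 loqusdb _id
chrom, start, ref, alt = variant_id.split('_')
start = int(start)
end = start + (max(len(ref), len(alt)) - 1)
print(chrom, start, end)              # 1 880086 880087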
async def subscribe(self, *args, **kwargs): """ Subscribe to channels. Channels supplied as keyword arguments expect a channel name as the key and a callable as the value. A channel's callable will be invoked automatically when a message is received on that channel rather than producing a message via ``listen()`` or ``get_message()``. """ if args: args = list_or_args(args[0], args[1:]) new_channels = {} new_channels.update(dict.fromkeys(map(self.encode, args))) for channel, handler in iteritems(kwargs): new_channels[self.encode(channel)] = handler ret_val = await self.execute_command('SUBSCRIBE', *iterkeys(new_channels)) # update the channels dict AFTER we send the command. we don't want to # subscribe twice to these channels, once for the command and again # for the reconnection. self.channels.update(new_channels) return ret_val
Subscribe to channels. Channels supplied as keyword arguments expect a channel name as the key and a callable as the value. A channel's callable will be invoked automatically when a message is received on that channel rather than producing a message via ``listen()`` or ``get_message()``.
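A hedged usage sketch, assuming an asyncio Redis-style client whose `pubsub()` object exposes the method above; the client and channel names are illustrative.

import asyncio

async def on_alert(message):
    # Invoked automatically for every message on the 'alerts' channel.
    print("alert received:", message)

async def main(client):
    pubsub = client.pubsub()
    # 'news' is consumed via listen()/get_message(); 'alerts' goes straight to the handler.
    await pubsub.subscribe('news', alerts=on_alert)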
def calculateHurst(self, series, exponent=None): ''' :type series: List :type exponent: int :rtype: float ''' rescaledRange = list() sizeRange = list() rescaledRangeMean = list() if(exponent is None): exponent = self.bestExponent(len(series)) for i in range(0, exponent): partsNumber = int(math.pow(2, i)) size = int(len(series)/partsNumber) sizeRange.append(size) rescaledRange.append(0) rescaledRangeMean.append(0) for x in range(0, partsNumber): start = int(size*(x)) limit = int(size*(x+1)) deviationAcumulative = self.sumDeviation(self.deviation( series, start, limit, self.mean(series, start, limit))) deviationsDifference = float( max(deviationAcumulative) - min(deviationAcumulative)) standartDeviation = self.standartDeviation( series, start, limit) if(deviationsDifference != 0 and standartDeviation != 0): rescaledRange[i] += (deviationsDifference / standartDeviation) y = 0 for x in rescaledRange: rescaledRangeMean[y] = x/int(math.pow(2, y)) y = y+1 # log calculation rescaledRangeLog = list() sizeRangeLog = list() for i in range(0, exponent): rescaledRangeLog.append(math.log(rescaledRangeMean[i], 10)) sizeRangeLog.append(math.log(sizeRange[i], 10)) slope, intercept = np.polyfit(sizeRangeLog, rescaledRangeLog, 1) ablineValues = [slope * i + intercept for i in sizeRangeLog] plt.plot(sizeRangeLog, rescaledRangeLog, '--') plt.plot(sizeRangeLog, ablineValues, 'b') plt.title(slope) # graphic dimension settings limitUp = 0 if(max(sizeRangeLog) > max(rescaledRangeLog)): limitUp = max(sizeRangeLog) else: limitUp = max(rescaledRangeLog) limitDown = 0 if(min(sizeRangeLog) > min(rescaledRangeLog)): limitDown = min(rescaledRangeLog) else: limitDown = min(sizeRangeLog) plt.gca().set_xlim(limitDown, limitUp) plt.gca().set_ylim(limitDown, limitUp) print("Hurst exponent: " + str(slope)) plt.show() return slope
:type series: List :type exponent: int :rtype: float
def no_ssl_verification(self): """ Requests module fails due to lets encrypt ssl encryption. Will be fixed in the future release.""" try: from functools import partialmethod except ImportError: # Python 2 fallback: https://gist.github.com/carymrobbins/8940382 from functools import partial class partialmethod(partial): def __get__(self, instance, owner): if instance is None: return self return partial(self.func, instance, *(self.args or ()), **(self.keywords or {})) old_request = requests.Session.request requests.Session.request = partialmethod(old_request, verify=False) warnings.filterwarnings('ignore', 'Unverified HTTPS request') yield warnings.resetwarnings() requests.Session.request = old_request
The requests module fails due to Let's Encrypt SSL certificates. This will be fixed in a future release.
def train(self, ftrain): '''Trains the polynomial expansion. :param numpy.ndarray/function ftrain: output values corresponding to the quadrature points given by the getQuadraturePoints method to which the expansion should be trained. Or a function that should be evaluated at the quadrature points to give these output values. *Sample Usage*:: >>> thePC = PolySurrogate(dimensions=2) >>> thePC.train(myFunc) >>> predicted_q = thePC.predict([0, 1]) >>> thePC = PolySurrogate(dimensions=2) >>> U = thePC.getQuadraturePoints() >>> Q = [myFunc(u) for u in U] >>> thePC.train(Q) >>> predicted_q = thePC.predict([0, 1]) ''' self.coeffs = 0*self.coeffs upoints, wpoints = self.getQuadraturePointsAndWeights() try: fpoints = [ftrain(u) for u in upoints] except TypeError: fpoints = ftrain for ipoly in np.arange(self.N_poly): inds = tuple(self.index_polys[ipoly]) coeff = 0.0 for (u, q, w) in zip(upoints, fpoints, wpoints): coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w) self.coeffs[inds] = coeff return None
Trains the polynomial expansion. :param numpy.ndarray/function ftrain: output values corresponding to the quadrature points given by the getQuadraturePoints method to which the expansion should be trained. Or a function that should be evaluated at the quadrature points to give these output values. *Sample Usage*:: >>> thePC = PolySurrogate(dimensions=2) >>> thePC.train(myFunc) >>> predicted_q = thePC.predict([0, 1]) >>> thePC = PolySurrogate(dimensions=2) >>> U = thePC.getQuadraturePoints() >>> Q = [myFunc(u) for u in U] >>> thePC.train(Q) >>> predicted_q = thePC.predict([0, 1])
def warning(self, message, *args, **kwargs): """Log warning event. Compatible with logging.warning signature. """ self.system.warning(message, *args, **kwargs)
Log warning event. Compatible with logging.warning signature.
def searchTriples(expnums,ccd): """Given a list of exposure numbers, find all the KBOs in that set of exposures""" import MOPfits,os import MOPdbaccess if len(expnums)!=3: return(-1) mysql=MOPdbaccess.connect('bucket','cfhls','MYSQL') bucket=mysql.cursor() ### Some program Constants proc_file = open("proc-these-files","w") proc_file.write("# Files to be planted and searched\n") proc_file.write("# image fwhm plant\n") import string import os.path filenames=[] import pyfits for expnum in expnums: bucket.execute("SELECT obs_iq_refccd FROM exposure WHERE expnum=%s" , (expnum, ) ) row=bucket.fetchone() fwhm=row[0] if not fwhm > 0: fwhm=1.0 if int(ccd)<18: cutout="[-*,-*]" else: cutout=None filename=MOPfits.adGet(str(expnum)+"p",extno=int(ccd),cutout=cutout) print filename if not os.access(filename,os.R_OK): return(-3) filename=os.path.splitext(filename) filenames.append(filename[0]) proc_file.write("%s %f %s \n" % ( filename[0], fwhm/0.183, "no")) proc_file.flush() proc_file.close() command="find.pl -p '' -d ./ " sys.stderr.write(command) try: os.system(command) except: sys.stderr.write("Failed while running find") file_extens=[ "cands.comb", "measure3.cands.astrom", "measure3.WARNING", "measure3.astrom.scatter"] if os.access("find.OK",os.R_OK): os.system("touch /home/cadc/kavelaar/results/05AQ06B/"+filenames[0]+".OK") else: os.system("touch /home/cadc/kavelaar/results/05AQ06B/"+filenames[0]+".FAILED") ### look for the cand.comb file and store in the DB import shutil for ext in file_extens: if os.access(filenames[0]+"."+ext,os.R_OK): shutil.copy(filenames[0]+"."+ext,"/home/cadc/kavelaar/results/05AQ06B") astrom=filenames[0]+".measure3.cands.astrom" print astrom cmd = "mpc_gen.pl -c "+astrom print os.access(astrom,os.R_OK) if os.access(astrom,os.R_OK): print cmd os.system(cmd) os.system("mpcIngest.pl *.MPC") os.system("cp *.MPC /home/cadc/kavelaar/results/05AQ06B") return(1) return(0)
Given a list of exposure numbers, find all the KBOs in that set of exposures
def sort_index(self, **kwargs): """Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices. """ axis = kwargs.pop("axis", 0) index = self.columns if axis else self.index # sort_index can have ascending be None and behaves as if it is False. # sort_values cannot have ascending be None. Thus, the following logic is to # convert the ascending argument to one that works with sort_values ascending = kwargs.pop("ascending", True) if ascending is None: ascending = False kwargs["ascending"] = ascending def sort_index_builder(df, **kwargs): if axis: df.columns = index else: df.index = index return df.sort_index(axis=axis, **kwargs) func = self._prepare_method(sort_index_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) if axis: new_columns = pandas.Series(self.columns).sort_values(**kwargs) new_index = self.index else: new_index = pandas.Series(self.index).sort_values(**kwargs) new_columns = self.columns return self.__constructor__( new_data, new_index, new_columns, self.dtypes.copy() )
Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices.
def __record(self, oid=None): """Reads and returns a dbf record row as a list of values.""" f = self.__getFileObj(self.dbf) recordContents = self.__recStruct.unpack(f.read(self.__recStruct.size)) if recordContents[0] != b' ': # deleted record return None record = [] for (name, typ, size, deci), value in zip(self.fields, recordContents): if name == 'DeletionFlag': continue elif typ in ("N","F"): # numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field. value = value.split(b'\0')[0] value = value.replace(b'*', b'') # QGIS NULL is all '*' chars if value == b'': value = None elif deci: try: value = float(value) except ValueError: #not parseable as float, set to None value = None else: # force to int try: # first try to force directly to int. # forcing a large int to float and back to int # will lose information and result in wrong nr. value = int(value) except ValueError: # forcing directly to int failed, so was probably a float. try: value = int(float(value)) except ValueError: #not parseable as int, set to None value = None elif typ == 'D': # date: 8 bytes - date stored as a string in the format YYYYMMDD. if value.count(b'0') == len(value): # QGIS NULL is all '0' chars value = None else: try: y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8]) value = date(y, m, d) except: value = value.strip() elif typ == 'L': # logical: 1 byte - initialized to 0x20 (space) otherwise T or F. if value == b" ": value = None # space means missing or not yet set else: if value in b'YyTt1': value = True elif value in b'NnFf0': value = False else: value = None # unknown value is set to missing else: # anything else is forced to string/unicode value = u(value, self.encoding, self.encodingErrors) value = value.strip() record.append(value) return _Record(self.__fieldposition_lookup, record, oid)
Reads and returns a dbf record row as a list of values.
def get_song(self, netease=False):
    """ Get a song; unified external interface """
    song = self._playlist.get(True)
    self.hash_sid[song['sid']] = True  # deduplicate by song id
    self.get_netease_song(song, netease)  # check whether to fetch the NetEase 320k version
    self._playingsong = song
    return song
Get a song; unified external interface
def serve_forever(self): """Wrapper to the serve_forever function.""" loop = True while loop: loop = self.__serve_forever() self.end()
Wrapper to the serve_forever function.
def find(cls, *args, **kwargs): """Same as ``collection.find``, returns model object instead of dict.""" return cls.from_cursor(cls.collection.find(*args, **kwargs))
Same as ``collection.find``, returns model object instead of dict.
def create(self, fields): """ Create the object only once. So, you need loop to usage. :param `fields` is dictionary fields. """ try: # Cleaning the fields, and check if has `ForeignKey` type. cleaned_fields = {} for key, value in fields.items(): if type(value) is dict: try: if value['type'] == 'fk': fake_fk = self.fake_fk(value['field_name']) cleaned_fields.update({key: fake_fk}) except: pass else: cleaned_fields.update({key: value}) # Creating the object from dictionary fields. model_class = self.model_class() obj = model_class.objects.create(**cleaned_fields) # The `ManyToManyField` need specific object, # so i handle it after created the object. for key, value in fields.items(): if type(value) is dict: try: if value['type'] == 'm2m': self.fake_m2m(obj, value['field_name']) except: pass try: obj.save_m2m() except: obj.save() return obj except Exception as e: raise e
Create the object only once, so call it in a loop if you need multiple objects.

:param `fields`: a dictionary of field names and values.
def transfer(self, receiver_address, amount, sender_account): """ Transfer a number of tokens from `sender_account` to `receiver_address` :param receiver_address: hex str ethereum address to receive this transfer of tokens :param amount: int number of tokens to transfer :param sender_account: Account instance to take the tokens from :return: bool """ self._keeper.token.token_approve(receiver_address, amount, sender_account) self._keeper.token.transfer(receiver_address, amount, sender_account)
Transfer a number of tokens from `sender_account` to `receiver_address` :param receiver_address: hex str ethereum address to receive this transfer of tokens :param amount: int number of tokens to transfer :param sender_account: Account instance to take the tokens from :return: bool
def createMemoryParserCtxt(buffer, size): """Create a parser context for an XML in-memory document. """ ret = libxml2mod.xmlCreateMemoryParserCtxt(buffer, size) if ret is None:raise parserError('xmlCreateMemoryParserCtxt() failed') return parserCtxt(_obj=ret)
Create a parser context for an XML in-memory document.
def luks_cleartext_holder(self): """Get wrapper to the unlocked luks cleartext device.""" if not self.is_luks: return None for device in self._daemon: if device.luks_cleartext_slave == self: return device return None
Get wrapper to the unlocked luks cleartext device.
def run(self, verbose=False): """ Del user modules to force Python to deeply reload them Do not del modules which are considered as system modules, i.e. modules installed in subdirectories of Python interpreter's binary Do not del C modules """ log = [] modules_copy = dict(sys.modules) for modname, module in modules_copy.items(): if modname == 'aaaaa': print(modname, module) print(self.previous_modules) if modname not in self.previous_modules: modpath = getattr(module, '__file__', None) if modpath is None: # *module* is a C module that is statically linked into the # interpreter. There is no way to know its path, so we # choose to ignore it. continue if not self.is_module_blacklisted(modname, modpath): log.append(modname) del sys.modules[modname] if verbose and log: print("\x1b[4;33m%s\x1b[24m%s\x1b[0m" % ("UMD has deleted", ": " + ", ".join(log)))
Del user modules to force Python to deeply reload them Do not del modules which are considered as system modules, i.e. modules installed in subdirectories of Python interpreter's binary Do not del C modules
def _cleanup(self, kill, verbose): """Look for dead components (weight=0) and remove them if enabled by ``kill``. Resize storage. Recompute determinant and covariance. """ if kill: removed_indices = self.g.prune() self.nout -= len(removed_indices) if verbose and removed_indices: print('Removing %s' % removed_indices) for j in removed_indices: self.inv_map.pop(j[0])
Look for dead components (weight=0) and remove them if enabled by ``kill``. Resize storage. Recompute determinant and covariance.
def receive_message( sock, operation, request_id, max_message_size=MAX_MESSAGE_SIZE): """Receive a raw BSON message or raise socket.error.""" header = _receive_data_on_socket(sock, 16) length = _UNPACK_INT(header[:4])[0] actual_op = _UNPACK_INT(header[12:])[0] if operation != actual_op: raise ProtocolError("Got opcode %r but expected " "%r" % (actual_op, operation)) # No request_id for exhaust cursor "getMore". if request_id is not None: response_id = _UNPACK_INT(header[8:12])[0] if request_id != response_id: raise ProtocolError("Got response id %r but expected " "%r" % (response_id, request_id)) if length <= 16: raise ProtocolError("Message length (%r) not longer than standard " "message header size (16)" % (length,)) if length > max_message_size: raise ProtocolError("Message length (%r) is larger than server max " "message size (%r)" % (length, max_message_size)) return _receive_data_on_socket(sock, length - 16)
Receive a raw BSON message or raise socket.error.
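To make the header parsing concrete, a small standalone sketch of the 16-byte MongoDB wire header the function unpacks (messageLength, requestID, responseTo, opCode, all little-endian int32); the values below are made up.

import struct

_UNPACK_INT = struct.Struct("<i").unpack

# Hypothetical header: total length 36, requestID 7, responseTo 42, opCode 1 (OP_REPLY).
header = struct.pack("<iiii", 36, 7, 42, 1)

length = _UNPACK_INT(header[:4])[0]         # 36
response_id = _UNPACK_INT(header[8:12])[0]  # 42 -- matched against the request_id we sent
actual_op = _UNPACK_INT(header[12:])[0]     # 1  -- matched against the expected opcode
print(length, response_id, actual_op)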
def convert_sqlite_to_mysql(self):
    """*copy the contents of the sqlite database into the mysql database*

    See class docstring for usage
    """
    from fundamentals.renderer import list_of_dictionaries
    from fundamentals.mysql import directory_script_runner
    self.log.debug('starting the ``convert_sqlite_to_mysql`` method')

    con = lite.connect(self.pathToSqlite)
    con.row_factory = lite.Row
    cur = con.cursor()

    # GET ALL TABLE NAMES
    cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tables = cur.fetchall()

    createStatements = []
    inserts = []
    for table in tables:
        table = table['name']
        if table == "sqlite_sequence":
            continue

        # CREATE TABLE collection_books (folder_id, fingerprint, primary key(folder_id, fingerprint));
        # GENERATE THE MYSQL CREATE STATEMENTS FOR EACH TABLE
        cur.execute(
            "SELECT sql FROM sqlite_master WHERE name = '%(table)s';" % locals())
        createStatement = cur.fetchone()
        createStatement = createStatement[0].replace('"', '`') + ";"
        if "DEFAULT" not in createStatement:
            if "primary key(" in createStatement:
                tmp = createStatement.split("primary key(")
                tmp[0] = tmp[0].replace(
                    ",", " varchar(150) DEFAULT NULL,")
                createStatement = ("primary key(").join(tmp)
            if "primary key," in createStatement:
                tmp = createStatement.split("primary key,")
                tmp[1] = tmp[1].replace(
                    ",", " varchar(150) DEFAULT NULL,")
                tmp[1] = tmp[1].replace(
                    ");", " varchar(150) DEFAULT NULL);")
                createStatement = ("primary key,").join(tmp)
        createStatement = createStatement.replace(
            "INTEGER PRIMARY KEY", "INTEGER AUTO_INCREMENT PRIMARY KEY")
        createStatement = createStatement.replace(
            "AUTOINCREMENT", "AUTO_INCREMENT")
        createStatement = createStatement.replace(
            "DEFAULT 't'", "DEFAULT '1'")
        createStatement = createStatement.replace(
            "DEFAULT 'f'", "DEFAULT '0'")
        createStatement = createStatement.replace(",'t'", ",'1'")
        createStatement = createStatement.replace(",'f'", ",'0'")
        if "CREATE TABLE `" in createStatement:
            createStatement = createStatement.replace(
                "CREATE TABLE `", "CREATE TABLE IF NOT EXISTS `" + self.tablePrefix)
        else:
            createStatement = createStatement.replace(
                "CREATE TABLE ", "CREATE TABLE IF NOT EXISTS " + self.tablePrefix)
        if ", primary key(" in createStatement:
            createStatement = createStatement.replace(", primary key(", """,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
    `updated` tinyint(4) DEFAULT '0',
    primary key(""")
        else:
            createStatement = createStatement.replace(");", """,
    `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
    `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
    `updated` tinyint(4) DEFAULT '0');
""")
        createStatement = createStatement.replace(
            " text primary key", " varchar(100) primary key")
        createStatement = createStatement.replace(
            "`EntryText` TEXT NOT NULL,", "`EntryText` TEXT,")
        createStatement = createStatement.replace(
            "`SelectionText` TEXT NOT NULL", "`SelectionText` TEXT")
        createStatement = createStatement.replace(
            "`Filename` INTEGER NOT NULL,", "`Filename` TEXT NOT NULL,")
        createStatement = createStatement.replace(
            "`SessionPartUUID` TEXT NOT NULL UNIQUE,",
            "`SessionPartUUID` VARCHAR(100) NOT NULL UNIQUE,")
        createStatement = createStatement.replace(
            "`Name` TEXT PRIMARY KEY NOT NULL",
            "`Name` VARCHAR(100) PRIMARY KEY NOT NULL")
        createStatement = createStatement.replace(
            " VARCHAR ", " VARCHAR(100) ")
        createStatement = createStatement.replace(
            " VARCHAR,", " VARCHAR(100),")

        # GRAB THE DATA TO ADD TO THE MYSQL DATABASE TABLES
        cur.execute(
            "SELECT * from '%(table)s';" % locals())
        rows = cur.fetchall()
        allRows = []
        for row in rows:
            allRows.append(dict(row))

        # RECURSIVELY CREATE MISSING DIRECTORIES
        if not os.path.exists("/tmp/headjack/"):
            os.makedirs("/tmp/headjack/")

        writequery(
            log=self.log,
            sqlQuery=createStatement,
            dbConn=self.dbConn,
        )

        from fundamentals.mysql import insert_list_of_dictionaries_into_database_tables
        # USE dbSettings TO ACTIVATE MULTIPROCESSING
        insert_list_of_dictionaries_into_database_tables(
            dbConn=self.dbConn,
            log=self.log,
            dictList=allRows,
            dbTableName=self.tablePrefix + table,
            uniqueKeyList=[],
            dateModified=True,
            dateCreated=True,
            batchSize=10000,
            replace=True,
            dbSettings=self.settings["database settings"]
        )

        # # DUMP THE DATA INTO A MYSQL DATABASE
        # dataSet = list_of_dictionaries(
        #     log=self.log,
        #     listOfDictionaries=allRows
        # )
        # originalList = dataSet.list
        # now = datetime.now()
        # now = now.strftime("%Y%m%dt%H%M%S%f.sql")
        # mysqlData = dataSet.mysql(
        #     tableName=self.tablePrefix + table, filepath="/tmp/headjack/" +
        #     now, createStatement=createStatement)

        # directory_script_runner(
        #     log=self.log,
        #     pathToScriptDirectory="/tmp/headjack/",
        #     databaseName=self.settings["database settings"]["db"],
        #     loginPath=self.settings["database settings"]["loginPath"],
        #     successRule="delete",
        #     failureRule="failed"
        # )

    con.close()

    self.log.debug('completed the ``convert_sqlite_to_mysql`` method')
    return None
*copy the contents of the sqlite database into the mysql database* See class docstring for usage
def loglike(self): ''' The summed log-probability of all stochastic variables that depend on self.stochastics, with self.stochastics removed. ''' sum = logp_of_set(self.children) if self.verbose > 2: print_('\t' + self._id + ' Current log-likelihood ', sum) return sum
The summed log-probability of all stochastic variables that depend on self.stochastics, with self.stochastics removed.
def _extract_sender( message: Message, resent_dates: List[Union[str, Header]] = None ) -> str: """ Extract the sender from the message object given. """ if resent_dates: sender_header = "Resent-Sender" from_header = "Resent-From" else: sender_header = "Sender" from_header = "From" # Prefer the sender field per RFC 2822:3.6.2. if sender_header in message: sender = message[sender_header] else: sender = message[from_header] return str(sender) if sender else ""
Extract the sender from the message object given.
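A small runnable example of the precedence rule applied by the function above (Sender preferred over From when no resent dates are given), using the standard library email package:

from email.message import EmailMessage

msg = EmailMessage()
msg["From"] = "alice@example.com"
msg["Sender"] = "mailer@example.com"

# With no resent dates, the Sender header wins per RFC 2822 section 3.6.2.
print(_extract_sender(msg))            # mailer@example.com

del msg["Sender"]
print(_extract_sender(msg))            # alice@example.com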
def grouped_insert(t, value): """Insert value into the target tree 't' with correct grouping.""" collator = Collator.createInstance(Locale(t.lang) if t.lang else Locale()) if value.tail is not None: val_prev = value.getprevious() if val_prev is not None: val_prev.tail = (val_prev.tail or '') + value.tail else: val_parent = value.getparent() if val_parent is not None: val_parent.text = (val_parent.text or '') + value.tail value.tail = None if t.isgroup and t.sort(value) is not None: if t.groupby: for child in t.tree: if child.get('class') == 'group-by': # child[0] is the label span order = collator.compare( t.groupby(child[1]) or '', t.groupby(value) or '') if order == 0: c_target = Target(child, sort=t.sort, lang=t.lang) insert_group(value, c_target) break elif order > 0: group = create_group(t.groupby(value)) group.append(value) child.addprevious(group) break else: group = create_group(t.groupby(value)) group.append(value) t.tree.append(group) else: insert_group(value, t) elif t.sort and t.sort(value) is not None: insert_sort(value, t) elif t.location == 'inside': for child in t.tree: value.append(child) value.text = t.tree.text t.tree.text = None t.tree.append(value) elif t.location == 'outside': value.tail = t.tree.tail t.tree.tail = None target_parent_descendants = ( [n.getparent() for n in t.parent.iterdescendants() if n == t.tree]) try: parent = target_parent_descendants[0] parent.insert(parent.index(t.tree), value) value.append(t.tree) except IndexError as e: logger.error('Target of outside has been moved or deleted') raise e elif t.location == 'before': value.tail = t.tree.text t.tree.text = None t.tree.insert(0, value) else: t.tree.append(value)
Insert value into the target tree 't' with correct grouping.
def start(self): """Start the daemon""" if self._already_running(): message = 'pid file %s already exists. Daemon already running?\n' sys.stderr.write(message % self.pid_file) return 0 self.set_gid() self.set_uid() # Create log files (if configured) with the new user/group. Creating # them as root would allow symlink exploits. self.setup_logging() # Create pid file with new user/group. This ensures we will be able # to delete the file when shutting down. self.daemonize() try: self.run() except Exception: self.logger.exception('Exception while running the daemon:') return 1 return 0
Start the daemon
def retrieve(self, id):
    """ Retrieve a single task

    Returns a single task available to the user according
    to the unique task ID provided.
    If the specified task does not exist, this query will return an error.

    :calls: ``get /tasks/{id}``
    :param int id: Unique identifier of a Task.
    :return: Dictionary that supports attribute-style access and represents a Task resource.
    :rtype: dict
    """

    _, _, task = self.http_client.get("/tasks/{id}".format(id=id))
    return task
Retrieve a single task

Returns a single task available to the user according
to the unique task ID provided.
If the specified task does not exist, this query will return an error.

:calls: ``get /tasks/{id}``
:param int id: Unique identifier of a Task.
:return: Dictionary that supports attribute-style access and represents a Task resource.
:rtype: dict
def generic_html(self, result, errors): """ Try to display any object in sensible HTML. """ h1 = htmlize(type(result)) out = [] result = pre_process_json(result) if not hasattr(result, 'items'): # result is a non-container header = "<tr><th>Value</th></tr>" if type(result) is list: result = htmlize_list(result) else: result = htmlize(result) out = ["<tr><td>" + result + "</td></tr>"] elif hasattr(result, 'lower'): out = ["<tr><td>" + result + "</td></tr>"] else: # object is a dict header = "<tr><th>Key</th><th>Value</th></tr>" for key, value in result.items(): v = htmlize(value) row = "<tr><td>{0}</td><td>{1}</td></tr>".format(key, v) out.append(row) env = Environment(loader=PackageLoader('giotto')) template = env.get_template('generic.html') rendered = template.render({'header': h1, 'table_header': header, 'table_body': out}) return {'body': rendered, 'mimetype': 'text/html'}
Try to display any object in sensible HTML.
def _flush(self): """ Returns a list of all current data """ if self._recording: raise Exception("Cannot flush data queue while recording!") if self._saving_cache: logging.warn("Flush when using cache means unsaved data will be lost and not returned!") self._cmds_q.put(("reset_data_segment",)) else: data = self._extract_q(0) return data
Returns a list of all current data
async def sort(self, request, reverse=False): """Sort collection.""" return sorted( self.collection, key=lambda o: getattr(o, self.columns_sort, 0), reverse=reverse)
Sort collection.
def execute(self, query_string, params=None): """Executes a query. Returns the resulting cursor. :query_string: the parameterized query string :params: can be either a tuple or a dictionary, and must match the parameterization style of the query :return: a cursor object """ cr = self.connection.cursor() logger.info("SQL: %s (%s)", query_string, params) self.last_query = (query_string, params) t0 = time.time() cr.execute(query_string, params or self.core.empty_params) ms = (time.time() - t0) * 1000 logger.info("RUNTIME: %.2f ms", ms) self._update_cursor_stats(cr) return cr
Executes a query. Returns the resulting cursor. :query_string: the parameterized query string :params: can be either a tuple or a dictionary, and must match the parameterization style of the query :return: a cursor object
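A hedged usage sketch, assuming `db` is an instance of the wrapper class above built around a DB-API connection whose paramstyle is %s-based (e.g. psycopg2); table and column names are illustrative.

# Parameterized query; params must match the connection's paramstyle.
cur = db.execute(
    "SELECT id, name FROM users WHERE created_at > %s AND active = %s",
    ("2024-01-01", True),
)
for row in cur.fetchall():
    print(row)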