Columns: code (string, lengths 75 to 104k), docstring (string, lengths 1 to 46.9k)
def get_distributions(cls, ctx, extra_dist_dirs=[]):
    '''Returns all the distributions found locally.'''
    if extra_dist_dirs:
        raise BuildInterruptingException(
            'extra_dist_dirs argument to get_distributions '
            'is not yet implemented')
    dist_dir = ctx.dist_dir
    folders = glob.glob(join(dist_dir, '*'))
    for dir in extra_dist_dirs:
        folders.extend(glob.glob(join(dir, '*')))

    dists = []
    for folder in folders:
        if exists(join(folder, 'dist_info.json')):
            with open(join(folder, 'dist_info.json')) as fileh:
                dist_info = json.load(fileh)
            dist = cls(ctx)
            dist.name = folder.split('/')[-1]
            dist.dist_dir = folder
            dist.needs_build = False
            dist.recipes = dist_info['recipes']
            if 'archs' in dist_info:
                dist.archs = dist_info['archs']
            if 'ndk_api' in dist_info:
                dist.ndk_api = dist_info['ndk_api']
            else:
                dist.ndk_api = None
                warning(
                    "Distribution {distname}: ({distdir}) has been "
                    "built with an unknown api target, ignoring it, "
                    "you might want to delete it".format(
                        distname=dist.name,
                        distdir=dist.dist_dir
                    )
                )
            dists.append(dist)
    return dists
Returns all the distributions found locally.
def thaw_parameter(self, name):
    """
    Thaw a parameter by name

    Args:
        name: The name of the parameter
    """
    i = self.get_parameter_names(include_frozen=True).index(name)
    self.unfrozen_mask[i] = True
Thaw a parameter by name Args: name: The name of the parameter
def send_with_media( self, *, text: str, files: List[str], captions: List[str]=[], ) -> List[OutputRecord]: """ Upload media to mastodon, and send status and media, and captions if present. :param text: post text. :param files: list of files to upload with post. :param captions: list of captions to include as alt-text with files. :returns: list of output records, each corresponding to either a single post, or an error. """ try: self.ldebug(f"Uploading files {files}.") if captions is None: captions = [] if len(files) > len(captions): captions.extend([self.default_caption_message] * (len(files) - len(captions))) media_dicts = [] for i, file in enumerate(files): caption = captions[i] media_dicts.append(self.api.media_post(file, description=caption)) self.ldebug(f"Media ids {media_dicts}") except mastodon.MastodonError as e: return [self.handle_error( f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n", e )] try: status = self.api.status_post(status=text, media_ids=media_dicts) self.ldebug(f"Status object from toot: {status}.") return [TootRecord(record_data={ "toot_id": status["id"], "text": text, "media_ids": media_dicts, "captions": captions })] except mastodon.MastodonError as e: return [self.handle_error((f"Bot {self.bot_name} encountered an error when " f"sending post {text} with media dicts {media_dicts}:" f"\n{e}\n"), e)]
Upload media to mastodon, and send status and media, and captions if present. :param text: post text. :param files: list of files to upload with post. :param captions: list of captions to include as alt-text with files. :returns: list of output records, each corresponding to either a single post, or an error.
def index_config(request):
    '''This view returns the index configuration of the current application
    as JSON. Currently, this consists of a Solr index url and the Fedora
    content models that this application expects to index.

    .. Note::

        By default, Fedora system content models (such as
        ``fedora-system:ContentModel-3.0``) are excluded. Any application
        that actually wants to index such objects will need to customize
        this view to include them.
    '''
    # Ensure permission to this resource is allowed. Currently based on IP only.
    if _permission_denied_check(request):
        return HttpResponseForbidden('Access to this web service was denied.',
                                     content_type='text/html')

    content_list = getattr(settings, 'EUL_INDEXER_CONTENT_MODELS', [])
    # Generate an automatic list of lists of content models (one list for each
    # defined type) if no content model settings exist
    if not content_list:
        for cls in six.itervalues(DigitalObject.defined_types):
            # by default, Fedora system content models are excluded
            content_group = [model for model in getattr(cls, 'CONTENT_MODELS', [])
                             if not model.startswith('info:fedora/fedora-system:')]
            # if the group of content models is not empty, add it to the list
            if content_group:
                content_list.append(content_group)

    response = {
        'CONTENT_MODELS': content_list,
        'SOLR_URL': settings.SOLR_SERVER_URL
    }
    return HttpResponse(json.dumps(response), content_type='application/json')
This view returns the index configuration of the current application as JSON. Currently, this consists of a Solr index url and the Fedora content models that this application expects to index. .. Note:: By default, Fedora system content models (such as ``fedora-system:ContentModel-3.0``) are excluded. Any application that actually wants to index such objects will need to customize this view to include them.
def _engineServicesRunning():
    """ Return true if the engine services are running
    """
    process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE)

    stdout = process.communicate()[0]
    result = process.returncode
    if result != 0:
        raise RuntimeError("Unable to check for running client job manager")

    # See if the CJM is running
    running = False
    for line in stdout.split("\n"):
        if "python" in line and "clientjobmanager.client_job_manager" in line:
            running = True
            break

    return running
Return true if the engine services are running
def parse_parameters_from_response(self, response):
    """
    Returns a response signature and query string generated from the
    server response. 'h' aka signature argument is stripped from the
    returned query string.
    """
    lines = response.splitlines()
    pairs = [line.strip().split('=', 1) for line in lines if '=' in line]
    pairs = sorted(pairs)
    signature = ([unquote(v) for k, v in pairs if k == 'h'] or [None])[0]
    # already quoted
    query_string = '&'.join([k + '=' + v for k, v in pairs if k != 'h'])
    return (signature, query_string)
Returns a response signature and query string generated from the server response. 'h' aka signature argument is stripped from the returned query string.
def TermsProcessor(instance, placeholder, rendered_content, original_context):
    """
    Adds links all placeholders plugins except django-terms plugins
    """
    if 'terms' in original_context:
        return rendered_content
    return mark_safe(replace_terms(rendered_content))
Adds links all placeholders plugins except django-terms plugins
def spans_columns(self, column_names):
    """
    Checks if this index exactly spans the given column names in the correct order.

    :type column_names: list

    :rtype: bool
    """
    columns = self.get_columns()
    number_of_columns = len(columns)
    same_columns = True

    for i in range(number_of_columns):
        column = self._trim_quotes(columns[i].lower())
        if i >= len(column_names) or column != self._trim_quotes(
            column_names[i].lower()
        ):
            same_columns = False

    return same_columns
Checks if this index exactly spans the given column names in the correct order. :type column_names: list :rtype: bool
def wait_until_running(self, callback=None):
    """Waits until the remote worker is running, then calls the callback.
    Usually, this method is passed to a different thread; the callback is
    then a function patching results through to the result queue."""
    status = self.machine.scheduler.wait_until_running(
        self.job, self.worker_config.time_out)
    if status.running:
        self.online = True
        if callback:
            callback(self)
    else:
        raise TimeoutError("Timeout while waiting for worker to run: " +
                           self.worker_config.name)
Waits until the remote worker is running, then calls the callback. Usually, this method is passed to a different thread; the callback is then a function patching results through to the result queue.
def set_zone(time_zone):
    '''
    Set the local time zone. Use ``timezone.list_zones`` to list valid
    time_zone arguments

    :param str time_zone: The time zone to apply

    :return: True if successful, False if not
    :rtype: bool

    :raises: SaltInvocationError on Invalid Timezone
    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone America/Denver
    '''
    if time_zone not in list_zones():
        raise SaltInvocationError('Invalid Timezone: {0}'.format(time_zone))

    salt.utils.mac_utils.execute_return_success(
        'systemsetup -settimezone {0}'.format(time_zone))

    return time_zone in get_zone()
Set the local time zone. Use ``timezone.list_zones`` to list valid time_zone arguments :param str time_zone: The time zone to apply :return: True if successful, False if not :rtype: bool :raises: SaltInvocationError on Invalid Timezone :raises: CommandExecutionError on failure CLI Example: .. code-block:: bash salt '*' timezone.set_zone America/Denver
def remove(self):
    """Removes the node from the graph. Note this does not remove the
    associated data object. See :func:`Node.can_remove` for limitations on
    what can be deleted.

    :returns:
        :class:`BaseNodeData` subclass associated with the deleted Node

    :raises AttributeError:
        if called on a ``Node`` that cannot be deleted
    """
    if not self.can_remove():
        raise AttributeError('this node cannot be deleted')

    data = self.data
    self.parents.remove(self)
    self.delete()
    return data
Removes the node from the graph. Note this does not remove the associated data object. See :func:`Node.can_remove` for limitations on what can be deleted. :returns: :class:`BaseNodeData` subclass associated with the deleted Node :raises AttributeError: if called on a ``Node`` that cannot be deleted
def _sim_trajectories(self, time_size, start_pos, rs, total_emission=False, save_pos=False, radial=False, wrap_func=wrap_periodic): """Simulate (in-memory) `time_size` steps of trajectories. Simulate Brownian motion diffusion and emission of all the particles. Uses the attributes: num_particles, sigma_1d, box, psf. Arguments: time_size (int): number of time steps to be simulated. start_pos (array): shape (num_particles, 3), particles start positions. This array is modified to store the end position after this method is called. rs (RandomState): a `numpy.random.RandomState` object used to generate the random numbers. total_emission (bool): if True, store only the total emission array containing the sum of emission of all the particles. save_pos (bool): if True, save the particles 3D trajectories wrap_func (function): the function used to apply the boundary condition (use :func:`wrap_periodic` or :func:`wrap_mirror`). Returns: POS (list): list of 3D trajectories arrays (3 x time_size) em (array): array of emission (total or per-particle) """ time_size = int(time_size) num_particles = self.num_particles if total_emission: em = np.zeros(time_size, dtype=np.float32) else: em = np.zeros((num_particles, time_size), dtype=np.float32) POS = [] # pos_w = np.zeros((3, c_size)) for i, sigma_1d in enumerate(self.sigma_1d): delta_pos = rs.normal(loc=0, scale=sigma_1d, size=3 * time_size) delta_pos = delta_pos.reshape(3, time_size) pos = np.cumsum(delta_pos, axis=-1, out=delta_pos) pos += start_pos[i] # Coordinates wrapping using the specified boundary conditions for coord in (0, 1, 2): pos[coord] = wrap_func(pos[coord], *self.box.b[coord]) # Sample the PSF along i-th trajectory then square to account # for emission and detection PSF. Ro = sqrt(pos[0]**2 + pos[1]**2) # radial pos. on x-y plane Z = pos[2] current_em = self.psf.eval_xz(Ro, Z)**2 if total_emission: # Add the current particle emission to the total emission em += current_em.astype(np.float32) else: # Store the individual emission of current particle em[i] = current_em.astype(np.float32) if save_pos: pos_save = np.vstack((Ro, Z)) if radial else pos POS.append(pos_save[np.newaxis, :, :]) # Update start_pos in-place for current particle start_pos[i] = pos[:, -1:] return POS, em
Simulate (in-memory) `time_size` steps of trajectories. Simulate Brownian motion diffusion and emission of all the particles. Uses the attributes: num_particles, sigma_1d, box, psf. Arguments: time_size (int): number of time steps to be simulated. start_pos (array): shape (num_particles, 3), particles start positions. This array is modified to store the end position after this method is called. rs (RandomState): a `numpy.random.RandomState` object used to generate the random numbers. total_emission (bool): if True, store only the total emission array containing the sum of emission of all the particles. save_pos (bool): if True, save the particles 3D trajectories wrap_func (function): the function used to apply the boundary condition (use :func:`wrap_periodic` or :func:`wrap_mirror`). Returns: POS (list): list of 3D trajectories arrays (3 x time_size) em (array): array of emission (total or per-particle)
def fetch(dataset_uri, item_identifier):
    """Return abspath to file with item content.

    Fetches the file from remote storage if required.
    """
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    click.secho(dataset.item_content_abspath(item_identifier))
Return abspath to file with item content. Fetches the file from remote storage if required.
def parse_record( self, lines ): """ Parse a TRANSFAC record out of `lines` and return a motif. """ # Break lines up temp_lines = [] for line in lines: fields = line.rstrip( "\r\n" ).split( None, 1 ) if len( fields ) == 1: fields.append( "" ) temp_lines.append( fields ) lines = temp_lines # Fill in motif from lines motif = TransfacMotif() current_line = 0 while 1: # Done parsing if no more lines to consume if current_line >= len( lines ): break # Remove prefix and first separator from line prefix, rest = lines[ current_line ] # No action for this prefix, just ignore the line if prefix not in self.parse_actions: current_line += 1 continue # Get action for line action = self.parse_actions[ prefix ] # Store a single line value if action[0] == "store_single": key = action[1] setattr( motif, key, rest ) current_line += 1 # Add a single line value to a list if action[0] == "store_single_list": key = action[1] if not getattr( motif, key ): setattr( motif, key, [] ) getattr( motif, key ).append( rest ) current_line += 1 # Add a single line value to a dictionary if action[0] == "store_single_key_value": key = action[1] k, v = rest.strip().split( '=', 1 ) if not getattr( motif, key ): setattr( motif, key, {} ) getattr( motif, key )[k] = v current_line += 1 # Store a block of text if action[0] == "store_block": key = action[1] value = [] while current_line < len( lines ) and lines[ current_line ][0] == prefix: value.append( lines[current_line][1] ) current_line += 1 setattr( motif, key, str.join( "\n", value ) ) # Store a matrix if action[0] == "store_matrix": # First line is alphabet alphabet = rest.split() alphabet_size = len( alphabet ) rows = [] pattern = "" current_line += 1 # Next lines are the rows of the matrix (we allow 0 rows) while current_line < len( lines ): prefix, rest = lines[ current_line ] # Prefix should be a two digit 0 padded row number if not prefix.isdigit(): break # The first `alphabet_size` fields are the row values values = rest.split() rows.append( [ float(_) for _ in values[:alphabet_size] ] ) # TRANSFAC includes an extra column with the IUPAC code if len( values ) > alphabet_size: pattern += values[alphabet_size] current_line += 1 # Only store the pattern if it is the correct length (meaning # that every row had an extra field) if len( pattern ) != len( rows ): pattern = None matrix = FrequencyMatrix.from_rows( alphabet, rows ) setattr( motif, action[1], matrix ) # Only return a motif if we saw at least ID or AC or NA if motif.id or motif.accession or motif.name: return motif
Parse a TRANSFAC record out of `lines` and return a motif.
def _weighting(weights, exponent):
    """Return a weighting whose type is inferred from the arguments."""
    if np.isscalar(weights):
        weighting = NumpyTensorSpaceConstWeighting(weights, exponent)
    elif weights is None:
        weighting = NumpyTensorSpaceConstWeighting(1.0, exponent)
    else:
        # last possibility: make an array
        arr = np.asarray(weights)
        weighting = NumpyTensorSpaceArrayWeighting(arr, exponent)
    return weighting
Return a weighting whose type is inferred from the arguments.
def rec_new(self, zone, record_type, name, content, ttl=1, priority=None, service=None, service_name=None, protocol=None, weight=None, port=None, target=None): """ Create a DNS record for the given zone :param zone: domain name :type zone: str :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC] :type record_type: str :param name: name of the DNS record :type name: str :param content: content of the DNS record :type content: str :param ttl: TTL of the DNS record in seconds. 1 = Automatic, otherwise, value must in between 120 and 4,294,967,295 seconds. :type ttl: int :param priority: [applies to MX/SRV] MX record priority. :type priority: int :param service: Service for SRV record :type service: str :param service_name: Service Name for SRV record :type service_name: str :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls]. :type protocol: str :param weight: Weight for SRV record. :type weight: int :param port: Port for SRV record :type port: int :param target: Target for SRV record :type target: str :return: :rtype: dict """ params = { 'a': 'rec_new', 'z': zone, 'type': record_type, 'name': name, 'content': content, 'ttl': ttl } if priority is not None: params['prio'] = priority if service is not None: params['service'] = service if service_name is not None: params['srvname'] = service_name if protocol is not None: params['protocol'] = protocol if weight is not None: params['weight'] = weight if port is not None: params['port'] = port if target is not None: params['target'] = target return self._request(params)
Create a DNS record for the given zone :param zone: domain name :type zone: str :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC] :type record_type: str :param name: name of the DNS record :type name: str :param content: content of the DNS record :type content: str :param ttl: TTL of the DNS record in seconds. 1 = Automatic, otherwise, value must in between 120 and 4,294,967,295 seconds. :type ttl: int :param priority: [applies to MX/SRV] MX record priority. :type priority: int :param service: Service for SRV record :type service: str :param service_name: Service Name for SRV record :type service_name: str :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls]. :type protocol: str :param weight: Weight for SRV record. :type weight: int :param port: Port for SRV record :type port: int :param target: Target for SRV record :type target: str :return: :rtype: dict
def set_unit_desired_state(self, unit, desired_state):
    """Update the desired state of a unit running in the cluster

    Args:
        unit (str, Unit): The Unit, or name of the unit to update
        desired_state: State the user wishes the Unit to be in
            ("inactive", "loaded", or "launched")

    Returns:
        Unit: The unit that was updated

    Raises:
        fleet.v1.errors.APIError: Fleet returned a response code >= 400
        ValueError: An invalid value was provided for ``desired_state``
    """
    if desired_state not in self._STATES:
        raise ValueError('state must be one of: {0}'.format(
            self._STATES
        ))

    # if we are given an object, grab it's name property
    # otherwise, convert to unicode
    if isinstance(unit, Unit):
        unit = unit.name
    else:
        unit = str(unit)

    self._single_request('Units.Set', unitName=unit, body={
        'desiredState': desired_state
    })

    return self.get_unit(unit)
Update the desired state of a unit running in the cluster Args: unit (str, Unit): The Unit, or name of the unit to update desired_state: State the user wishes the Unit to be in ("inactive", "loaded", or "launched") Returns: Unit: The unit that was updated Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400 ValueError: An invalid value was provided for ``desired_state``
def parse_ts(ts):
    """
    parse timestamp.

    :param ts: timestamp in ISO8601 format
    :return: tbd!!!
    """
    # ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
    # ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
    # RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
    dt = maya.parse(ts.strip())
    return dt.datetime(naive=True)
parse timestamp. :param ts: timestamp in ISO8601 format :return: tbd!!!
def _conditional_toward_zero(method, sign):
    """
    Whether to round toward zero.

    :param method: rounding method
    :type method: element of RoundingMethods.METHODS()
    :param int sign: -1, 0, or 1 as appropriate

    Complexity: O(1)
    """
    return method is RoundingMethods.ROUND_HALF_ZERO or \
        (method is RoundingMethods.ROUND_HALF_DOWN and sign == 1) or \
        (method is RoundingMethods.ROUND_HALF_UP and sign == -1)
Whether to round toward zero. :param method: rounding method :type method: element of RoundingMethods.METHODS() :param int sign: -1, 0, or 1 as appropriate Complexity: O(1)
def _social_auth_login(self, request, **kwargs):
    '''
    View function that redirects to social auth login,
    in case the user is not logged in.
    '''
    if request.user.is_authenticated():
        if not request.user.is_active or not request.user.is_staff:
            raise PermissionDenied()
    else:
        messages.add_message(request, messages.WARNING,
                             'Please authenticate first.')
        return redirect_to_login(request.get_full_path())
View function that redirects to social auth login, in case the user is not logged in.
def files_mkdir(self, path, parents=False, **kwargs):
    """Creates a directory within the MFS.

    .. code-block:: python

        >>> c.files_mkdir("/test")
        b''

    Parameters
    ----------
    path : str
        Filepath within the MFS
    parents : bool
        Create parent directories as needed and
        do not raise an exception if the requested directory already exists
    """
    kwargs.setdefault("opts", {"parents": parents})

    args = (path,)
    return self._client.request('/files/mkdir', args, **kwargs)
Creates a directory within the MFS. .. code-block:: python >>> c.files_mkdir("/test") b'' Parameters ---------- path : str Filepath within the MFS parents : bool Create parent directories as needed and do not raise an exception if the requested directory already exists
def _onCompletionListItemSelected(self, index):
    """Item selected. Insert completion to editor
    """
    model = self._widget.model()
    selectedWord = model.words[index]
    textToInsert = selectedWord[len(model.typedText()):]
    self._qpart.textCursor().insertText(textToInsert)
    self._closeCompletion()
Item selected. Insert completion to editor
def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Forwards unauthenticated requests to the admin page to the CAS login URL,
    as well as calls to django.contrib.auth.views.login and logout.
    """
    if view_func == login:
        return cas_login(request, *view_args, **view_kwargs)
    elif view_func == logout:
        return cas_logout(request, *view_args, **view_kwargs)

    if settings.CAS_ADMIN_PREFIX:
        if not request.path.startswith(settings.CAS_ADMIN_PREFIX):
            return None
    elif not view_func.__module__.startswith('django.contrib.admin.'):
        return None

    try:
        # use callable for pre-django 2.0
        is_authenticated = request.user.is_authenticated()
    except TypeError:
        is_authenticated = request.user.is_authenticated

    if is_authenticated:
        if request.user.is_staff:
            return None
        else:
            error = ('<h1>Forbidden</h1><p>You do not have staff '
                     'privileges.</p>')
            return HttpResponseForbidden(error)
    params = urlencode({REDIRECT_FIELD_NAME: request.get_full_path()})
    return HttpResponseRedirect(reverse(cas_login) + '?' + params)
Forwards unauthenticated requests to the admin page to the CAS login URL, as well as calls to django.contrib.auth.views.login and logout.
def exists(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if an SNS topic exists.

    CLI example::

        salt myminion boto_sns.exists mytopic region=us-east-1
    '''
    topics = get_all_topics(region=region, key=key, keyid=keyid,
                            profile=profile)
    if name.startswith('arn:aws:sns:'):
        return name in list(topics.values())
    else:
        return name in list(topics.keys())
Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1
def create_parser(subparsers):
    '''
    :param subparsers:
    :return:
    '''
    parser = subparsers.add_parser(
        'restart',
        help='Restart a topology',
        usage="%(prog)s [options] cluster/[role]/[env] <topology-name> [container-id]",
        add_help=True)

    args.add_titles(parser)
    args.add_cluster_role_env(parser)
    args.add_topology(parser)

    parser.add_argument(
        'container-id',
        nargs='?',
        type=int,
        default=-1,
        help='Identifier of the container to be restarted')

    args.add_config(parser)
    args.add_service_url(parser)
    args.add_verbose(parser)

    parser.set_defaults(subcommand='restart')
    return parser
:param subparsers: :return:
def get_filter_solvers(self, filter_): """Returns the filter solvers that can solve the given filter. Arguments --------- filter : dataql.resources.BaseFilter An instance of the a subclass of ``BaseFilter`` for which we want to get the solver classes that can solve it. Returns ------- list The list of filter solvers instances that can solve the given resource. Raises ------ dataql.solvers.exceptions.SolverNotFound When no solver is able to solve the given filter. Example ------- >>> from dataql.resources import Filter >>> registry = Registry() >>> registry.get_filter_solvers(Filter(name='foo')) [<FilterSolver>] >>> registry.get_filter_solvers(None) # doctest: +ELLIPSIS Traceback (most recent call last): dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:... """ solvers_classes = [s for s in self.filter_solver_classes if s.can_solve(filter_)] if solvers_classes: solvers = [] for solver_class in solvers_classes: # Put the solver instance in the cache if not cached yet. if solver_class not in self._filter_solvers_cache: self._filter_solvers_cache[solver_class] = solver_class(self) solvers.append(self._filter_solvers_cache[solver_class]) return solvers raise SolverNotFound(self, filter_)
Returns the filter solvers that can solve the given filter. Arguments --------- filter : dataql.resources.BaseFilter An instance of the a subclass of ``BaseFilter`` for which we want to get the solver classes that can solve it. Returns ------- list The list of filter solvers instances that can solve the given resource. Raises ------ dataql.solvers.exceptions.SolverNotFound When no solver is able to solve the given filter. Example ------- >>> from dataql.resources import Filter >>> registry = Registry() >>> registry.get_filter_solvers(Filter(name='foo')) [<FilterSolver>] >>> registry.get_filter_solvers(None) # doctest: +ELLIPSIS Traceback (most recent call last): dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
def receiver(url, **kwargs):
    """
    Return receiver instance from connection url string

    url <str> connection url eg. 'tcp://0.0.0.0:8080'
    """
    res = url_to_resources(url)
    fnc = res["receiver"]
    return fnc(res.get("url"), **kwargs)
Return receiver instance from connection url string url <str> connection url eg. 'tcp://0.0.0.0:8080'
def set_learning_objectives(self, objective_ids):
    """Sets the learning objectives.

    arg:    objective_ids (osid.id.Id[]): the learning objective ``Ids``
    raise:  InvalidArgument - ``objective_ids`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.ActivityForm.set_assets_template
    if not isinstance(objective_ids, list):
        raise errors.InvalidArgument()
    if self.get_learning_objectives_metadata().is_read_only():
        raise errors.NoAccess()
    idstr_list = []
    for object_id in objective_ids:
        if not self._is_valid_id(object_id):
            raise errors.InvalidArgument()
        idstr_list.append(str(object_id))
    self._my_map['learningObjectiveIds'] = idstr_list
Sets the learning objectives. arg: objective_ids (osid.id.Id[]): the learning objective ``Ids`` raise: InvalidArgument - ``objective_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def add_assay(self, name, assay):
    """
    Add an assay to the material.

    :param name: The name of the new assay.
    :param assay: A numpy array containing the size class mass fractions
      for the assay. The sequence of the assay's elements must correspond
      to the sequence of the material's size classes.
    """
    if not type(assay) is numpy.ndarray:
        raise Exception("Invalid assay. It must be a numpy array.")
    elif not assay.shape == (self.size_class_count,):
        raise Exception(
            "Invalid assay: It must have the same number of elements "
            "as the material has size classes.")
    elif name in self.assays.keys():
        raise Exception(
            "Invalid assay: An assay with that name already exists.")
    self.assays[name] = assay
Add an assay to the material. :param name: The name of the new assay. :param assay: A numpy array containing the size class mass fractions for the assay. The sequence of the assay's elements must correspond to the sequence of the material's size classes.
def get_count(self, *args, **selectors):
    """
    Return the count of UI object with *selectors*

    Example:

    | ${count} | Get Count | text=Accessibility | # Get the count of UI object text=Accessibility |
    | ${accessibility_text} | Get Object | text=Accessibility | # These two keywords combination |
    | ${count} | Get Count Of Object | ${accessibility_text} | # do the same thing. |
    """
    obj = self.get_object(**selectors)
    return self.get_count_of_object(obj)
Return the count of UI object with *selectors* Example: | ${count} | Get Count | text=Accessibility | # Get the count of UI object text=Accessibility | | ${accessibility_text} | Get Object | text=Accessibility | # These two keywords combination | | ${count} | Get Count Of Object | ${accessibility_text} | # do the same thing. |
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None): """ Extracts metrics from a prometheus histogram and sends them as gauges """ for sample in metric.samples: val = sample[self.SAMPLE_VALUE] if not self._is_value_valid(val): self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME])) continue custom_hostname = self._get_hostname(hostname, sample, scraper_config) if sample[self.SAMPLE_NAME].endswith("_sum"): tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname) self.gauge( "{}.{}.sum".format(scraper_config['namespace'], metric_name), val, tags=tags, hostname=custom_hostname, ) elif sample[self.SAMPLE_NAME].endswith("_count"): tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname) self.gauge( "{}.{}.count".format(scraper_config['namespace'], metric_name), val, tags=tags, hostname=custom_hostname, ) elif ( scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith("_bucket") and "Inf" not in sample[self.SAMPLE_LABELS]["le"] ): sample[self.SAMPLE_LABELS]["le"] = float(sample[self.SAMPLE_LABELS]["le"]) tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname) self.gauge( "{}.{}.count".format(scraper_config['namespace'], metric_name), val, tags=tags, hostname=custom_hostname, )
Extracts metrics from a prometheus histogram and sends them as gauges
def replace_handler(logger, match_handler, reconfigure):
    """
    Prepare to replace a handler.

    :param logger: Refer to :func:`find_handler()`.
    :param match_handler: Refer to :func:`find_handler()`.
    :param reconfigure: :data:`True` if an existing handler should be replaced,
                        :data:`False` otherwise.
    :returns: A tuple of two values:

              1. The matched :class:`~logging.Handler` object or :data:`None`
                 if no handler was matched.
              2. The :class:`~logging.Logger` to which the matched handler was
                 attached or the logger given to :func:`replace_handler()`.
    """
    handler, other_logger = find_handler(logger, match_handler)
    if handler and other_logger and reconfigure:
        # Remove the existing handler from the logger that its attached to
        # so that we can install a new handler that behaves differently.
        other_logger.removeHandler(handler)
        # Switch to the logger that the existing handler was attached to so
        # that reconfiguration doesn't narrow the scope of logging.
        logger = other_logger
    return handler, logger
Prepare to replace a handler. :param logger: Refer to :func:`find_handler()`. :param match_handler: Refer to :func:`find_handler()`. :param reconfigure: :data:`True` if an existing handler should be replaced, :data:`False` otherwise. :returns: A tuple of two values: 1. The matched :class:`~logging.Handler` object or :data:`None` if no handler was matched. 2. The :class:`~logging.Logger` to which the matched handler was attached or the logger given to :func:`replace_handler()`.
def setup_columns(self):
    """Creates the treeview stuff"""
    tv = self.view['tv_categories']

    # sets the model
    tv.set_model(self.model)

    # creates the columns
    cell = gtk.CellRendererText()
    tvcol = gtk.TreeViewColumn('Name', cell)

    def cell_data_func(col, cell, mod, it):
        if mod[it][0]:
            cell.set_property('text', mod[it][0].name)
        return

    tvcol.set_cell_data_func(cell, cell_data_func)
    tv.append_column(tvcol)
    return
Creates the treeview stuff
def dump_http(method, url, request_headers, response, output_stream): """ Dump all headers and response headers into output_stream. :param request_headers: Dictionary of HTTP request headers. :param response_headers: Dictionary of HTTP response headers. :param output_stream: Stream where the request is being dumped at. """ # Start header. output_stream.write('---------START-HTTP---------\n') # Get parsed url. parsed_url = urlsplit(url) # Dump all request headers recursively. http_path = parsed_url.path if parsed_url.query: http_path = http_path + '?' + parsed_url.query output_stream.write('{0} {1} HTTP/1.1\n'.format(method, http_path)) for k, v in list(request_headers.items()): if k is 'authorization': # Redact signature header value from trace logs. v = re.sub(r'Signature=([[0-9a-f]+)', 'Signature=*REDACTED*', v) output_stream.write('{0}: {1}\n'.format(k.title(), v)) # Write a new line. output_stream.write('\n') # Write response status code. output_stream.write('HTTP/1.1 {0}\n'.format(response.status)) # Dump all response headers recursively. for k, v in list(response.getheaders().items()): output_stream.write('{0}: {1}\n'.format(k.title(), v)) # For all errors write all the available response body. if response.status != 200 and \ response.status != 204 and response.status != 206: output_stream.write('{0}'.format(response.read())) # End header. output_stream.write('---------END-HTTP---------\n')
Dump all headers and response headers into output_stream. :param request_headers: Dictionary of HTTP request headers. :param response_headers: Dictionary of HTTP response headers. :param output_stream: Stream where the request is being dumped at.
def _gatherDataFromLookups(gpos, scriptOrder): """ Gather kerning and classes from the applicable lookups and return them in script order. """ lookupIndexes = _gatherLookupIndexes(gpos) seenLookups = set() kerningDictionaries = [] leftClassDictionaries = [] rightClassDictionaries = [] for script in scriptOrder: kerning = [] leftClasses = [] rightClasses = [] for lookupIndex in lookupIndexes[script]: if lookupIndex in seenLookups: continue seenLookups.add(lookupIndex) result = _gatherKerningForLookup(gpos, lookupIndex) if result is None: continue k, lG, rG = result kerning.append(k) leftClasses.append(lG) rightClasses.append(rG) if kerning: kerningDictionaries.append(kerning) leftClassDictionaries.append(leftClasses) rightClassDictionaries.append(rightClasses) return kerningDictionaries, leftClassDictionaries, rightClassDictionaries
Gather kerning and classes from the applicable lookups and return them in script order.
def parse_args(): """ parse main() args """ description = ( "Get Wikipedia article info and Wikidata via MediaWiki APIs.\n\n" "Gets a random English Wikipedia article by default, or in the\n" "language -lang, or from the wikisite -wiki, or by specific\n" "title -title. The output is a plain text extract unless -HTML.") epilog = ("Powered by https://github.com/siznax/wptools/ %s" % wptools.__version__) argp = argparse.ArgumentParser( description=description, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) argp.add_argument("-H", "-HTML", action='store_true', help="output HTML extract") argp.add_argument("-l", "-lang", default='en', help="language code") argp.add_argument("-n", "-nowrap", action='store_true', help="do not wrap text") argp.add_argument("-q", "-query", action='store_true', help="show query and exit") argp.add_argument("-s", "-silent", action='store_true', help="quiet output to stderr") argp.add_argument("-t", "-title", help="get a specific title") argp.add_argument("-v", "-verbose", action='store_true', help="HTTP status to stderr") argp.add_argument("-w", "-wiki", help="use alternative wikisite") return argp.parse_args()
parse main() args
def first_true(iterable, default=False, pred=None):
    """Returns the first true value in the iterable.

    If no true value is found, returns *default*

    If *pred* is not None, returns the first item
    for which pred(item) is true.
    """
    # first_true([a,b,c], x) --> a or b or c or x
    # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
    return next(filter(pred, iterable), default)
Returns the first true value in the iterable. If no true value is found, returns *default* If *pred* is not None, returns the first item for which pred(item) is true.
def _back_transform(self,inplace=True): """ Private method to remove log10 transformation from ensemble Parameters ---------- inplace: bool back transform self in place Returns ------ ParameterEnsemble : ParameterEnsemble if inplace if False Note ---- Don't call this method unless you know what you are doing """ if not self.istransformed: raise Exception("ParameterEnsemble already back transformed") istransformed = self.pst.parameter_data.loc[:,"partrans"] == "log" if inplace: self.loc[:,istransformed] = 10.0**(self.loc[:,istransformed]) self.loc[:,:] = (self.loc[:,:] -\ self.pst.parameter_data.offset)/\ self.pst.parameter_data.scale self.__istransformed = False else: vals = (self.pst.parameter_data.parval1 -\ self.pst.parameter_data.offset) /\ self.pst.parameter_data.scale new_en = ParameterEnsemble(pst=self.pst.get(),data=self.loc[:,:].copy(), columns=self.columns, mean_values=vals,istransformed=False) new_en.loc[:,istransformed] = 10.0**(self.loc[:,istransformed]) new_en.loc[:,:] = (new_en.loc[:,:] -\ new_en.pst.parameter_data.offset)/\ new_en.pst.parameter_data.scale return new_en
Private method to remove log10 transformation from ensemble Parameters ---------- inplace: bool back transform self in place Returns ------ ParameterEnsemble : ParameterEnsemble if inplace if False Note ---- Don't call this method unless you know what you are doing
def reconfigure_messaging(self, msg_host, msg_port):
    '''force messaging reconnector to the connect to the (host, port)'''
    self._messaging.create_external_route(
        'rabbitmq', host=msg_host, port=msg_port)
force messaging reconnector to the connect to the (host, port)
def create_task_from_cu(cu, prof=None): """ Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task """ try: logger.debug('Create Task from CU %s' % cu.name) if prof: prof.prof('task from cu - create', uid=cu.name.split(',')[0].strip()) task = Task() task.uid = cu.name.split(',')[0].strip() task.name = cu.name.split(',')[1].strip() task.parent_stage['uid'] = cu.name.split(',')[2].strip() task.parent_stage['name'] = cu.name.split(',')[3].strip() task.parent_pipeline['uid'] = cu.name.split(',')[4].strip() task.parent_pipeline['name'] = cu.name.split(',')[5].strip() task.rts_uid = cu.uid if cu.state == rp.DONE: task.exit_code = 0 else: task.exit_code = 1 task.path = ru.Url(cu.sandbox).path if prof: prof.prof('task from cu - done', uid=cu.name.split(',')[0].strip()) logger.debug('Task %s created from CU %s' % (task.uid, cu.name)) return task except Exception, ex: logger.exception('Task creation from CU failed, error: %s' % ex) raise
Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task
def add_loaded_callback(self, callback):
    """Add a callback to be run when the ALDB load is complete."""
    if callback not in self._cb_aldb_loaded:
        self._cb_aldb_loaded.append(callback)
Add a callback to be run when the ALDB load is complete.
def unchunk(self): """ Convert a chunked array back into a full array with (key,value) pairs where key is a tuple of indices, and value is an ndarray. """ plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split nchunks = self.getnumber(plan, vshape) full_shape = concatenate((nchunks, plan)) n = len(vshape) perm = concatenate(list(zip(range(n), range(n, 2*n)))) if self.uniform: def _unchunk(it): ordered = sorted(it, key=lambda kv: kv[0][split:]) keys, values = zip(*ordered) yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape) else: def _unchunk(it): ordered = sorted(it, key=lambda kv: kv[0][split:]) keys, values = zip(*ordered) k_chks = [k[split:] for k in keys] arr = empty(nchunks, dtype='object') for (i, d) in zip(k_chks, values): arr[i] = d yield keys[0][:split], allstack(arr.tolist()) # remove padding if self.padded: removepad = self.removepad rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n)))) else: rdd = self._rdd # skip partitionBy if there is not actually any chunking if array_equal(self.plan, self.vshape): rdd = rdd.map(lambda kv: (kv[0][:split], kv[1])) ordered = self._ordered else: ranges = self.kshape npartitions = int(prod(ranges)) if len(self.kshape) == 0: partitioner = lambda k: 0 else: partitioner = lambda k: ravel_multi_index(k[:split], ranges) rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk) ordered = True if array_equal(self.vshape, [1]): rdd = rdd.mapValues(lambda v: squeeze(v)) newshape = self.shape[:-1] else: newshape = self.shape return BoltArraySpark(rdd, shape=newshape, split=self._split, dtype=self.dtype, ordered=ordered)
Convert a chunked array back into a full array with (key,value) pairs where key is a tuple of indices, and value is an ndarray.
def _calculate_comparison_stats(truth_vcf):
    """Identify calls to validate from the input truth VCF.
    """
    # Avoid very small events for average calculations
    min_stat_size = 50
    min_median_size = 250
    sizes = []
    svtypes = set([])
    with utils.open_gzipsafe(truth_vcf) as in_handle:
        for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
            stats = _summarize_call(call)
            if stats["size"] > min_stat_size:
                sizes.append(stats["size"])
            svtypes.add(stats["svtype"])
    pct10 = int(np.percentile(sizes, 10))
    pct25 = int(np.percentile(sizes, 25))
    pct50 = int(np.percentile(sizes, 50))
    pct75 = int(np.percentile(sizes, 75))
    ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50),
                       (pct50, pct75), (pct75, max(sizes))]
    ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))]
    return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05),
            "svtypes": svtypes,
            "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)),
            "ranges": []}
Identify calls to validate from the input truth VCF.
def open(uri, mode, kerberos=False, user=None, password=None):
    """Implement streamed reader from a web site.

    Supports Kerberos and Basic HTTP authentication.

    Parameters
    ----------
    url: str
        The URL to open.
    mode: str
        The mode to open using.
    kerberos: boolean, optional
        If True, will attempt to use the local Kerberos credentials
    user: str, optional
        The username for authenticating over HTTP
    password: str, optional
        The password for authenticating over HTTP

    Note
    ----
    If neither kerberos or (user, password) are set, will connect
    unauthenticated.
    """
    if mode == 'rb':
        return BufferedInputBase(uri, mode, kerberos=kerberos,
                                 user=user, password=password)
    else:
        raise NotImplementedError('http support for mode %r not implemented' % mode)
Implement streamed reader from a web site. Supports Kerberos and Basic HTTP authentication. Parameters ---------- url: str The URL to open. mode: str The mode to open using. kerberos: boolean, optional If True, will attempt to use the local Kerberos credentials user: str, optional The username for authenticating over HTTP password: str, optional The password for authenticating over HTTP Note ---- If neither kerberos or (user, password) are set, will connect unauthenticated.
def find_all_matches(finder, ireq, pre=False):
    # type: (PackageFinder, InstallRequirement, bool) -> List[InstallationCandidate]
    """Find all matching dependencies using the supplied finder and the
    given ireq.

    :param finder: A package finder for discovering matching candidates.
    :type finder: :class:`~pip._internal.index.PackageFinder`
    :param ireq: An install requirement.
    :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
    :return: A list of matching candidates.
    :rtype: list[:class:`~pip._internal.index.InstallationCandidate`]
    """
    candidates = clean_requires_python(finder.find_all_candidates(ireq.name))
    versions = {candidate.version for candidate in candidates}
    allowed_versions = _get_filtered_versions(ireq, versions, pre)
    if not pre and not allowed_versions:
        allowed_versions = _get_filtered_versions(ireq, versions, True)
    candidates = {c for c in candidates if c.version in allowed_versions}
    return candidates
Find all matching dependencies using the supplied finder and the given ireq. :param finder: A package finder for discovering matching candidates. :type finder: :class:`~pip._internal.index.PackageFinder` :param ireq: An install requirement. :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A list of matching candidates. :rtype: list[:class:`~pip._internal.index.InstallationCandidate`]
def set_tile(self, row, col, value):
    """
    Set the tile at position row, col to have the given value.
    """
    # print('set_tile: y=', row, 'x=', col)
    if col < 0:
        print("ERROR - x less than zero", col)
        col = 0
        # return
    if col > self.grid_width - 1:
        print("ERROR - x larger than grid", col)
        col = self.grid_width - 1
        # return
    if row < 0:
        print("ERROR - y less than zero", row)
        row = 0
        # return
    if row > self.grid_height - 1:
        print("ERROR - y larger than grid", row)
        row = self.grid_height - 1
    self.grid[row][col] = value
Set the tile at position row, col to have the given value.
def tarbell_configure(command, args): """ Tarbell configuration routine. """ puts("Configuring Tarbell. Press ctrl-c to bail out!") # Check if there's settings configured settings = Settings() path = settings.path prompt = True if len(args): prompt = False config = _get_or_create_config(path) if prompt or "drive" in args: config.update(_setup_google_spreadsheets(config, path, prompt)) if prompt or "s3" in args: config.update(_setup_s3(config, path, prompt)) if prompt or "path" in args: config.update(_setup_tarbell_project_path(config, path, prompt)) if prompt or "templates" in args: if "project_templates" in config: override_templates = raw_input("\nFound Base Template config. Would you like to override them? [Default: No, 'none' to skip]") if override_templates and override_templates != "No" and override_templates != "no" and override_templates != "N" and override_templates != "n": config.update(_setup_default_templates(config, path, prompt)) else: puts("\nPreserving Base Template config...") else: config.update(_setup_default_templates(config, path, prompt)) settings.config = config with open(path, 'w') as f: puts("\nWriting {0}".format(colored.green(path))) settings.save() if all: puts("\n- Done configuring Tarbell. Type `{0}` for help.\n" .format(colored.green("tarbell"))) return settings
Tarbell configuration routine.
def createJobSubscriptionAsync(self, ackCallback, callback, jobExecutionType=jobExecutionTopicType.JOB_WILDCARD_TOPIC, jobReplyType=jobExecutionTopicReplyType.JOB_REQUEST_TYPE, jobId=None): """ **Description** Asynchronously creates an MQTT subscription to a jobs related topic based on the provided arguments **Syntax** .. code:: python #Subscribe to notify-next topic to monitor change in job referred to by $next myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_NOTIFY_NEXT_TOPIC) #Subscribe to notify topic to monitor changes to jobs in pending list myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_NOTIFY_TOPIC) #Subscribe to receive messages for job execution updates myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_UPDATE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE) #Subscribe to receive messages for describing a job execution myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_DESCRIBE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE, jobId) **Parameters** *ackCallback* - Callback to be invoked when the client receives a SUBACK. Should be in form :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect request and :code:`data` is the granted QoS for this subscription. *callback* - Function to be called when a new message for the subscribed job topic comes in. Should be in form :code:`customCallback(client, userdata, message)`, where :code:`message` contains :code:`topic` and :code:`payload`. Note that :code:`client` and :code:`userdata` are here just to be aligned with the underneath Paho callback function signature. These fields are pending to be deprecated and should not be depended on. *jobExecutionType* - Member of the jobExecutionTopicType class specifying the jobs topic to subscribe to Defaults to jobExecutionTopicType.JOB_WILDCARD_TOPIC *jobReplyType* - Member of the jobExecutionTopicReplyType class specifying the (optional) reply sub-topic to subscribe to Defaults to jobExecutionTopicReplyType.JOB_REQUEST_TYPE which indicates the subscription isn't intended for a jobs reply topic *jobId* - JobId of the topic if the topic type requires one. Defaults to None **Returns** Subscribe request packet id, for tracking purpose in the corresponding callback. """ topic = self._thingJobManager.getJobTopic(jobExecutionType, jobReplyType, jobId) return self._AWSIoTMQTTClient.subscribeAsync(topic, self._QoS, ackCallback, callback)
**Description** Asynchronously creates an MQTT subscription to a jobs related topic based on the provided arguments **Syntax** .. code:: python #Subscribe to notify-next topic to monitor change in job referred to by $next myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_NOTIFY_NEXT_TOPIC) #Subscribe to notify topic to monitor changes to jobs in pending list myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_NOTIFY_TOPIC) #Subscribe to receive messages for job execution updates myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_UPDATE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE) #Subscribe to receive messages for describing a job execution myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_DESCRIBE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE, jobId) **Parameters** *ackCallback* - Callback to be invoked when the client receives a SUBACK. Should be in form :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect request and :code:`data` is the granted QoS for this subscription. *callback* - Function to be called when a new message for the subscribed job topic comes in. Should be in form :code:`customCallback(client, userdata, message)`, where :code:`message` contains :code:`topic` and :code:`payload`. Note that :code:`client` and :code:`userdata` are here just to be aligned with the underneath Paho callback function signature. These fields are pending to be deprecated and should not be depended on. *jobExecutionType* - Member of the jobExecutionTopicType class specifying the jobs topic to subscribe to Defaults to jobExecutionTopicType.JOB_WILDCARD_TOPIC *jobReplyType* - Member of the jobExecutionTopicReplyType class specifying the (optional) reply sub-topic to subscribe to Defaults to jobExecutionTopicReplyType.JOB_REQUEST_TYPE which indicates the subscription isn't intended for a jobs reply topic *jobId* - JobId of the topic if the topic type requires one. Defaults to None **Returns** Subscribe request packet id, for tracking purpose in the corresponding callback.
def Register(self, name, constructor):
    """Registers a new constructor in the factory.

    Args:
      name: A name associated with given constructor.
      constructor: A constructor function that creates instances.

    Raises:
      ValueError: If there already is a constructor associated with given
        name.
    """
    precondition.AssertType(name, Text)

    if name in self._constructors:
        message = "Duplicated constructors %r and %r for name '%s'"
        message %= (constructor, self._constructors[name], name)
        raise ValueError(message)

    self._constructors[name] = constructor
Registers a new constructor in the factory. Args: name: A name associated with given constructor. constructor: A constructor function that creates instances. Raises: ValueError: If there already is a constructor associated with given name.
def insertValue(self, pos, configValue, displayValue=None):
    """ Will insert the configValue in the configValues and the displayValue
        in the displayValues list. If displayValue is None, the configValue
        is set in the displayValues as well
    """
    self._configValues.insert(pos, configValue)
    self._displayValues.insert(pos,
                               displayValue if displayValue is not None else configValue)
Will insert the configValue in the configValues and the displayValue in the displayValues list. If displayValue is None, the configValue is set in the displayValues as well
def _execute(self, sql, params):
    """Execute statement with reconnecting by connection closed error codes.

    2006 (CR_SERVER_GONE_ERROR): MySQL server has gone away
    2013 (CR_SERVER_LOST): Lost connection to MySQL server during query
    2055 (CR_SERVER_LOST_EXTENDED): Lost connection to MySQL server at '%s', system error: %d
    """
    try:
        return self._execute_unsafe(sql, params)
    except MySQLdb.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2055):
            self._log("Connection with server is lost. Trying to reconnect.")
            self.connect()
            return self._execute_unsafe(sql, params)
        raise
Execute statement with reconnecting by connection closed error codes. 2006 (CR_SERVER_GONE_ERROR): MySQL server has gone away 2013 (CR_SERVER_LOST): Lost connection to MySQL server during query 2055 (CR_SERVER_LOST_EXTENDED): Lost connection to MySQL server at '%s', system error: %d
def find_field_by_name(browser, field_type, name):
    """
    Locate the control input with the given ``name``.

    :param browser: ``world.browser``
    :param string field_type: a field type (i.e. `button`)
    :param string name: ``name`` attribute

    Returns: an :class:`ElementSelector`
    """
    return ElementSelector(
        browser,
        field_xpath(field_type, 'name') % string_literal(name),
        filter_displayed=True,
    )
Locate the control input with the given ``name``. :param browser: ``world.browser`` :param string field_type: a field type (i.e. `button`) :param string name: ``name`` attribute Returns: an :class:`ElementSelector`
def table(self, name, database=None, schema=None):
    """Create a table expression that references a particular a table
    called `name` in a MySQL database called `database`.

    Parameters
    ----------
    name : str
        The name of the table to retrieve.
    database : str, optional
        The database in which the table referred to by `name` resides. If
        ``None`` then the ``current_database`` is used.
    schema : str, optional
        The schema in which the table resides. If ``None`` then the
        `public` schema is assumed.

    Returns
    -------
    table : TableExpr
        A table expression.
    """
    if database is not None and database != self.current_database:
        return self.database(name=database).table(name=name, schema=schema)
    else:
        alch_table = self._get_sqla_table(name, schema=schema)
        node = self.table_class(alch_table, self, self._schemas.get(name))
        return self.table_expr_class(node)
Create a table expression that references a particular a table called `name` in a MySQL database called `database`. Parameters ---------- name : str The name of the table to retrieve. database : str, optional The database in which the table referred to by `name` resides. If ``None`` then the ``current_database`` is used. schema : str, optional The schema in which the table resides. If ``None`` then the `public` schema is assumed. Returns ------- table : TableExpr A table expression.
def build(self): """Builds this controlled gate. :return: The controlled gate, defined by this object. :rtype: Program """ self.defined_gates = set(STANDARD_GATE_NAMES) prog = self._recursive_builder(self.operation, self.gate_name, self.control_qubits, self.target_qubit) return prog
Builds this controlled gate. :return: The controlled gate, defined by this object. :rtype: Program
def item_count(self, request, variant_id=None): """ Get quantity of a single item in the basket """ bid = utils.basket_id(request) item = ProductVariant.objects.get(id=variant_id) try: count = BasketItem.objects.get(basket_id=bid, variant=item).quantity except BasketItem.DoesNotExist: count = 0 return Response(data={"quantity": count}, status=status.HTTP_200_OK)
Get quantity of a single item in the basket
async def download(self, resource_url): ''' Download given Resource URL by finding path through graph and applying each step ''' resolver_path = self.find_path_from_url(resource_url) await self.apply_resolver_path(resource_url, resolver_path)
Download given Resource URL by finding path through graph and applying each step
def build_bam_tags(): '''builds the list of BAM tags to be added to output BAMs''' #pylint: disable=unused-argument def _combine_filters(fam, paired_align, align): filters = [x.filter_value for x in [fam, align] if x and x.filter_value] if filters: return ";".join(filters).replace('; ', ';') return None boolean_tag_value = {True:1} tags = [ BamTag("X0", "Z", ("filter (why the alignment was excluded)"), _combine_filters), BamTag("X1", "Z", ("leftmost~rightmost matched pair positions"), lambda fam, pair, align: pair.positions('{left}~{right}')), BamTag("X2", "Z", ("L~R CIGARs"), lambda fam, pair, align: pair.cigars('{left}~{right}')), BamTag("X3", "i", "unique identifier for this alignment family", lambda fam, pair, align: fam.umi_sequence), BamTag("X4", "Z", ("L~R UMT barcodes for this alignment family; because " "of fuzzy matching the family UMT may be distinct " "from the UMT of the original alignment"), lambda fam, pair, align: fam.umt('{left}~{right}')), BamTag("X5", "i", "family size (number of align pairs in this family)", lambda fam, pair, align: fam.included_pair_count), BamTag("X6", "i", ("presence of this tag signals that this alignment " "would be the template for the consensus alignment"), lambda fam, pair, align: boolean_tag_value.get(fam.is_consensus_template(align), None))] return tags
builds the list of BAM tags to be added to output BAMs
def validate(self): """ Error check the attributes of the ActivateRequestPayload object. """ if self.unique_identifier is not None: if not isinstance(self.unique_identifier, attributes.UniqueIdentifier): msg = "invalid unique identifier" raise TypeError(msg) if self.compromise_occurrence_date is not None: if not isinstance(self.compromise_occurrence_date, primitives.DateTime): msg = "invalid compromise time" raise TypeError(msg) if not isinstance(self.revocation_reason, objects.RevocationReason): msg = "invalid revocation reason" raise TypeError(msg)
Error check the attributes of the ActivateRequestPayload object.
def unicode_to_hex(unicode_string): """ Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str """ if unicode_string is None: return None acc = [] for c in unicode_string: s = hex(ord(c)).replace("0x", "").upper() acc.append("U+" + ("0" * (4 - len(s))) + s) return u" ".join(acc)
Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str
def _encode_observations(self, observations): """Encodes observations as PNG.""" return [ Observation( self._session.obj.run( self._encoded_image_t.obj, feed_dict={self._decoded_image_p.obj: observation} ), self._decode_png ) for observation in observations ]
Encodes observations as PNG.
def retweet(self, id): """ Retweet a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise """ try: self._client.retweet(id=id) return True except TweepError as e: if e.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR: return False raise
Retweet a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise
def bytes_to_number(b, endian='big'): """ Convert a string to an integer. :param b: String or bytearray to convert. :param endian: Byte order to convert into ('big' or 'little' endian-ness, default 'big') Assumes bytes are 8 bits. This is a special-case version of string_to_number with a full base-256 ASCII alphabet. It is the reverse of ``number_to_bytes(n)``. Examples:: >>> bytes_to_number(b'*') 42 >>> bytes_to_number(b'\\xff') 255 >>> bytes_to_number(b'\\x01\\x00') 256 >>> bytes_to_number(b'\\x00\\x01', endian='little') 256 """ if endian == 'big': b = reversed(b) n = 0 for i, ch in enumerate(bytearray(b)): n ^= ch << i * 8 return n
Convert a string to an integer. :param b: String or bytearray to convert. :param endian: Byte order to convert into ('big' or 'little' endian-ness, default 'big') Assumes bytes are 8 bits. This is a special-case version of string_to_number with a full base-256 ASCII alphabet. It is the reverse of ``number_to_bytes(n)``. Examples:: >>> bytes_to_number(b'*') 42 >>> bytes_to_number(b'\\xff') 255 >>> bytes_to_number(b'\\x01\\x00') 256 >>> bytes_to_number(b'\\x00\\x01', endian='little') 256
def http_post(self, path, query_data={}, post_data={}, files=None, **kwargs): """Make a POST request to the Gitlab server. Args: path (str): Path or full URL to query ('/projects' or 'http://whatever/v4/api/projecs') query_data (dict): Data to send as query parameters post_data (dict): Data to send in the body (will be converted to json) files (dict): The files to send to the server **kwargs: Extra options to send to the server (e.g. sudo) Returns: The parsed json returned by the server if json is returned, else the raw content Raises: GitlabHttpError: When the return code is not 2xx GitlabParsingError: If the json data could not be parsed """ result = self.http_request('post', path, query_data=query_data, post_data=post_data, files=files, **kwargs) try: if result.headers.get('Content-Type', None) == 'application/json': return result.json() except Exception: raise GitlabParsingError( error_message="Failed to parse the server message") return result
Make a POST request to the Gitlab server. Args: path (str): Path or full URL to query ('/projects' or 'http://whatever/v4/api/projecs') query_data (dict): Data to send as query parameters post_data (dict): Data to send in the body (will be converted to json) files (dict): The files to send to the server **kwargs: Extra options to send to the server (e.g. sudo) Returns: The parsed json returned by the server if json is returned, else the raw content Raises: GitlabHttpError: When the return code is not 2xx GitlabParsingError: If the json data could not be parsed
def main(): """Used for development and testing.""" expr_list = [ "max(-_.千幸福的笑脸{घोड़ा=馬, " "dn2=dv2,千幸福的笑脸घ=千幸福的笑脸घ}) gte 100 " "times 3 && " "(min(ເຮືອນ{dn3=dv3,家=дом}) < 10 or sum(biz{dn5=dv5}) >99 and " "count(fizzle) lt 0or count(baz) > 1)".decode('utf8'), "max(foo{hostname=mini-mon,千=千}, 120) > 100 and (max(bar)>100 " " or max(biz)>100)".decode('utf8'), "max(foo)>=100", "test_metric{this=that, that = this} < 1", "max ( 3test_metric5 { this = that }) lt 5 times 3", "3test_metric5 lt 3", "ntp.offset > 1 or ntp.offset < -5", "max(3test_metric5{it's this=that's it}) lt 5 times 3", "count(log.error{test=1}, deterministic) > 1.0", "count(log.error{test=1}, deterministic, 120) > 1.0", "last(test_metric{hold=here}) < 13", "count(log.error{test=1}, deterministic, 130) > 1.0", "count(log.error{test=1}, deterministic) > 1.0 times 0", ] for expr in expr_list: print('orig expr: {}'.format(expr.encode('utf8'))) sub_exprs = [] try: alarm_expr_parser = AlarmExprParser(expr) sub_exprs = alarm_expr_parser.sub_expr_list except Exception as ex: print("Parse failed: {}".format(ex)) for sub_expr in sub_exprs: print('sub expr: {}'.format( sub_expr.fmtd_sub_expr_str.encode('utf8'))) print('sub_expr dimensions: {}'.format( sub_expr.dimensions_str.encode('utf8'))) print('sub_expr deterministic: {}'.format( sub_expr.deterministic)) print('sub_expr period: {}'.format( sub_expr.period)) print("") print("")
Used for development and testing.
def applicationpolicy(arg=None): """ Decorator for application policy method. Allows policy to be built up from methods registered for different event classes. """ def _mutator(func): wrapped = singledispatch(func) @wraps(wrapped) def wrapper(*args, **kwargs): event = kwargs.get('event') or args[-1] return wrapped.dispatch(type(event))(*args, **kwargs) wrapper.register = wrapped.register return wrapper assert isfunction(arg), arg return _mutator(arg)
Decorator for application policy method. Allows policy to be built up from methods registered for different event classes.
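A minimal usage sketch (not from the source) of how such a policy is built up: it assumes the applicationpolicy decorator above is importable from its module, and the event classes below are made up for illustration. The wrapper dispatches on the type of the 'event' keyword argument or, failing that, the last positional argument.
# Sketch only: assumes `applicationpolicy` (defined above) is importable.
# from your_module import applicationpolicy   # hypothetical import path
class Created:
    pass

class Discarded:
    pass

class Policy:
    @applicationpolicy
    def policy(self, event):
        # Fallback for event types with no registered handler.
        print('ignoring', type(event).__name__)

    @policy.register(Created)
    def _(self, event):
        print('handling Created')

    @policy.register(Discarded)
    def _(self, event):
        print('handling Discarded')

Policy().policy(event=Created())   # -> handling Created
Policy().policy(Discarded())       # last positional argument also works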
def _find_by_android(self, browser, criteria, tag, constraints): """Find element matches by UI Automator.""" return self._filter_elements( browser.find_elements_by_android_uiautomator(criteria), tag, constraints)
Find element matches by UI Automator.
def selectlastrow(self, window_name, object_name): """ Select last row @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer """ object_handle = self._get_object_handle(window_name, object_name) if not object_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) cell = object_handle.AXRows[-1] if not cell.AXSelected: object_handle.activate() cell.AXSelected = True else: # Selected pass return 1
Select last row @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
async def get_creds(self, proof_req_json: str, filt: dict = None, filt_dflt_incl: bool = False) -> (Set[str], str): """ Get credentials from HolderProver wallet corresponding to proof request and filter criteria; return credential identifiers from wallet and credentials json. Return empty set and empty production for no such credentials. :param proof_req_json: proof request json as Verifier creates; has entries for proof request's nonce, name, and version; plus credential's requested attributes, requested predicates. I.e., :: { 'nonce': string, # indy-sdk makes no semantic specification on this value 'name': string, # indy-sdk makes no semantic specification on this value 'version': numeric-string, # indy-sdk makes no semantic specification on this value 'requested_attributes': { '<attr_uuid>': { # aka attr_referent, a proof-request local identifier 'name': string, # attribute name (matches case- and space-insensitively) 'restrictions' [ # optional { "schema_id": string, # optional "schema_issuer_did": string, # optional "schema_name": string, # optional "schema_version": string, # optional "issuer_did": string, # optional "cred_def_id": string # optional }, { ... # if more than one restriction given, combined disjunctively (i.e., via OR) } ], 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': int, # optional, epoch seconds 'to': int # optional, epoch seconds } }, ... }, 'requested_predicates': { '<pred_uuid>': { # aka predicate_referent, a proof-request local predicate identifier 'name': string, # attribute name (matches case- and space-insensitively) 'p_type': '>=', 'p_value': int, # predicate value 'restrictions': [ # optional { "schema_id": string, # optional "schema_issuer_did": string, # optional "schema_name": string, # optional "schema_version": string, # optional "issuer_did": string, # optional "cred_def_id": string # optional }, { ... # if more than one restriction given, combined disjunctively (i.e., via OR) } ], 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': int, # optional, epoch seconds 'to': int # optional, epoch seconds } }, ... }, 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': Optional<int>, 'to': Optional<int> } } :param filt: filter for matching attribute-value pairs and predicates; dict mapping each cred def id to dict (specify empty dict or none for no filter, matching all) mapping attributes to values to match or compare. E.g., :: { 'Vx4E82R17q...:3:CL:16:0': { 'attr-match': { 'name': 'Alex', 'sex': 'M', 'favouriteDrink': None }, 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND) 'favouriteNumber' : 10, 'score': '100' # nicety: implementation converts to int for caller }, }, 'R17v42T4pk...:3:CL:19:0': { 'attr-match': { 'height': 175, 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND) } }, 'Z9ccax812j...:3:CL:27:0': { 'attr-match': {} # match all attributes on this cred def } ... 
} :param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not identify by cred def, or to exclude (False) all such credentials :return: tuple with (set of referents, creds json for input proof request); empty set and empty production for no such credential """ LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt) if filt is None: filt = {} rv = None creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json) creds = json.loads(creds_json) cred_ids = set() if filt: for cd_id in filt: try: json.loads(await self.get_cred_def(cd_id)) except AbsentCredDef: LOGGER.warning('HolderProver.get_creds: ignoring filter criterion, no cred def on %s', cd_id) filt.pop(cd_id) for inner_creds in {**creds['attrs'], **creds['predicates']}.values(): for cred in inner_creds: # cred is a dict in a list of dicts cred_info = cred['cred_info'] if filt: cred_cd_id = cred_info['cred_def_id'] if cred_cd_id not in filt: if filt_dflt_incl: cred_ids.add(cred_info['referent']) continue if 'attr-match' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None if not {k: str(filt[cred_cd_id].get('attr-match', {})[k]) for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items(): continue if 'minima' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None minima = filt[cred_cd_id].get('minima', {}) try: if any((attr not in cred_info['attrs']) or (int(cred_info['attrs'][attr]) < int(minima[attr])) for attr in minima): continue except ValueError: continue # int conversion failed - reject candidate cred_ids.add(cred_info['referent']) else: cred_ids.add(cred_info['referent']) if filt: creds = json.loads(prune_creds_json(creds, cred_ids)) rv = (cred_ids, json.dumps(creds)) LOGGER.debug('HolderProver.get_creds <<< %s', rv) return rv
Get credentials from HolderProver wallet corresponding to proof request and filter criteria; return credential identifiers from wallet and credentials json. Return empty set and empty production for no such credentials. :param proof_req_json: proof request json as Verifier creates; has entries for proof request's nonce, name, and version; plus credential's requested attributes, requested predicates. I.e., :: { 'nonce': string, # indy-sdk makes no semantic specification on this value 'name': string, # indy-sdk makes no semantic specification on this value 'version': numeric-string, # indy-sdk makes no semantic specification on this value 'requested_attributes': { '<attr_uuid>': { # aka attr_referent, a proof-request local identifier 'name': string, # attribute name (matches case- and space-insensitively) 'restrictions' [ # optional { "schema_id": string, # optional "schema_issuer_did": string, # optional "schema_name": string, # optional "schema_version": string, # optional "issuer_did": string, # optional "cred_def_id": string # optional }, { ... # if more than one restriction given, combined disjunctively (i.e., via OR) } ], 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': int, # optional, epoch seconds 'to': int # optional, epoch seconds } }, ... }, 'requested_predicates': { '<pred_uuid>': { # aka predicate_referent, a proof-request local predicate identifier 'name': string, # attribute name (matches case- and space-insensitively) 'p_type': '>=', 'p_value': int, # predicate value 'restrictions': [ # optional { "schema_id": string, # optional "schema_issuer_did": string, # optional "schema_name": string, # optional "schema_version": string, # optional "issuer_did": string, # optional "cred_def_id": string # optional }, { ... # if more than one restriction given, combined disjunctively (i.e., via OR) } ], 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': int, # optional, epoch seconds 'to': int # optional, epoch seconds } }, ... }, 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': Optional<int>, 'to': Optional<int> } } :param filt: filter for matching attribute-value pairs and predicates; dict mapping each cred def id to dict (specify empty dict or none for no filter, matching all) mapping attributes to values to match or compare. E.g., :: { 'Vx4E82R17q...:3:CL:16:0': { 'attr-match': { 'name': 'Alex', 'sex': 'M', 'favouriteDrink': None }, 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND) 'favouriteNumber' : 10, 'score': '100' # nicety: implementation converts to int for caller }, }, 'R17v42T4pk...:3:CL:19:0': { 'attr-match': { 'height': 175, 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND) } }, 'Z9ccax812j...:3:CL:27:0': { 'attr-match': {} # match all attributes on this cred def } ... } :param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not identify by cred def, or to exclude (False) all such credentials :return: tuple with (set of referents, creds json for input proof request); empty set and empty production for no such credential
def get_reference_templates(self, ref_types): """Return the reference templates for the types as an ordered dictionary.""" return OrderedDict([(x, self.get_reference_template(x)) for x in ref_types])
Return the reference templates for the types as an ordered dictionary.
def putscript(self, name, content): """Upload a script to the server See MANAGESIEVE specifications, section 2.6 :param name: script's name :param content: script's content :rtype: boolean """ content = tools.to_bytes(content) content = tools.to_bytes("{%d+}" % len(content)) + CRLF + content code, data = ( self.__send_command("PUTSCRIPT", [name.encode("utf-8"), content])) if code == "OK": return True return False
Upload a script to the server See MANAGESIEVE specifications, section 2.6 :param name: script's name :param content: script's content :rtype: boolean
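The literal framing that putscript builds ("{<byte-count>+}" followed by CRLF and the raw script bytes) can be illustrated without a server. This is a stand-alone sketch of that framing only, not part of the library:
# Reproduces only the non-synchronizing literal framing used above.
CRLF = b"\r\n"

def frame_script(content):
    data = content.encode("utf-8")
    return b"{%d+}" % len(data) + CRLF + data

print(frame_script('require "fileinto";'))
# -> b'{19+}\r\nrequire "fileinto";'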
def svm_version_path(version): """ Path to specified spark version. Accepts semantic version numbering. :param version: Spark version as String :return: String. """ return os.path.join(Spark.HOME_DIR, Spark.SVM_DIR, 'v{}'.format(version))
Path to specified spark version. Accepts semantic version numbering. :param version: Spark version as String :return: String.
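A worked example of the resulting path; the HOME_DIR and SVM_DIR values below are placeholders, not taken from the Spark class:
import os

# Placeholder values standing in for Spark.HOME_DIR and Spark.SVM_DIR.
HOME_DIR = "/opt/spark"
SVM_DIR = "versions"

def svm_version_path(version):
    return os.path.join(HOME_DIR, SVM_DIR, 'v{}'.format(version))

print(svm_version_path("2.4.3"))   # -> /opt/spark/versions/v2.4.3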
def insert(self, cache_key, paths, overwrite=False): """Cache the output of a build. By default, checks cache.has(key) first, only proceeding to create and insert an artifact if it is not already in the cache (though `overwrite` can be used to skip the check and unconditionally insert). :param CacheKey cache_key: A CacheKey object. :param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root. :param bool overwrite: Skip check for existing, insert even if already in cache. """ missing_files = [f for f in paths if not os.path.exists(f)] if missing_files: raise ArtifactCacheError('Tried to cache nonexistent files {0}'.format(missing_files)) if not overwrite: if self.has(cache_key): logger.debug('Skipping insert of existing artifact: {0}'.format(cache_key)) return False try: self.try_insert(cache_key, paths) return True except NonfatalArtifactCacheError as e: logger.error('Error while writing to artifact cache: {0}'.format(e)) return False
Cache the output of a build. By default, checks cache.has(key) first, only proceeding to create and insert an artifact if it is not already in the cache (though `overwrite` can be used to skip the check and unconditionally insert). :param CacheKey cache_key: A CacheKey object. :param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root. :param bool overwrite: Skip check for existing, insert even if already in cache.
def create_build_configuration_set_raw(**kwargs): """ Create a new BuildConfigurationSet. """ config_set = _create_build_config_set_object(**kwargs) response = utils.checked_api_call(pnc_api.build_group_configs, 'create_new', body=config_set) if response: return response.content
Create a new BuildConfigurationSet.
def set_scanner (type, scanner): """ Sets a scanner class that will be used for this 'type'. """ if __debug__: from .scanner import Scanner assert isinstance(type, basestring) assert issubclass(scanner, Scanner) validate (type) __types [type]['scanner'] = scanner
Sets a scanner class that will be used for this 'type'.
def missing_intervals(startdate, enddate, start, end, dateconverter=None, parseinterval=None, intervals=None): '''Given ``startdate`` and ``enddate`` dates, evaluate the date intervals from which data is not available. It returns a list of two-dimensional tuples containing start and end date for the interval. The list could contain 0, 1 or 2 tuples.''' parseinterval = parseinterval or default_parse_interval dateconverter = dateconverter or todate startdate = dateconverter(parseinterval(startdate, 0)) enddate = max(startdate, dateconverter(parseinterval(enddate, 0))) if intervals is not None and not isinstance(intervals, Intervals): intervals = Intervals(intervals) calc_intervals = Intervals() # we have some history already if start: # the startdate not available if startdate < start: calc_start = startdate calc_end = parseinterval(start, -1) if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) if enddate > end: calc_start = parseinterval(end, 1) calc_end = enddate if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) else: start = startdate end = enddate calc_intervals.append(Interval(startdate, enddate)) if calc_intervals: if intervals: calc_intervals.extend(intervals) elif intervals: calc_intervals = intervals return calc_intervals
Given ``startdate`` and ``enddate`` dates, evaluate the date intervals from which data is not available. It returns a list of two-dimensional tuples containing start and end date for the interval. The list could contain 0, 1 or 2 tuples.
def showMetadata(dat): """ Display the metadata for the specified LiPD in pretty print | Example | showMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict dat: Metadata :return none: """ _tmp = rm_values_fields(copy.deepcopy(dat)) print(json.dumps(_tmp, indent=2)) return
Display the metadata for the specified LiPD in pretty print | Example | showMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict dat: Metadata :return none:
def _latex_format(obj: Any) -> str: """Format an object as a latex string.""" if isinstance(obj, float): try: return sympy.latex(symbolize(obj)) except ValueError: return "{0:.4g}".format(obj) return str(obj)
Format an object as a latex string.
def _get_required_fn(fn, root_path): """ Definition of the MD5 file requires, that all paths will be absolute for the package directory, not for the filesystem. This function converts filesystem-absolute paths to package-absolute paths. Args: fn (str): Local/absolute path to the file. root_path (str): Local/absolute path to the package directory. Returns: str: Package-absolute path to the file. Raises: ValueError: When `fn` is absolute and `root_path` relative or \ conversely. """ if not fn.startswith(root_path): raise ValueError("Both paths have to be absolute or local!") replacer = "/" if root_path.endswith("/") else "" return fn.replace(root_path, replacer, 1)
Definition of the MD5 file requires, that all paths will be absolute for the package directory, not for the filesystem. This function converts filesystem-absolute paths to package-absolute paths. Args: fn (str): Local/absolute path to the file. root_path (str): Local/absolute path to the package directory. Returns: str: Package-absolute path to the file. Raises: ValueError: When `fn` is absolute and `root_path` relative or \ conversely.
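A runnable illustration of the package-absolute conversion described above; the function body is copied from the entry and the paths are made up:
# Paths below are made up for illustration.
def _get_required_fn(fn, root_path):
    if not fn.startswith(root_path):
        raise ValueError("Both paths have to be absolute or local!")
    replacer = "/" if root_path.endswith("/") else ""
    return fn.replace(root_path, replacer, 1)

print(_get_required_fn("/tmp/package/data/file.xml", "/tmp/package"))
# -> /data/file.xml
print(_get_required_fn("/tmp/package/data/file.xml", "/tmp/package/"))
# -> /data/file.xml  (the replacer branch keeps the leading slash)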
def write(self, data, auto_flush=True): """ <Purpose> Writes a data string to the file. <Arguments> data: A string containing some data. auto_flush: Boolean argument, if set to 'True', all data will be flushed from internal buffer. <Exceptions> None. <Return> None. """ self.temporary_file.write(data) if auto_flush: self.flush()
<Purpose> Writes a data string to the file. <Arguments> data: A string containing some data. auto_flush: Boolean argument, if set to 'True', all data will be flushed from internal buffer. <Exceptions> None. <Return> None.
def upload(self, *args, **kwargs): """Runs command on every job in the run.""" for job in self.jobs: job.upload(*args, **kwargs)
Runs command on every job in the run.
def load_plugins(self): """ Tells core to take plugin options and instantiate plugin classes """ logger.info("Loading plugins...") for (plugin_name, plugin_path, plugin_cfg) in self.config.plugins: logger.debug("Loading plugin %s from %s", plugin_name, plugin_path) if plugin_path == "yandextank.plugins.Overload": logger.warning( "Deprecated plugin name: 'yandextank.plugins.Overload'\n" "There is a new generic plugin now.\n" "Correcting to 'yandextank.plugins.DataUploader overload'") plugin_path = "yandextank.plugins.DataUploader overload" try: plugin = il.import_module(plugin_path) except ImportError: logger.warning('Plugin name %s path %s import error', plugin_name, plugin_path) logger.debug('Plugin name %s path %s import error', plugin_name, plugin_path, exc_info=True) raise try: instance = getattr(plugin, 'Plugin')(self, cfg=plugin_cfg, name=plugin_name) except AttributeError: logger.warning('Plugin %s classname should be `Plugin`', plugin_name) raise else: self.register_plugin(self.PLUGIN_PREFIX + plugin_name, instance) logger.debug("Plugin instances: %s", self._plugins)
Tells core to take plugin options and instantiate plugin classes
def tag(self, text): """Retrieves list of regex_matches in text. Parameters ---------- text: Text The estnltk text object to search for events. Returns ------- list of matches """ matches = self._match(text.text) matches = self._resolve_conflicts(matches) if self.return_layer: return matches else: text[self.layer_name] = matches
Retrieves list of regex_matches in text. Parameters ---------- text: Text The estnltk text object to search for events. Returns ------- list of matches
def parse_arguments(filters, arguments, modern=False): """ Return a dict of parameters. Take a list of filters and for each try to get the corresponding value in arguments or a default value. Then check that value's type. The @modern parameter indicates how the arguments should be interpreted. The old way is that you always specify a list and in the list you write the names of types as strings. I.e. instad of `str` you write `'str'`. The modern way allows you to specify arguments by real Python types and entering it as a list means you accept and expect it to be a list. For example, using the modern way: filters = [ ("param1", "default", [str]), ("param2", None, int), ("param3", ["list", "of", 4, "values"], [str]) ] arguments = { "param1": "value1", "unknown": 12345 } => { "param1": ["value1"], "param2": 0, "param3": ["list", "of", "4", "values"] } And an example for the old way: filters = [ ("param1", "default", ["list", "str"]), ("param2", None, "int"), ("param3", ["list", "of", 4, "values"], ["list", "str"]) ] arguments = { "param1": "value1", "unknown": 12345 } => { "param1": ["value1"], "param2": 0, "param3": ["list", "of", "4", "values"] } The reason for having the modern and the non-modern way is transition of legacy code. One day it will all be the modern way. """ params = DotDict() for i in filters: count = len(i) param = None if count <= 1: param = arguments.get(i[0]) else: param = arguments.get(i[0], i[1]) # proceed and do the type checking if count >= 3: types = i[2] if modern: if isinstance(types, list) and param is not None: assert len(types) == 1 if not isinstance(param, list): param = [param] param = [check_type(x, types[0]) for x in param] else: param = check_type(param, types) else: if not isinstance(types, list): types = [types] for t in reversed(types): if t == "list" and not isinstance(param, list): if param is None or param == '': param = [] else: param = [param] elif t == "list" and isinstance(param, list): continue elif isinstance(param, list) and "list" not in types: param = " ".join(param) param = check_type(param, t) elif isinstance(param, list): param = [check_type(x, t) for x in param] else: param = check_type(param, t) params[i[0]] = param return params
Return a dict of parameters. Take a list of filters and for each try to get the corresponding value in arguments or a default value. Then check that value's type. The @modern parameter indicates how the arguments should be interpreted. The old way is that you always specify a list and in the list you write the names of types as strings. I.e. instead of `str` you write `'str'`. The modern way allows you to specify arguments by real Python types and entering it as a list means you accept and expect it to be a list. For example, using the modern way: filters = [ ("param1", "default", [str]), ("param2", None, int), ("param3", ["list", "of", 4, "values"], [str]) ] arguments = { "param1": "value1", "unknown": 12345 } => { "param1": ["value1"], "param2": 0, "param3": ["list", "of", "4", "values"] } And an example for the old way: filters = [ ("param1", "default", ["list", "str"]), ("param2", None, "int"), ("param3", ["list", "of", 4, "values"], ["list", "str"]) ] arguments = { "param1": "value1", "unknown": 12345 } => { "param1": ["value1"], "param2": 0, "param3": ["list", "of", "4", "values"] } The reason for having the modern and the non-modern way is the transition of legacy code. One day it will all be the modern way.
def _show_doc(cls, fmt_func, keys=None, indent=0, grouped=False, func=None, include_links=False, *args, **kwargs): """ Classmethod to print the formatoptions and their documentation This function is the basis for the :meth:`show_summaries` and :meth:`show_docs` methods Parameters ---------- fmt_func: function A function that takes the key, the key as it is printed, and the documentation of a formatoption as argument and returns what shall be printed %(Plotter.show_keys.parameters)s Other Parameters ---------------- %(Plotter.show_keys.other_parameters)s Returns ------- %(Plotter.show_keys.returns)s See Also -------- show_summaries, show_docs""" def titled_group(groupname): bars = str_indent + '*' * len(groupname) + '\n' return bars + str_indent + groupname + '\n' + bars func = func or default_print_func keys = cls._enhance_keys(keys, *args, **kwargs) str_indent = " " * indent if grouped: grouped_keys = DefaultOrderedDict(list) for fmto in map(lambda key: getattr(cls, key), keys): grouped_keys[fmto.groupname].append(fmto.key) text = "\n\n".join( titled_group(group) + cls._show_doc( fmt_func, keys, indent=indent, grouped=False, func=str, include_links=include_links) for group, keys in six.iteritems(grouped_keys)) return func(text.rstrip()) if include_links or (include_links is None and cls.include_links): long_keys = list(map(lambda key: ':attr:`~%s.%s.%s`' % ( cls.__module__, cls.__name__, key), keys)) else: long_keys = keys text = '\n'.join(str_indent + long_key + '\n' + fmt_func( key, long_key, getattr(cls, key).__doc__) for long_key, key in zip( long_keys, keys)) return func(text)
Classmethod to print the formatoptions and their documentation This function is the basis for the :meth:`show_summaries` and :meth:`show_docs` methods Parameters ---------- fmt_func: function A function that takes the key, the key as it is printed, and the documentation of a formatoption as argument and returns what shall be printed %(Plotter.show_keys.parameters)s Other Parameters ---------------- %(Plotter.show_keys.other_parameters)s Returns ------- %(Plotter.show_keys.returns)s See Also -------- show_summaries, show_docs
def flatten(struct): """ Creates a flat list of all items in structured output (dicts, lists, items): .. code-block:: python >>> sorted(flatten({'a': 'foo', 'b': 'bar'})) ['bar', 'foo'] >>> sorted(flatten(['foo', ['bar', 'troll']])) ['bar', 'foo', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42] """ if struct is None: return [] flat = [] if isinstance(struct, dict): for _, result in six.iteritems(struct): flat += flatten(result) return flat if isinstance(struct, six.string_types): return [struct] try: # if iterable iterator = iter(struct) except TypeError: return [struct] for result in iterator: flat += flatten(result) return flat
Creates a flat list of all items in structured output (dicts, lists, items): .. code-block:: python >>> sorted(flatten({'a': 'foo', 'b': 'bar'})) ['bar', 'foo'] >>> sorted(flatten(['foo', ['bar', 'troll']])) ['bar', 'foo', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42]
def from_opcode(cls, opcode, arg=_no_arg): """ Create an instruction from an opcode and raw argument. Parameters ---------- opcode : int Opcode for the instruction to create. arg : int, optional The argument for the instruction. Returns ------- instr : Instruction An instance of the instruction named by ``opcode``. """ return type(cls)(opname[opcode], (cls,), {}, opcode=opcode)(arg)
Create an instruction from an opcode and raw argument. Parameters ---------- opcode : int Opcode for the instruction to create. arg : int, optional The argument for the instruction. Returns ------- instr : Instruction An instance of the instruction named by ``opcode``.
def toVerticalPotential(Pot,R,phi=None): """ NAME: toVerticalPotential PURPOSE: convert a Potential to a vertical potential at a given R INPUT: Pot - Potential instance or list of such instances R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity) phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric OUTPUT: (list of) linearPotential instance(s) HISTORY: 2018-10-07 - Written - Bovy (UofT) """ Pot= flatten(Pot) if _APY_LOADED: if isinstance(R,units.Quantity): if hasattr(Pot,'_ro'): R= R.to(units.kpc).value/Pot._ro else: R= R.to(units.kpc).value/Pot[0]._ro if isinstance(phi,units.Quantity): phi= phi.to(units.rad).value if isinstance(Pot,list): out= [] for pot in Pot: if isinstance(pot,linearPotential): out.append(pot) elif isinstance(pot,Potential): out.append(verticalPotential(pot,R,phi=phi)) elif isinstance(pot,planarPotential): raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential") else: raise PotentialError("Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances") return out elif isinstance(Pot,Potential): return verticalPotential(Pot,R,phi=phi) elif isinstance(Pot,linearPotential): return Pot elif isinstance(Pot,planarPotential): raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential") else: raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances")
NAME: toVerticalPotential PURPOSE: convert a Potential to a vertical potential at a given R INPUT: Pot - Potential instance or list of such instances R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity) phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric OUTPUT: (list of) linearPotential instance(s) HISTORY: 2018-10-07 - Written - Bovy (UofT)
def last_commit(): """Returns the SHA1 of the last commit.""" try: root = subprocess.check_output( ['hg', 'parent', '--template={node}'], stderr=subprocess.STDOUT).strip() # Convert to unicode first return root.decode('utf-8') except subprocess.CalledProcessError: return None
Returns the SHA1 of the last commit.
def hil_controls_send(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode, force_mavlink1=False): ''' Sent from autopilot to simulation. Hardware in the loop control outputs time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll_ailerons : Control output -1 .. 1 (float) pitch_elevator : Control output -1 .. 1 (float) yaw_rudder : Control output -1 .. 1 (float) throttle : Throttle 0 .. 1 (float) aux1 : Aux 1, -1 .. 1 (float) aux2 : Aux 2, -1 .. 1 (float) aux3 : Aux 3, -1 .. 1 (float) aux4 : Aux 4, -1 .. 1 (float) mode : System mode (MAV_MODE) (uint8_t) nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t) ''' return self.send(self.hil_controls_encode(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode), force_mavlink1=force_mavlink1)
Sent from autopilot to simulation. Hardware in the loop control outputs time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll_ailerons : Control output -1 .. 1 (float) pitch_elevator : Control output -1 .. 1 (float) yaw_rudder : Control output -1 .. 1 (float) throttle : Throttle 0 .. 1 (float) aux1 : Aux 1, -1 .. 1 (float) aux2 : Aux 2, -1 .. 1 (float) aux3 : Aux 3, -1 .. 1 (float) aux4 : Aux 4, -1 .. 1 (float) mode : System mode (MAV_MODE) (uint8_t) nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)
def _future_completed(future): """ Helper for run_in_executor() """ exc = future.exception() if exc: log.debug("Failed to run task on executor", exc_info=exc)
Helper for run_in_executor()
def write_mates(self): '''Scan the current chromosome for matches to any of the reads stored in the read1s buffer''' if self.chrom is not None: U.debug("Dumping %i mates for contig %s" % ( len(self.read1s), self.chrom)) for read in self.infile.fetch(reference=self.chrom, multiple_iterators=True): if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)): continue key = read.query_name, read.reference_name, read.reference_start if key in self.read1s: self.outfile.write(read) self.read1s.remove(key) U.debug("%i mates remaining" % len(self.read1s))
Scan the current chromosome for matches to any of the reads stored in the read1s buffer
def _add_redundancy_router_interfaces(self, context, router, itfc_info, new_port, redundancy_router_ids=None, ha_settings_db=None, create_ha_group=True): """To be called in add_router_interface() AFTER interface has been added to router in DB. """ # There are essentially three cases where we add interface to a # redundancy router: # 1. HA is enabled on a user visible router that has one or more # interfaces. # 2. Redundancy level is increased so one or more redundancy routers # are added. # 3. An interface is added to a user visible router. # # For 1: An HA GROUP MUST BE CREATED and EXTRA PORTS MUST BE CREATED # for each redundancy router. The id of extra port should be # specified in the interface_info argument of the # add_router_interface call so that we ADD BY PORT. # For 2: HA group need NOT be created as it will already exist (since # there is already at least on redundancy router). EXTRA PORTS # MUST BE CREATED for each added redundancy router. The id # of extra port should be specified in the interface_info # argument of the add_router_interface call so that we ADD BY # PORT. # For 3: if the interface for the user_visible_router was added by ... # a) PORT: An HA GROUP MUST BE CREATED and and EXTRA PORTS MUST BE # CREATED for each redundancy router. The id of extra port # should be specified in the interface_info argument of # the add_router_interface call so that we ADD BY PORT. # b) SUBNET: There are two cases to consider. If the added interface # of the user_visible_router has ... # b1) 1 SUBNET: An HA GROUP MUST BE CREATED and and EXTRA # PORTS MUST BE CREATED for each redundancy # router. The id of extra port should be # specified in the interface_info argument of # the add_router_interface call so we ADD BY # PORT. # b2) >1 SUBNETS: HA group need NOT be created as it will # already exist (since the redundancy routers # should already have extra ports to which the # (IPv6) subnet is added. Extra ports need # thus NOT be created. The subnet id should be # added to the existing extra ports. router_id = router['id'] if ha_settings_db is None: ha_settings_db = self._get_ha_settings_by_router_id(context, router_id) if ha_settings_db is None: return e_context = context.elevated() add_by_subnet = (itfc_info is not None and 'subnet_id' in itfc_info and len(new_port['fixed_ips']) > 1) if (add_by_subnet is False or (itfc_info is None and create_ha_group is True)): # generate ha settings and extra port for router (VIP) port self._create_ha_group(e_context, router, new_port, ha_settings_db) fixed_ips = self._get_fixed_ips_subnets(new_port['fixed_ips']) for r_id in (redundancy_router_ids or self._get_redundancy_router_ids(e_context, router_id)): if add_by_subnet is True: # need to add subnet to redundancy router port ports = self._core_plugin.get_ports( e_context, filters={'device_id': [r_id], 'network_id': [new_port['network_id']]}, fields=['fixed_ips', 'id']) redundancy_port = ports[0] fixed_ips = redundancy_port['fixed_ips'] fixed_ip = {'subnet_id': itfc_info['subnet_id']} fixed_ips.append(fixed_ip) self._core_plugin.update_port( e_context, redundancy_port['id'], {'port': {'fixed_ips': fixed_ips}}) else: redundancy_port = self._create_hidden_port( e_context, new_port['network_id'], '', fixed_ips) interface_info = {'port_id': redundancy_port['id']} self.add_router_interface(e_context, r_id, interface_info)
To be called in add_router_interface() AFTER interface has been added to router in DB.
def session_dump(self, cell, hash, fname_session): """ Dump ipython session to file :param hash: cell hash :param fname_session: output filename :return: """ logging.debug('Cell {}: Dumping session to {}'.format(hash, fname_session)) inject_code = ['import dill', 'dill.dump_session(filename="{}")'.format(fname_session), ] inject_cell = nbf.v4.new_code_cell('\n'.join(inject_code)) reply, outputs = super().run_cell(inject_cell) errors = list(filter(lambda out: out.output_type == 'error', outputs)) if len(errors): logging.info('Cell {}: Warning: serialization failed, cache disabled'.format(hash)) logging.debug( 'Cell {}: Serialization error: {}'.format(hash, CellExecutionError.from_cell_and_msg(cell, errors[0]))) # disable attempts to retrieve cache for subsequent cells self.disable_cache = True # remove partial cache for current cell os.remove(fname_session) return False return True
Dump ipython session to file :param hash: cell hash :param fname_session: output filename :return:
def meanAndStdDev(self, limit=None): """return the mean and the standard deviation optionally limited to the last limit values""" if limit is None or len(self.values) < limit: limit = len(self.values) if limit > 0: mean = sum(self.values[-limit:]) / float(limit) sumSq = 0. for v in self.values[-limit:]: sumSq += (v - mean) * (v - mean) return mean, math.sqrt(sumSq / limit) else: return None
return the mean and the standard deviation optionally limited to the last limit values
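A worked example of the computation (population standard deviation over the last `limit` values), done stand-alone rather than through the original class:
import math

values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
limit = len(values)                 # use the whole history
mean = sum(values[-limit:]) / float(limit)                    # 5.0
var = sum((v - mean) ** 2 for v in values[-limit:]) / limit   # 4.0
print(mean, math.sqrt(var))                                   # 5.0 2.0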
def add_cell_markdown(self, cell_str): """ Add a markdown cell :param cell_str: markdown text :return: """ logging.debug("add_cell_markdown: {}".format(cell_str)) # drop spaces and taps at beginning and end of all lines #cell = '\n'.join(map(lambda x: x.strip(), cell_str.split('\n'))) cell = '\n'.join(cell_str.split('\n')) cell = nbf.v4.new_markdown_cell(cell) self.nb['cells'].append(cell)
Add a markdown cell :param cell_str: markdown text :return:
def clear_database(engine: Connectable, schemas: Iterable[str] = ()) -> None: """ Clear any tables from an existing database. :param engine: the engine or connection to use :param schemas: full list of schema names to expect (ignored for SQLite) """ assert check_argument_types() metadatas = [] all_schemas = (None,) # type: Tuple[Optional[str], ...] all_schemas += tuple(schemas) for schema in all_schemas: # Reflect the schema to get the list of the tables, views and constraints metadata = MetaData() metadata.reflect(engine, schema=schema, views=True) metadatas.append(metadata) for metadata in metadatas: metadata.drop_all(engine, checkfirst=False)
Clear any tables from an existing database. :param engine: the engine or connection to use :param schemas: full list of schema names to expect (ignored for SQLite)
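A usage sketch with an in-memory SQLite engine (where the schemas argument is ignored, as the docstring notes); it assumes clear_database above and its dependencies are importable, and targets the SQLAlchemy 1.x API the function itself uses:
from sqlalchemy import Column, Integer, MetaData, Table, create_engine, inspect

engine = create_engine("sqlite://")
md = MetaData()
Table("example", md, Column("id", Integer, primary_key=True))
md.create_all(engine)               # one throwaway table

clear_database(engine)              # reflects and drops every table/view
print(inspect(engine).get_table_names())   # -> []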
def auto_assign_decodings(self, decodings): """ :type decodings: list of Encoding """ nrz_decodings = [decoding for decoding in decodings if decoding.is_nrz or decoding.is_nrzi] fallback = nrz_decodings[0] if nrz_decodings else None candidate_decodings = [decoding for decoding in decodings if decoding not in nrz_decodings and not decoding.contains_cut] for message in self.messages: decoder_found = False for decoder in candidate_decodings: if decoder.applies_for_message(message.plain_bits): message.decoder = decoder decoder_found = True break if not decoder_found and fallback: message.decoder = fallback
:type decodings: list of Encoding
def _split_path(path): """Split a path returned by the api. Return: - the sentinel - the rest of the path as a list - the original path stripped of / for normalisation. """ path = path.strip('/') list_path = path.split('/') sentinel = list_path.pop(0) return sentinel, list_path, path
Split a path returned by the api. Return: - the sentinel - the rest of the path as a list - the original path stripped of / for normalisation.
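A runnable illustration, with the helper's body copied from the entry above and a made-up example path:
def _split_path(path):
    path = path.strip('/')
    list_path = path.split('/')
    sentinel = list_path.pop(0)
    return sentinel, list_path, path

print(_split_path("/api/contents/work/notebook.ipynb/"))
# -> ('api', ['contents', 'work', 'notebook.ipynb'], 'api/contents/work/notebook.ipynb')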
def on_menu_make_MagIC_results_tables(self, event): """ Creates or Updates Specimens or Pmag Specimens MagIC table, overwrites .redo file for safety, and starts User dialog to generate other MagIC tables for later contribution to the MagIC database. The following describes the steps used in the 2.5 data format to do this: 1. read pmag_specimens.txt, pmag_samples.txt, pmag_sites.txt, and sort out lines with LP-DIR in magic_codes 2. saves a clean pmag_*.txt files without LP-DIR stuff as pmag_*.txt.tmp 3. write a new file pmag_specimens.txt 4. merge pmag_specimens.txt and pmag_specimens.txt.tmp using combine_magic.py 5. delete pmag_specimens.txt.tmp 6 (optional) extracting new pag_*.txt files (except pmag_specimens.txt) using specimens_results_magic.py 7: if #6: merge pmag_*.txt and pmag_*.txt.tmp using combine_magic.py if not #6: save pmag_*.txt.tmp as pmag_*.txt """ # --------------------------------------- # save pmag_*.txt.tmp without directional data # --------------------------------------- self.on_menu_save_interpretation(None) # --------------------------------------- # dialog box to choose coordinate systems for pmag_specimens.txt # --------------------------------------- dia = demag_dialogs.magic_pmag_specimens_table_dialog(None) CoorTypes = [] if self.test_mode: CoorTypes = ['DA-DIR'] elif dia.ShowModal() == wx.ID_OK: # Until the user clicks OK, show the message if dia.cb_spec_coor.GetValue() == True: CoorTypes.append('DA-DIR') if dia.cb_geo_coor.GetValue() == True: CoorTypes.append('DA-DIR-GEO') if dia.cb_tilt_coor.GetValue() == True: CoorTypes.append('DA-DIR-TILT') else: self.user_warning("MagIC tables not saved") print("MagIC tables not saved") return # ------------------------------ self.PmagRecsOld = {} if self.data_model == 3.0: FILES = [] else: FILES = ['pmag_specimens.txt'] for FILE in FILES: self.PmagRecsOld[FILE] = [] meas_data = [] try: meas_data, file_type = pmag.magic_read( os.path.join(self.WD, FILE)) print(("-I- Read old magic file %s\n" % os.path.join(self.WD, FILE))) # if FILE !='pmag_specimens.txt': os.remove(os.path.join(self.WD, FILE)) print(("-I- Delete old magic file %s\n" % os.path.join(self.WD, FILE))) except (OSError, IOError) as e: continue for rec in meas_data: if "magic_method_codes" in list(rec.keys()): if "LP-DIR" not in rec['magic_method_codes'] and "DE-" not in rec['magic_method_codes']: self.PmagRecsOld[FILE].append(rec) # --------------------------------------- # write a new pmag_specimens.txt # --------------------------------------- specimens_list = list(self.pmag_results_data['specimens'].keys()) specimens_list.sort() PmagSpecs = [] for specimen in specimens_list: for dirtype in CoorTypes: i = 0 for fit in self.pmag_results_data['specimens'][specimen]: mpars = fit.get(dirtype) if not mpars: mpars = self.get_PCA_parameters( specimen, fit, fit.tmin, fit.tmax, dirtype, fit.PCA_type) if not mpars or 'specimen_dec' not in list(mpars.keys()): self.user_warning("Could not calculate interpretation for specimen %s and fit %s in coordinate system %s while exporting pmag tables, skipping" % ( specimen, fit.name, dirtype)) continue PmagSpecRec = {} PmagSpecRec["magic_software_packages"] = pmag.get_version( ) + ': demag_gui' PmagSpecRec["er_specimen_name"] = specimen PmagSpecRec["er_sample_name"] = self.Data_hierarchy['sample_of_specimen'][specimen] PmagSpecRec["er_site_name"] = self.Data_hierarchy['site_of_specimen'][specimen] PmagSpecRec["er_location_name"] = self.Data_hierarchy['location_of_specimen'][specimen] if specimen in 
                            list(self.Data_hierarchy['expedition_name_of_specimen'].keys()):
                        PmagSpecRec["er_expedition_name"] = self.Data_hierarchy['expedition_name_of_specimen'][specimen]
                    PmagSpecRec["er_citation_names"] = "This study"
                    if "magic_experiment_name" in self.Data[specimen]:
                        PmagSpecRec["magic_experiment_names"] = self.Data[specimen]["magic_experiment_name"]
                    if 'magic_instrument_codes' in list(self.Data[specimen].keys()):
                        PmagSpecRec["magic_instrument_codes"] = self.Data[specimen]['magic_instrument_codes']
                    PmagSpecRec['specimen_correction'] = 'u'
                    PmagSpecRec['specimen_direction_type'] = mpars["specimen_direction_type"]
                    PmagSpecRec['specimen_dec'] = "%.1f" % mpars["specimen_dec"]
                    PmagSpecRec['specimen_inc'] = "%.1f" % mpars["specimen_inc"]
                    PmagSpecRec['specimen_flag'] = "g"
                    if fit in self.bad_fits:
                        PmagSpecRec['specimen_flag'] = "b"

                    if "C" in fit.tmin or "C" in fit.tmax:
                        PmagSpecRec['measurement_step_unit'] = "K"
                    else:
                        PmagSpecRec['measurement_step_unit'] = "T"

                    if "C" in fit.tmin:
                        PmagSpecRec['measurement_step_min'] = "%.0f" % (
                            mpars["measurement_step_min"] + 273.)
                    elif "mT" in fit.tmin:
                        PmagSpecRec['measurement_step_min'] = "%8.3e" % (
                            mpars["measurement_step_min"] * 1e-3)
                    else:
                        if PmagSpecRec['measurement_step_unit'] == "K":
                            PmagSpecRec['measurement_step_min'] = "%.0f" % (
                                mpars["measurement_step_min"] + 273.)
                        else:
                            PmagSpecRec['measurement_step_min'] = "%8.3e" % (
                                mpars["measurement_step_min"] * 1e-3)

                    if "C" in fit.tmax:
                        PmagSpecRec['measurement_step_max'] = "%.0f" % (
                            mpars["measurement_step_max"] + 273.)
                    elif "mT" in fit.tmax:
                        PmagSpecRec['measurement_step_max'] = "%8.3e" % (
                            mpars["measurement_step_max"] * 1e-3)
                    else:
                        if PmagSpecRec['measurement_step_unit'] == "K":
                            PmagSpecRec['measurement_step_max'] = "%.0f" % (
                                mpars["measurement_step_max"] + 273.)
                        else:
                            PmagSpecRec['measurement_step_max'] = "%8.3e" % (
                                mpars["measurement_step_max"] * 1e-3)

                    PmagSpecRec['specimen_n'] = "%.0f" % mpars["specimen_n"]
                    calculation_type = mpars['calculation_type']
                    PmagSpecRec["magic_method_codes"] = self.Data[specimen]['magic_method_codes'] + \
                        ":" + calculation_type + ":" + dirtype
                    PmagSpecRec["specimen_comp_n"] = str(
                        len(self.pmag_results_data["specimens"][specimen]))
                    PmagSpecRec["specimen_comp_name"] = fit.name
                    if fit in self.bad_fits:
                        PmagSpecRec["specimen_flag"] = "b"
                    else:
                        PmagSpecRec["specimen_flag"] = "g"

                    if calculation_type in ["DE-BFL", "DE-BFL-A", "DE-BFL-O"]:
                        PmagSpecRec['specimen_direction_type'] = 'l'
                        PmagSpecRec['specimen_mad'] = "%.1f" % float(
                            mpars["specimen_mad"])
                        PmagSpecRec['specimen_dang'] = "%.1f" % float(
                            mpars['specimen_dang'])
                        PmagSpecRec['specimen_alpha95'] = ""
                    elif calculation_type in ["DE-BFP"]:
                        PmagSpecRec['specimen_direction_type'] = 'p'
                        PmagSpecRec['specimen_mad'] = "%.1f" % float(
                            mpars['specimen_mad'])
                        PmagSpecRec['specimen_dang'] = ""
                        PmagSpecRec['specimen_alpha95'] = ""
                        if self.data_model == 3.0:
                            if 'bfv_dec' not in list(mpars.keys()) or \
                                    'bfv_inc' not in list(mpars.keys()):
                                self.calculate_best_fit_vectors(
                                    high_level_type="sites",
                                    high_level_name=PmagSpecRec["er_site_name"],
                                    dirtype=dirtype)
                                mpars = fit.get(dirtype)
                            try:
                                PmagSpecRec['dir_bfv_dec'] = "%.1f" % mpars['bfv_dec']
                                PmagSpecRec['dir_bfv_inc'] = "%.1f" % mpars['bfv_inc']
                            except KeyError:
                                print("Error calculating BFV during export of interpretations for %s, %s, %s" % (
                                    fit.name, specimen, dirtype))
                    elif calculation_type in ["DE-FM"]:
                        PmagSpecRec['specimen_direction_type'] = 'l'
                        PmagSpecRec['specimen_mad'] = ""
                        PmagSpecRec['specimen_dang'] = ""
                        PmagSpecRec['specimen_alpha95'] = "%.1f" % float(
                            mpars['specimen_alpha95'])

                    if dirtype == 'DA-DIR-TILT':
                        PmagSpecRec['specimen_tilt_correction'] = "100"
                    elif dirtype == 'DA-DIR-GEO':
                        PmagSpecRec['specimen_tilt_correction'] = "0"
                    else:
                        PmagSpecRec['specimen_tilt_correction'] = "-1"
                    PmagSpecs.append(PmagSpecRec)
                    i += 1

        # add the 'old' lines with no "LP-DIR" in
        if 'pmag_specimens.txt' in list(self.PmagRecsOld.keys()):
            for rec in self.PmagRecsOld['pmag_specimens.txt']:
                PmagSpecs.append(rec)
        PmagSpecs_fixed = self.merge_pmag_recs(PmagSpecs)

        if len(PmagSpecs_fixed) == 0:
            self.user_warning(
                "No data to save to MagIC tables please create some interpretations before saving")
            print("No data to save, MagIC tables not written")
            return

        if self.data_model == 3.0:
            # translate demag_gui output to 3.0 DataFrame
            ndf2_5 = DataFrame(PmagSpecs_fixed)
            if 'specimen_direction_type' in ndf2_5.columns:
                # doesn't exist in new model
                del ndf2_5['specimen_direction_type']
            ndf3_0 = ndf2_5.rename(columns=map_magic.spec_magic2_2_magic3_map)
            if 'specimen' in ndf3_0.columns:
                ndf3_0 = ndf3_0.set_index("specimen")
                # replace the removed specimen column
                ndf3_0['specimen'] = ndf3_0.index
            # prefer keeping analyst_names in txt
            if 'analyst_names' in ndf3_0:
                del ndf3_0['analyst_names']

            # get current 3.0 DataFrame from contribution object
            if 'specimens' not in self.con.tables:
                cols = ndf3_0.columns
                self.con.add_empty_magic_table('specimens', col_names=cols)
            spmdf = self.con.tables['specimens']

            # remove translation collisions or deprecated terms
            for dc in ["dir_comp_name", "magic_method_codes"]:
                if dc in spmdf.df.columns:
                    del spmdf.df[dc]

            # merge previous df with new interpretations DataFrame
            # (do not include non-directional data in the merge or else it
            # will be overwritten)
            # fix index names
            spmdf.df.index.name = "specimen_name"
            ndf3_0.index.name = "specimen_name"
            # pull out directional/non-directional data
            if 'method_codes' not in spmdf.df:
                spmdf.df['method_codes'] = ''
            directional = spmdf.df['method_codes'].str.contains('LP-DIR').astype(bool)
            non_directional_df = spmdf.df[~directional]
            spmdf.df = spmdf.df[directional]
            # merge new interpretations with old specimen information
            directional_df = spmdf.merge_dfs(ndf3_0)
            # add any missing columns to non_directional_df
            for col in directional_df.columns:
                if col not in non_directional_df.columns:
                    non_directional_df[col] = ""
            # make sure columns are ordered the same so that we can concatenate
            non_directional_df.sort_index(axis='columns', inplace=True)
            directional_df.sort_index(axis='columns', inplace=True)
            # put directional/non-directional data back together
            merged = pd.concat([non_directional_df, directional_df])
            merged.sort_index(inplace=True)
            spmdf.df = merged
            # write to disk
            spmdf.write_magic_file(dir_path=self.WD)

            TEXT = "specimens interpretations are saved in specimens.txt.\nPress OK to save to samples/sites/locations/ages tables."
            self.dlg = wx.MessageDialog(
                self, caption="Other Tables", message=TEXT, style=wx.OK | wx.CANCEL)
            result = self.show_dlg(self.dlg)
            if result == wx.ID_OK:
                self.dlg.Destroy()
            else:
                self.dlg.Destroy()
                return
        else:
            pmag.magic_write(os.path.join(
                self.WD, "pmag_specimens.txt"), PmagSpecs_fixed, 'pmag_specimens')
            print("specimen data stored in %s\n" %
                  os.path.join(self.WD, "pmag_specimens.txt"))

            TEXT = "specimens interpretations are saved in pmag_specimens.txt.\nPress OK for pmag_samples/pmag_sites/pmag_results tables."
            dlg = wx.MessageDialog(
                self, caption="Other Pmag Tables", message=TEXT, style=wx.OK | wx.CANCEL)
            result = self.show_dlg(dlg)
            if result == wx.ID_OK:
                dlg.Destroy()
            else:
                dlg.Destroy()
                return

        # --------------------------------
        dia = demag_dialogs.magic_pmag_tables_dialog(
            None, self.WD, self.Data, self.Data_info)
        if self.show_dlg(dia) == wx.ID_OK:  # Until the user clicks OK, show the message
            self.On_close_MagIC_dialog(dia)
Creates or updates the Specimens (MagIC 3.0) or Pmag Specimens (MagIC 2.5) table,
overwrites the .redo file for safety, and starts the user dialog used to generate
the other MagIC tables for later contribution to the MagIC database. The
following steps are used in the 2.5 data format:

1. read pmag_specimens.txt, pmag_samples.txt, pmag_sites.txt, and sort out
   lines with LP-DIR in magic_codes
2. save clean pmag_*.txt files without the LP-DIR lines as pmag_*.txt.tmp
3. write a new file pmag_specimens.txt
4. merge pmag_specimens.txt and pmag_specimens.txt.tmp using combine_magic.py
5. delete pmag_specimens.txt.tmp
6. (optional) extract new pmag_*.txt files (except pmag_specimens.txt) using
   specimens_results_magic.py
7. if step 6 was run: merge pmag_*.txt and pmag_*.txt.tmp using
   combine_magic.py; otherwise save pmag_*.txt.tmp as pmag_*.txt
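Steps 1-5 of the 2.5-format workflow amount to dropping the previously exported
LP-DIR records from pmag_specimens.txt and appending the freshly written
interpretations before rewriting the file. The sketch below is a hypothetical,
simplified illustration of that merge, not the GUI's actual implementation; it
assumes pmag.magic_read returns a (records, file_type) tuple and reuses
pmag.magic_write with the same call pattern seen in the 2.5 branch above.

import os

from pmagpy import pmag


def merge_dir_interpretations(work_dir, new_recs):
    """Sketch of steps 1-5: drop old LP-DIR lines from pmag_specimens.txt,
    append the new interpretations, and rewrite the file."""
    spec_file = os.path.join(work_dir, "pmag_specimens.txt")
    old_recs = []
    if os.path.exists(spec_file):
        # assumption: pmag.magic_read returns (list_of_record_dicts, file_type)
        old_recs, _ = pmag.magic_read(spec_file)
    # keep only records that were not produced by a previous LP-DIR export
    kept = [rec for rec in old_recs
            if "LP-DIR" not in rec.get("magic_method_codes", "")]
    merged = kept + new_recs
    # same call pattern as the 2.5 branch above
    pmag.magic_write(spec_file, merged, "pmag_specimens")
    return merged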
def train_nn_segmentation_classifier(X, y):
    """
    Train a neural network classifier.

    Parameters
    ----------
    X : numpy array
        A list of feature vectors
    y : numpy array
        A list of labels

    Returns
    -------
    Theano expression :
        The trained neural network
    """
    def build_mlp(input_var=None):
        n_classes = 2
        # First, construct an input layer. The shape parameter defines the
        # expected input shape, which is just the shape of our data matrix X.
        l_in = lasagne.layers.InputLayer(shape=X.shape, input_var=input_var)
        # A dense layer implements a linear mix (xW + b) followed by a
        # nonlinear function.
        hiddens = [64, 64, 64]  # should be better than 0.12 (with [32])
        layers = [l_in]
        for n_units in hiddens:
            l_hidden_1 = lasagne.layers.DenseLayer(
                layers[-1],  # The first argument is the input to this layer
                num_units=n_units,  # the layer's output dimensionality
                nonlinearity=lasagne.nonlinearities.tanh)
            layers.append(l_hidden_1)
        # For our output layer, we'll use a dense layer with a softmax
        # nonlinearity.
        l_output = lasagne.layers.DenseLayer(layers[-1],
                                             num_units=n_classes,
                                             nonlinearity=softmax)
        return l_output

    # Batch iterator, copied directly from the Lasagne example.
    def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
        """
        This is just a simple helper function iterating over training data in
        mini-batches of a particular size, optionally in random order. It
        assumes data is available as numpy arrays. For big datasets, you could
        load numpy arrays as memory-mapped files (np.load(..., mmap_mode='r')),
        or write your own custom data iteration function. For small datasets,
        you can also copy them to GPU at once for slightly improved
        performance. This would involve several changes in the main program,
        though, and is not demonstrated here.
        """
        assert len(inputs) == len(targets)
        if shuffle:
            indices = np.arange(len(inputs))
            np.random.shuffle(indices)
        for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
            if shuffle:
                excerpt = indices[start_idx:start_idx + batchsize]
            else:
                excerpt = slice(start_idx, start_idx + batchsize)
            yield inputs[excerpt], targets[excerpt]

    input_var = T.matrix('inputs')
    target_var = T.ivector('targets')
    network = build_mlp(input_var)
    num_epochs = 7

    # We reserve the last 100 training examples for validation.
    X_train, X_val = X[:-100], X[-100:]
    y_train, y_val = y[:-100], y[-100:]

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params,
                                                learning_rate=0.01,
                                                momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 20, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {0} of {1} took {2:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{0:.6f}".format(train_err / train_batches))

    predict_fn = theano.function([input_var], test_prediction)
    return predict_fn
Train a neural network classifier.

    Parameters
    ----------
    X : numpy array
        A list of feature vectors
    y : numpy array
        A list of labels

    Returns
    -------
    Theano expression :
        The trained neural network
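The returned predict_fn is a compiled Theano function that maps a feature
matrix to per-class softmax scores. The snippet below is a minimal usage
sketch with made-up random data (the sizes and names are illustrative, not
from the original project); it assumes X is cast to theano.config.floatX and
y to int32 to match the T.matrix / T.ivector inputs, and that more than 100
samples are supplied so the internal validation split does not empty the
training set.

import numpy as np
import theano

# hypothetical data: 500 samples with 16 features each, binary labels
X = np.random.rand(500, 16).astype(theano.config.floatX)
y = np.random.randint(0, 2, size=500).astype(np.int32)

predict_fn = train_nn_segmentation_classifier(X, y)

# per-class softmax probabilities for a few new samples (same feature count)
new_samples = np.random.rand(10, 16).astype(theano.config.floatX)
probs = predict_fn(new_samples)   # shape (10, 2)
labels = probs.argmax(axis=1)     # hard class decisions
print(labels)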