Columns: code (string, lengths 75 to 104k characters) and docstring (string, lengths 1 to 46.9k characters).
def __add_parameter(self, param, path_parameters, params): """Adds all parameters in a field to a method parameters descriptor. Simple fields will only have one parameter, but a message field 'x' that corresponds to a message class with fields 'y' and 'z' will result in parameters 'x.y' and 'x.z', for example. The mapping from field to parameters is mostly handled by __field_to_subfields. Args: param: Parameter to be added to the descriptor. path_parameters: A list of parameters matched from a path for this field. For example for the hypothetical 'x' from above if the path was '/a/{x.z}/b/{other}' then this list would contain only the element 'x.z' since 'other' does not match to this field. params: List of parameters. Each parameter in the field. """ # If this is a simple field, just build the descriptor and append it. # Otherwise, build a schema and assign it to this descriptor descriptor = None if not isinstance(param, messages.MessageField): name = param.name descriptor = self.__parameter_descriptor(param) descriptor['location'] = 'path' if name in path_parameters else 'query' if descriptor: params[name] = descriptor else: for subfield_list in self.__field_to_subfields(param): name = '.'.join(subfield.name for subfield in subfield_list) descriptor = self.__parameter_descriptor(subfield_list[-1]) if name in path_parameters: descriptor['required'] = True descriptor['location'] = 'path' else: descriptor.pop('required', None) descriptor['location'] = 'query' if descriptor: params[name] = descriptor
Adds all parameters in a field to a method parameters descriptor. Simple fields will only have one parameter, but a message field 'x' that corresponds to a message class with fields 'y' and 'z' will result in parameters 'x.y' and 'x.z', for example. The mapping from field to parameters is mostly handled by __field_to_subfields. Args: param: Parameter to be added to the descriptor. path_parameters: A list of parameters matched from a path for this field. For example for the hypothetical 'x' from above if the path was '/a/{x.z}/b/{other}' then this list would contain only the element 'x.z' since 'other' does not match to this field. params: List of parameters. Each parameter in the field.
def site_config_dirs(appname): """Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: Mac OS X: /Library/Application Support/<AppName>/ Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\<AppName>\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\ """ if WINDOWS: path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) pathlist = [os.path.join(path, appname)] elif sys.platform == 'darwin': pathlist = [os.path.join('/Library/Application Support', appname)] else: # try looking in $XDG_CONFIG_DIRS xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') if xdg_config_dirs: pathlist = [ os.sep.join([os.path.expanduser(x), appname]) for x in xdg_config_dirs.split(os.pathsep) ] else: pathlist = [] # always look in /etc directly as well pathlist.append('/etc') return pathlist
Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: Mac OS X: /Library/Application Support/<AppName>/ Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\<AppName>\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\
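A minimal usage sketch for the helper above on a Unix-like host, assuming the snippet's module-level imports and WINDOWS flag are in scope; the application name and environment value are hypothetical.
import os

# Hypothetical environment: two XDG config dirs plus the implicit /etc fallback.
os.environ['XDG_CONFIG_DIRS'] = '/etc/xdg:/opt/xdg'

print(site_config_dirs('pip'))
# Expected on Linux: ['/etc/xdg/pip', '/opt/xdg/pip', '/etc']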
def chdir(self, dir, change_os_dir=0): """Change the current working directory for lookups. If change_os_dir is true, we will also change the "real" cwd to match. """ curr=self._cwd try: if dir is not None: self._cwd = dir if change_os_dir: os.chdir(dir.get_abspath()) except OSError: self._cwd = curr raise
Change the current working directory for lookups. If change_os_dir is true, we will also change the "real" cwd to match.
def get_urls(self, order="total_clicks desc", offset=None, count=None): """Returns a list of URLs you've included in messages. List is sorted by ``total_clicks``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items. """ req_data = [ None, order, fmt_paging(offset, count) ] return self.request("query:Message_Url", req_data)
Returns a list of URLs you've included in messages. List is sorted by ``total_clicks``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items.
def _open(filename, mode='r', buffering=0): """read only version of open()""" if mode not in ('r', 'rb', 'rU'): raise RuntimeError("Invalid open file mode, must be 'r', 'rb', or 'rU'") if buffering > MAX_OPEN_BUFFER: raise RuntimeError("Invalid buffering value, max buffer size is {}".format(MAX_OPEN_BUFFER)) return open(filename, mode, buffering)
read only version of open()
def fingerprint(txt): """ takes a string and truncates to standard form for data matching. Based on the spec at OpenRefine https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint - remove leading and trailing whitespace - change all characters to their lowercase representation - remove all punctuation and control characters - split the string into whitespace-separated tokens - sort the tokens and remove duplicates - join the tokens back together - normalize extended western characters to their ASCII representation (for example "gödel" → "godel") """ raw_text = txt.upper() #.strip(' ').replace('\n','') tokens = sorted(list(set(raw_text.split(' ')))) #print('tokens = ', tokens) res = ''.join([strip_nonalpha(t) for t in tokens]) return res
takes a string and truncates to standard form for data matching. Based on the spec at OpenRefine https://github.com/OpenRefine/OpenRefine/wiki/Clustering-In-Depth#fingerprint - remove leading and trailing whitespace - change all characters to their lowercase representation - remove all punctuation and control characters - split the string into whitespace-separated tokens - sort the tokens and remove duplicates - join the tokens back together - normalize extended western characters to their ASCII representation (for example "gödel" → "godel")
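Note that the snippet above deviates from the OpenRefine spec it cites: it uppercases rather than lowercases and joins tokens without a separator. A self-contained, spec-faithful sketch (function name is my own, not from the source):
import re
import unicodedata

def fingerprint_spec(txt):
    s = txt.strip().lower()                 # trim and lowercase, per the spec
    s = re.sub(r'[^\w\s]', '', s)           # drop punctuation and control characters
    tokens = sorted(set(s.split()))         # split on whitespace, dedupe, sort
    s = ' '.join(tokens)                    # OpenRefine rejoins with a space
    # normalize extended western characters to ASCII ("gödel" -> "godel")
    return unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')

print(fingerprint_spec('  Gödel, gödel  '))   # -> 'godel'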
def wheregreater(self, fieldname, value): """ Returns a new DataTable with rows only where the value at `fieldname` > `value`. """ return self.mask([elem > value for elem in self[fieldname]])
Returns a new DataTable with rows only where the value at `fieldname` > `value`.
def send_audio_packet(self, data, *, encode=True): """Sends an audio packet composed of the data. You must be connected to play audio. Parameters ---------- data: bytes The :term:`py:bytes-like object` denoting PCM or Opus voice data. encode: bool Indicates if ``data`` should be encoded into Opus. Raises ------- ClientException You are not connected. OpusError Encoding the data failed. """ self.checked_add('sequence', 1, 65535) if encode: encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME) else: encoded_data = data packet = self._get_voice_packet(encoded_data) try: self.socket.sendto(packet, (self.endpoint_ip, self.voice_port)) except BlockingIOError: log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp) self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295)
Sends an audio packet composed of the data. You must be connected to play audio. Parameters ---------- data: bytes The :term:`py:bytes-like object` denoting PCM or Opus voice data. encode: bool Indicates if ``data`` should be encoded into Opus. Raises ------- ClientException You are not connected. OpusError Encoding the data failed.
def delete_authoring_nodes(self, editor): """ Deletes the Model authoring Nodes associated with given editor. :param editor: Editor. :type editor: Editor :return: Method success. :rtype: bool """ editor_node = foundations.common.get_first_item(self.get_editor_nodes(editor)) file_node = editor_node.parent self.unregister_editor(editor_node) self.unregister_file(file_node, raise_exception=False) return True
Deletes the Model authoring Nodes associated with given editor. :param editor: Editor. :type editor: Editor :return: Method success. :rtype: bool
def idempotency_key(self, idempotency_key): """ Sets the idempotency_key of this BatchUpsertCatalogObjectsRequest. A value you specify that uniquely identifies this request among all your requests. A common way to create a valid idempotency key is to use a Universally unique identifier (UUID). If you're unsure whether a particular request was successful, you can reattempt it with the same idempotency key without worrying about creating duplicate objects. See [Idempotency](/basics/api101/idempotency) for more information. :param idempotency_key: The idempotency_key of this BatchUpsertCatalogObjectsRequest. :type: str """ if idempotency_key is None: raise ValueError("Invalid value for `idempotency_key`, must not be `None`") if len(idempotency_key) < 1: raise ValueError("Invalid value for `idempotency_key`, length must be greater than or equal to `1`") self._idempotency_key = idempotency_key
Sets the idempotency_key of this BatchUpsertCatalogObjectsRequest. A value you specify that uniquely identifies this request among all your requests. A common way to create a valid idempotency key is to use a Universally unique identifier (UUID). If you're unsure whether a particular request was successful, you can reattempt it with the same idempotency key without worrying about creating duplicate objects. See [Idempotency](/basics/api101/idempotency) for more information. :param idempotency_key: The idempotency_key of this BatchUpsertCatalogObjectsRequest. :type: str
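The docstring above recommends a UUID for the key; a one-line way to generate one (constructing the full request model is out of scope here):
import uuid

idempotency_key = str(uuid.uuid4())   # e.g. '8e47f0c2-...' - unique per logical request
# Reuse the same key when retrying the same request so the API can de-duplicate it.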
def bump(self, bump_part): """Return a new bumped version instance.""" major, minor, patch, stage, n = tuple(self) # stage bump if bump_part not in {"major", "minor", "patch"}: if bump_part not in self.stages: raise ValueError(f"Unknown {bump_part} stage") # We can not bump from final stage to final again. if self.stage == "final" and bump_part == "final": raise ValueError(f"{self} is already in final stage.") # bump in the same stage (numeric part) if bump_part == self.stage: n += 1 else: new_stage_number = tuple(self.stages).index(bump_part) # We can not bump to a previous stage if new_stage_number < self._stage_number: raise ValueError(f"{bump_part} stage is previous to {self}") stage = bump_part n = 0 else: # major, minor, or patch bump # Only version in final stage can do a major, minor or patch # bump if self.stage != "final": raise ValueError( f"{self} is a pre-release version." f" Can't do a {bump_part} version bump" ) if bump_part == "major": major += 1 minor, patch = 0, 0 elif bump_part == "minor": minor += 1 patch = 0 else: patch += 1 return Version(major=major, minor=minor, patch=patch, stage=stage, n=n)
Return a new bumped version instance.
def merge_like_ops(self): """ >>> Cigar("1S20M").merge_like_ops() Cigar('1S20M') >>> Cigar("1S1S20M").merge_like_ops() Cigar('2S20M') >>> Cigar("1S1S1S20M").merge_like_ops() Cigar('3S20M') >>> Cigar("1S1S1S20M1S1S").merge_like_ops() Cigar('3S20M2S') """ cigs = [] for op, grps in groupby(self.items(), itemgetter(1)): cigs.append((sum(g[0] for g in grps), op)) return Cigar(self.string_from_elements(cigs))
>>> Cigar("1S20M").merge_like_ops() Cigar('1S20M') >>> Cigar("1S1S20M").merge_like_ops() Cigar('2S20M') >>> Cigar("1S1S1S20M").merge_like_ops() Cigar('3S20M') >>> Cigar("1S1S1S20M1S1S").merge_like_ops() Cigar('3S20M2S')
def organization_fields_reorder(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/organization_fields#reorder-organization-field" api_path = "/api/v2/organization_fields/reorder.json" return self.call(api_path, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/organization_fields#reorder-organization-field
def get(self, sid): """ Constructs a AssetVersionContext :param sid: The sid :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionContext :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionContext """ return AssetVersionContext( self._version, service_sid=self._solution['service_sid'], asset_sid=self._solution['asset_sid'], sid=sid, )
Constructs a AssetVersionContext :param sid: The sid :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionContext :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionContext
def to_eng(num_in): """Return number in engineering notation.""" x = decimal.Decimal(str(num_in)) eng_not = x.normalize().to_eng_string() return(eng_not)
Return number in engineering notation.
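The helper relies on decimal's engineering-notation formatting; a small check of what it produces, with illustrative values:
import decimal

print(decimal.Decimal('123000').normalize().to_eng_string())     # '123E+3'
print(decimal.Decimal('10000000').normalize().to_eng_string())   # '10E+6'
Note that small magnitudes whose exponent stays in the plain-display range typically come back in positional form (e.g. '0.00045') rather than with an exponent.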
def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: conn = self.connect_to_aws(rds, region) if conn: instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances')
Makes an AWS API call to the list of RDS instances in a particular region
def flushall(args): """Execute flushall in all cluster nodes. """ cluster = Cluster.from_node(ClusterNode.from_uri(args.cluster)) for node in cluster.masters: node.flushall()
Execute flushall in all cluster nodes.
def get_review_history_statuses(instance, reverse=False): """Returns a list with the statuses of the instance from the review_history """ review_history = getReviewHistory(instance, reverse=reverse) return map(lambda event: event["review_state"], review_history)
Returns a list with the statuses of the instance from the review_history
def ParseMultiple(self, stats, file_objects, knowledge_base): """Parse the found release files.""" _ = knowledge_base # Collate files into path: contents dictionary. found_files = self._Combine(stats, file_objects) # Determine collected files and apply weighting. weights = [w for w in self.WEIGHTS if w.path in found_files] weights = sorted(weights, key=lambda x: x.weight) for _, path, handler in weights: contents = found_files[path] obj = handler(contents) complete, result = obj.Parse() if result is None: continue elif complete: yield rdf_protodict.Dict({ 'os_release': result.release, 'os_major_version': result.major, 'os_minor_version': result.minor }) return # Amazon AMIs place release info in /etc/system-release. # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/amazon-linux-ami-basics.html system_release = found_files.get('/etc/system-release', None) if system_release and 'Amazon Linux' in system_release: match_object = ReleaseFileParseHandler.RH_RE.search(system_release) if match_object and match_object.lastindex > 1: yield rdf_protodict.Dict({ 'os_release': 'AmazonLinuxAMI', 'os_major_version': int(match_object.group(1)), 'os_minor_version': int(match_object.group(2)) }) return # Fall back to /etc/os-release. results_dict = self._ParseOSReleaseFile(found_files) if results_dict is not None: yield results_dict return # No successful parse. yield rdf_anomaly.Anomaly( type='PARSER_ANOMALY', symptom='Unable to determine distribution.')
Parse the found release files.
def strelka_somatic_variant_stats(variant, variant_metadata): """Parse out the variant calling statistics for a given variant from a Strelka VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Strelka VCF Returns ------- SomaticVariantStats """ sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"]) normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"]) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
Parse out the variant calling statistics for a given variant from a Strelka VCF Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of sample to variant calling statistics, corresponds to the sample columns in a Strelka VCF Returns ------- SomaticVariantStats
def get_ldap_filter(ldap_filter): # type: (Any) -> Optional[Union[LDAPFilter, LDAPCriteria]] """ Retrieves the LDAP filter object corresponding to the given filter. Parses it the argument if it is an LDAPFilter instance :param ldap_filter: An LDAP filter (LDAPFilter or string) :return: The corresponding filter, can be None :raise ValueError: Invalid filter string found :raise TypeError: Unknown filter type """ if ldap_filter is None: return None if isinstance(ldap_filter, (LDAPFilter, LDAPCriteria)): # No conversion needed return ldap_filter elif is_string(ldap_filter): # Parse the filter return _parse_ldap(ldap_filter) # Unknown type raise TypeError( "Unhandled filter type {0}".format(type(ldap_filter).__name__) )
Retrieves the LDAP filter object corresponding to the given filter. Parses it the argument if it is an LDAPFilter instance :param ldap_filter: An LDAP filter (LDAPFilter or string) :return: The corresponding filter, can be None :raise ValueError: Invalid filter string found :raise TypeError: Unknown filter type
async def update_bucket(self, *, chat: typing.Union[str, int, None] = None, user: typing.Union[str, int, None] = None, bucket: typing.Dict = None, **kwargs): """ Update bucket for user in chat You can use bucket parameter or|and kwargs. Chat or user is always required. If one of them is not provided, you have to set missing value based on the provided one. :param bucket: :param chat: :param user: :param kwargs: :return: """ raise NotImplementedError
Update bucket for user in chat You can use bucket parameter or|and kwargs. Chat or user is always required. If one of them is not provided, you have to set missing value based on the provided one. :param bucket: :param chat: :param user: :param kwargs: :return:
def SELFDESTRUCT(self, recipient): """Halt execution and register account for later deletion""" #This may create a user account recipient = Operators.EXTRACT(recipient, 0, 160) address = self.address #FIXME for on the known addresses if issymbolic(recipient): logger.info("Symbolic recipient on self destruct") recipient = solver.get_value(self.constraints, recipient) if recipient not in self.world: self.world.create_account(address=recipient) self.world.send_funds(address, recipient, self.world.get_balance(address)) self.world.delete_account(address) raise EndTx('SELFDESTRUCT')
Halt execution and register account for later deletion
def download_image(image_id, url, x1, y1, x2, y2, output_dir): """Downloads one image, crops it, resizes it and saves it locally.""" output_filename = os.path.join(output_dir, image_id + '.png') if os.path.exists(output_filename): # Don't download image if it's already there return True try: # Download image url_file = urlopen(url) if url_file.getcode() != 200: return False image_buffer = url_file.read() # Crop, resize and save image image = Image.open(BytesIO(image_buffer)).convert('RGB') w = image.size[0] h = image.size[1] image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h))) image = image.resize((299, 299), resample=Image.ANTIALIAS) image.save(output_filename) except IOError: return False return True
Downloads one image, crops it, resizes it and saves it locally.
def get_service_agreements(storage_path, status='pending'): """ Get service agreements pending to be executed. :param storage_path: storage path for the internal db, str :param status: :return: """ conn = sqlite3.connect(storage_path) try: cursor = conn.cursor() return [ row for row in cursor.execute( ''' SELECT id, did, service_definition_id, price, files, start_time, status FROM service_agreements WHERE status=?; ''', (status,)) ] finally: conn.close()
Get service agreements pending to be executed. :param storage_path: storage path for the internal db, str :param status: :return:
def to_hex_string(data): ''' Convert list of integers to a hex string, separated by ":" ''' if isinstance(data, int): return '%02X' % data return ':'.join([('%02X' % o) for o in data])
Convert list of integers to a hex string, separated by ":"
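A quick usage check of the helper above:
print(to_hex_string(0x3F))                   # '3F'
print(to_hex_string([222, 173, 190, 239]))   # 'DE:AD:BE:EF'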
def score_n2(matrix, matrix_size): """\ Implements the penalty score feature 2. ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results - Table 11 (page 54) ============================== ==================== =============== Feature Evaluation condition Points ============================== ==================== =============== Block of modules in same color Block size = m × n N2 ×(m-1)×(n-1) ============================== ==================== =============== N2 = 3 :param matrix: The matrix to evaluate :param matrix_size: The width (or height) of the matrix. :return int: The penalty score (feature 2) of the matrix. """ score = 0 for i in range(matrix_size - 1): for j in range(matrix_size - 1): bit = matrix[i][j] if bit == matrix[i][j + 1] and bit == matrix[i + 1][j] \ and bit == matrix[i + 1][j + 1]: score += 1 return score * 3
\ Implements the penalty score feature 2. ISO/IEC 18004:2015(E) -- 7.8.3 Evaluation of data masking results - Table 11 (page 54) ============================== ==================== =============== Feature Evaluation condition Points ============================== ==================== =============== Block of modules in same color Block size = m × n N2 ×(m-1)×(n-1) ============================== ==================== =============== N2 = 3 :param matrix: The matrix to evaluate :param matrix_size: The width (or height) of the matrix. :return int: The penalty score (feature 2) of the matrix.
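A tiny worked example of the feature-2 rule using the function above; the matrix values are made up for illustration:
# 3x3 matrix with one 2x2 block of identical modules in the top-left corner.
matrix = [
    [1, 1, 0],
    [1, 1, 0],
    [0, 1, 0],
]
print(score_n2(matrix, 3))   # one 2x2 same-colour block -> 1 * N2 = 3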
def get_instance(key, expire=None): """Return an instance of RedisSet.""" global _instances try: instance = _instances[key] except KeyError: instance = RedisSet( key, _redis, expire=expire ) _instances[key] = instance return instance
Return an instance of RedisSet.
def cellpar_to_cell(cellpar, ab_normal=(0,0,1), a_direction=None): """Return a 3x3 cell matrix from `cellpar` = [a, b, c, alpha, beta, gamma]. The returned cell is orientated such that a and b are normal to `ab_normal` and a is parallel to the projection of `a_direction` in the a-b plane. Default `a_direction` is (1,0,0), unless this is parallel to `ab_normal`, in which case default `a_direction` is (0,0,1). The returned cell has the vectors va, vb and vc along the rows. The cell will be oriented such that va and vb are normal to `ab_normal` and va will be along the projection of `a_direction` onto the a-b plane. Example: >>> cell = cellpar_to_cell([1, 2, 4, 10, 20, 30], (0,1,1), (1,2,3)) >>> np.round(cell, 3) array([[ 0.816, -0.408, 0.408], [ 1.992, -0.13 , 0.13 ], [ 3.859, -0.745, 0.745]]) """ if a_direction is None: if np.linalg.norm(np.cross(ab_normal, (1,0,0))) < 1e-5: a_direction = (0,0,1) else: a_direction = (1,0,0) # Define rotated X,Y,Z-system, with Z along ab_normal and X along # the projection of a_direction onto the normal plane of Z. ad = np.array(a_direction) Z = unit_vector(ab_normal) X = unit_vector(ad - dot(ad, Z)*Z) Y = np.cross(Z, X) # Express va, vb and vc in the X,Y,Z-system alpha, beta, gamma = 90., 90., 90. if isinstance(cellpar, (int, float)): a = b = c = cellpar elif len(cellpar) == 1: a = b = c = cellpar[0] elif len(cellpar) == 3: a, b, c = cellpar alpha, beta, gamma = 90., 90., 90. else: a, b, c, alpha, beta, gamma = cellpar alpha *= pi/180.0 beta *= pi/180.0 gamma *= pi/180.0 va = a * np.array([1, 0, 0]) vb = b * np.array([cos(gamma), sin(gamma), 0]) cx = cos(beta) cy = (cos(alpha) - cos(beta)*cos(gamma))/sin(gamma) cz = sqrt(1. - cx*cx - cy*cy) vc = c * np.array([cx, cy, cz]) # Convert to the Cartesian x,y,z-system abc = np.vstack((va, vb, vc)) T = np.vstack((X, Y, Z)) cell = dot(abc, T) return cell
Return a 3x3 cell matrix from `cellpar` = [a, b, c, alpha, beta, gamma]. The returned cell is orientated such that a and b are normal to `ab_normal` and a is parallel to the projection of `a_direction` in the a-b plane. Default `a_direction` is (1,0,0), unless this is parallel to `ab_normal`, in which case default `a_direction` is (0,0,1). The returned cell has the vectors va, vb and vc along the rows. The cell will be oriented such that va and vb are normal to `ab_normal` and va will be along the projection of `a_direction` onto the a-b plane. Example: >>> cell = cellpar_to_cell([1, 2, 4, 10, 20, 30], (0,1,1), (1,2,3)) >>> np.round(cell, 3) array([[ 0.816, -0.408, 0.408], [ 1.992, -0.13 , 0.13 ], [ 3.859, -0.745, 0.745]])
def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): """Return a list of key names at the specified location. Folders are suffixed with /. The input must be a folder; list on a file will not return a value. Note that no policy-based filtering is performed on keys; do not encode sensitive information in key names. The values themselves are not accessible via this command. Supported methods: LIST: /{mount_point}/{path}. Produces: 200 application/json :param path: Specifies the path of the secrets to list. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the list_secrets request. :rtype: dict """ api_path = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path) response = self._adapter.list( url=api_path, ) return response.json()
Return a list of key names at the specified location. Folders are suffixed with /. The input must be a folder; list on a file will not return a value. Note that no policy-based filtering is performed on keys; do not encode sensitive information in key names. The values themselves are not accessible via this command. Supported methods: LIST: /{mount_point}/{path}. Produces: 200 application/json :param path: Specifies the path of the secrets to list. This is specified as part of the URL. :type path: str | unicode :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the list_secrets request. :rtype: dict
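The method above is the KV version 1 LIST call; a hedged usage sketch through the hvac client, where the server URL, token and path are placeholders:
import hvac

client = hvac.Client(url='http://localhost:8200', token='s.placeholder')
resp = client.secrets.kv.v1.list_secrets(path='my-folder', mount_point='secret')
print(resp['data']['keys'])   # e.g. ['config', 'sub-folder/']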
def _convert_angle_limit(angle, joint, **kwargs): """Converts the limit angle of the PyPot JSON file to the internal format""" angle_pypot = angle # No need to take care of orientation if joint["orientation"] == "indirect": angle_pypot = 1 * angle_pypot # angle_pypot = angle_pypot + offset return angle_pypot * np.pi / 180
Converts the limit angle of the PyPot JSON file to the internal format
def detect(): """Does this compiler support OpenMP parallelization?""" compiler = new_compiler() hasopenmp = hasfunction(compiler, 'omp_get_num_threads()') needs_gomp = hasopenmp if not hasopenmp: compiler.add_library('gomp') hasopenmp = hasfunction(compiler, 'omp_get_num_threads()') needs_gomp = hasopenmp return hasopenmp
Does this compiler support OpenMP parallelization?
def recover_from_duplicatekeyerror(self, e): """ method tries to recover from DuplicateKeyError """ if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError error due to {0}'.format(e), exc_info=True) else: msg = 'Unable to recover from DuplicateKeyError due to unspecified UOW primary key' self.logger.error(msg)
method tries to recover from DuplicateKeyError
def scanAllProcessesForOpenFile(searchPortion, isExactMatch=True, ignoreCase=False): ''' scanAllProcessessForOpenFile - Scans all processes on the system for a given filename @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForOpenFile ''' pids = getAllRunningPids() # Since processes could disappear, we run the scan as fast as possible here with a list comprehension, then assemble the return dictionary later. mappingResults = [scanProcessForOpenFile(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids] ret = {} for i in range(len(pids)): if mappingResults[i] is not None: ret[pids[i]] = mappingResults[i] return ret
scanAllProcessesForOpenFile - Scans all processes on the system for a given filename @param searchPortion <str> - Filename to check @param isExactMatch <bool> Default True - If match should be exact, otherwise a partial match is performed. @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively @return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of "mappingResults", @see scanProcessForOpenFile
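A usage sketch of the scanner defined above; the filename is a placeholder and the results depend on what is open on the host:
# Which processes currently have /var/log/syslog open?
matches = scanAllProcessesForOpenFile('/var/log/syslog', isExactMatch=True, ignoreCase=False)
for pid, mapping_results in matches.items():
    print(pid, mapping_results)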
def get_all_user_ssh_settings(application_name): """Retrieve the known host entries and public keys for application Retrieve the known host entries and public keys for application for all units of the given application related to this application for root user and nova user. :param application_name: Name of application eg nova-compute-something :type application_name: str :returns: Public keys + host keys for all units for app + user combination. :rtype: dict """ settings = get_ssh_settings(application_name) settings.update(get_ssh_settings(application_name, user='nova')) return settings
Retrieve the known host entries and public keys for application Retrieve the known host entries and public keys for application for all units of the given application related to this application for root user and nova user. :param application_name: Name of application eg nova-compute-something :type application_name: str :returns: Public keys + host keys for all units for app + user combination. :rtype: dict
def _get_all_indexes(self): """Returns all indexes available in the parser""" if self.parser: return [v.index for v in self.parser.get_volumes()] + [d.index for d in self.parser.disks] else: return None
Returns all indexes available in the parser
def _get_blkid_type(self): """Retrieves the FS type from the blkid command.""" try: result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()]) if not result: return None # noinspection PyTypeChecker blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result)) self.info['blkid_data'] = blkid_result if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result: return blkid_result.get('PTTYPE') else: return blkid_result.get('TYPE') except Exception: return None
Retrieves the FS type from the blkid command.
def find_noncopyable_vars(class_type, already_visited_cls_vars=None): """ Returns list of all `noncopyable` variables. If an already_visited_cls_vars list is provided as argument, the returned list will not contain these variables. This list will be extended with whatever variables pointing to classes have been found. Args: class_type (declarations.class_t): the class to be searched. already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: list: list of all `noncopyable` variables. """ assert isinstance(class_type, class_declaration.class_t) logger = utils.loggers.cxx_parser mvars = class_type.variables( lambda v: not v.type_qualifiers.has_static, recursive=False, allow_empty=True) noncopyable_vars = [] if already_visited_cls_vars is None: already_visited_cls_vars = [] message = ( "__contains_noncopyable_mem_var - %s - TRUE - " + "contains const member variable") for mvar in mvars: var_type = type_traits.remove_reference(mvar.decl_type) if type_traits.is_const(var_type): no_const = type_traits.remove_const(var_type) if type_traits.is_fundamental(no_const) or is_enum(no_const): logger.debug( (message + "- fundamental or enum"), var_type.decl_string) noncopyable_vars.append(mvar) if is_class(no_const): logger.debug((message + " - class"), var_type.decl_string) noncopyable_vars.append(mvar) if type_traits.is_array(no_const): logger.debug((message + " - array"), var_type.decl_string) noncopyable_vars.append(mvar) if type_traits.is_pointer(var_type): continue if class_traits.is_my_case(var_type): cls = class_traits.get_declaration(var_type) # Exclude classes that have already been visited. if cls in already_visited_cls_vars: continue already_visited_cls_vars.append(cls) if is_noncopyable(cls, already_visited_cls_vars): logger.debug( (message + " - class that is not copyable"), var_type.decl_string) noncopyable_vars.append(mvar) logger.debug(( "__contains_noncopyable_mem_var - %s - FALSE - doesn't " + "contain noncopyable members"), class_type.decl_string) return noncopyable_vars
Returns list of all `noncopyable` variables. If an already_visited_cls_vars list is provided as argument, the returned list will not contain these variables. This list will be extended with whatever variables pointing to classes have been found. Args: class_type (declarations.class_t): the class to be searched. already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: list: list of all `noncopyable` variables.
def get_self_attention_bias(x): """Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1] """ x_shape = common_layers.shape_list(x) self_attention_bias = common_attention.attention_bias_lower_triangle( x_shape[1]) return self_attention_bias
Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1]
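The helper builds the usual causal (lower-triangular) attention mask; a framework-agnostic numpy sketch of the same idea, with -1e9 standing in for "blocked" (shape conventions in the library differ from this simplified 2-D version):
import numpy as np

def lower_triangle_bias(length, neg_inf=-1e9):
    # row i can attend to columns 0..i (bias 0) and is blocked from columns > i (bias -1e9)
    mask = np.triu(np.ones((length, length), dtype=np.float32), k=1)
    return mask * neg_inf

print(lower_triangle_bias(4))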
def _issubclass(subclass, superclass, bound_Generic=None, bound_typevars=None, bound_typevars_readonly=False, follow_fwd_refs=True, _recursion_check=None): """Access this via ``pytypes.is_subtype``. Works like ``issubclass``, but supports PEP 484 style types from ``typing`` module. subclass : type The type to check for being a subtype of ``superclass``. superclass : type The type to check for being a supertype of ``subclass``. bound_Generic : Optional[Generic] A type object holding values for unbound typevars occurring in ``subclass`` or ``superclass``. Default: None If subclass or superclass contains unbound ``TypeVar``s and ``bound_Generic`` is provided, this function attempts to retrieve corresponding values for the unbound ``TypeVar``s from ``bound_Generic``. In collision case with ``bound_typevars`` the value from ``bound_Generic`` if preferred. bound_typevars : Optional[Dict[typing.TypeVar, type]] A dictionary holding values for unbound typevars occurring in ``subclass`` or ``superclass``. Default: {} Depending on ``bound_typevars_readonly`` pytypes can also bind values to typevars as needed. This is done by inserting according mappings into this dictionary. This can e.g. be useful to infer values for ``TypeVar``s or to consistently check a set of ``TypeVar``s across multiple calls, e.g. when checking all arguments of a function call. In collision case with ``bound_Generic`` the value from ``bound_Generic`` if preferred. bound_typevars_readonly : bool Defines if pytypes is allowed to write into the ``bound_typevars`` dictionary. Default: True If set to False, pytypes cannot assign values to ``TypeVar``s, but only checks regarding values already present in ``bound_typevars`` or ``bound_Generic``. follow_fwd_refs : bool Defines if ``_ForwardRef``s should be explored. Default: True If this is set to ``False`` and a ``_ForwardRef`` is encountered, pytypes aborts the check raising a ForwardRefError. _recursion_check : Optional[Dict[type, Set[type]]] Internally used for recursion checks. Default: None If ``Union``s and ``_ForwardRef``s occur in the same type, recursions can occur. As soon as a ``_ForwardRef`` is encountered, pytypes automatically creates this dictionary and continues in recursion-proof manner. 
""" if bound_typevars is None: bound_typevars = {} if superclass is Any: return True if subclass == superclass: return True if subclass is Any: return superclass is Any if isinstance(subclass, ForwardRef) or isinstance(superclass, ForwardRef): if not follow_fwd_refs: raise pytypes.ForwardRefError( "ForwardRef encountered, but follow_fwd_refs is False: '%s'\n%s"% ((subclass if isinstance(subclass, ForwardRef) else superclass) .__forward_arg__, "Retry with follow_fwd_refs=True.")) # Now that forward refs are in the game, we must continue in recursion-proof manner: if _recursion_check is None: _recursion_check = {superclass: {subclass}} elif superclass in _recursion_check: if subclass in _recursion_check[superclass]: # recursion detected return False else: _recursion_check[superclass].add(subclass) else: _recursion_check[superclass] = {subclass} if isinstance(subclass, ForwardRef): if not subclass.__forward_evaluated__: raise pytypes.ForwardRefError("ForwardRef in subclass not evaluated: '%s'\n%s"% (subclass.__forward_arg__, "Use pytypes.resolve_fw_decl")) else: return _issubclass(subclass.__forward_value__, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) else: # isinstance(superclass, ForwardRef) if not superclass.__forward_evaluated__: raise pytypes.ForwardRefError("ForwardRef in superclass not evaluated: '%s'\n%s"% (superclass.__forward_arg__, "Use pytypes.resolve_fw_decl")) else: return _issubclass(subclass, superclass.__forward_value__, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if pytypes.apply_numeric_tower: if superclass is float and subclass is int: return True elif superclass is complex and \ (subclass is int or subclass is float): return True if superclass in _extra_dict: superclass = _extra_dict[superclass] try: if _issubclass_2(subclass, Empty, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): for empty_target in [Container, Sized, Iterable]: # We cannot simply use Union[Container, Sized, Iterable] as empty_target # because of implementation detail behavior of _issubclass_2. # It would e.g. cause false negative result of # is_subtype(Empty[Dict], Empty[Container]) try: if _issubclass_2(superclass.__origin__, empty_target, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return _issubclass_2(subclass.__args__[0], superclass.__origin__, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) except: pass if _issubclass_2(superclass, empty_target, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return _issubclass_2(subclass.__args__[0], superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) except: pass try: if _issubclass_2(superclass, Empty, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): for empty_target in [Container, Sized, Iterable]: # We cannot simply use Union[Container, Sized, Iterable] as empty_target # because of implementation detail behavior of _issubclass_2. 
try: if _issubclass_2(subclass.__origin__, empty_target, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return _issubclass_2(subclass.__origin__, superclass.__args__[0], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) except: pass if _issubclass_2(subclass, empty_target, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return _issubclass_2(subclass, superclass.__args__[0], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) except: pass if isinstance(superclass, TypeVar): if not superclass.__bound__ is None: if not _issubclass(subclass, superclass.__bound__, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): return False if not bound_typevars is None: try: if superclass.__contravariant__: return _issubclass(bound_typevars[superclass], subclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) elif superclass.__covariant__: return _issubclass(subclass, bound_typevars[superclass], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) else: return _issubclass(bound_typevars[superclass], subclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) and \ _issubclass(subclass, bound_typevars[superclass], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) except: pass if not bound_Generic is None: superclass = get_arg_for_TypeVar(superclass, bound_Generic) if not superclass is None: return _issubclass(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if not bound_typevars is None: if bound_typevars_readonly: return False else: # bind it... bound_typevars[superclass] = subclass return True return False if isinstance(subclass, TypeVar): if not bound_typevars is None: try: return _issubclass(bound_typevars[subclass], superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) except: pass if not bound_Generic is None: subclass = get_arg_for_TypeVar(subclass, bound_Generic) if not subclass is None: return _issubclass(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if not subclass.__bound__ is None: return _issubclass(subclass.__bound__, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) return False res = _issubclass_2(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) return res
Access this via ``pytypes.is_subtype``. Works like ``issubclass``, but supports PEP 484 style types from ``typing`` module. subclass : type The type to check for being a subtype of ``superclass``. superclass : type The type to check for being a supertype of ``subclass``. bound_Generic : Optional[Generic] A type object holding values for unbound typevars occurring in ``subclass`` or ``superclass``. Default: None If subclass or superclass contains unbound ``TypeVar``s and ``bound_Generic`` is provided, this function attempts to retrieve corresponding values for the unbound ``TypeVar``s from ``bound_Generic``. In collision case with ``bound_typevars`` the value from ``bound_Generic`` if preferred. bound_typevars : Optional[Dict[typing.TypeVar, type]] A dictionary holding values for unbound typevars occurring in ``subclass`` or ``superclass``. Default: {} Depending on ``bound_typevars_readonly`` pytypes can also bind values to typevars as needed. This is done by inserting according mappings into this dictionary. This can e.g. be useful to infer values for ``TypeVar``s or to consistently check a set of ``TypeVar``s across multiple calls, e.g. when checking all arguments of a function call. In collision case with ``bound_Generic`` the value from ``bound_Generic`` if preferred. bound_typevars_readonly : bool Defines if pytypes is allowed to write into the ``bound_typevars`` dictionary. Default: True If set to False, pytypes cannot assign values to ``TypeVar``s, but only checks regarding values already present in ``bound_typevars`` or ``bound_Generic``. follow_fwd_refs : bool Defines if ``_ForwardRef``s should be explored. Default: True If this is set to ``False`` and a ``_ForwardRef`` is encountered, pytypes aborts the check raising a ForwardRefError. _recursion_check : Optional[Dict[type, Set[type]]] Internally used for recursion checks. Default: None If ``Union``s and ``_ForwardRef``s occur in the same type, recursions can occur. As soon as a ``_ForwardRef`` is encountered, pytypes automatically creates this dictionary and continues in recursion-proof manner.
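The public entry point documented above is pytypes.is_subtype; two hedged calls illustrating PEP 484-aware checks:
from typing import List, Sequence
import pytypes

print(pytypes.is_subtype(List[int], Sequence[int]))   # True: a List[int] satisfies Sequence[int]
print(pytypes.is_subtype(int, float))                 # True when pytypes.apply_numeric_tower is enabled (handled near the top of the function above)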
def file_system(self): """Gets the filesystem corresponding to the open scheduler.""" if self._file_system is None: self._file_system = self.scheduler.get_file_system() return self._file_system
Gets the filesystem corresponding to the open scheduler.
def has_cell(self, s): """Tests whether store `s` is a cell, that is, it uses exactly one cell, and there can take on only a finite number of states).""" for t in self.transitions: if len(t.lhs[s]) != 1: return False if len(t.rhs[s]) != 1: return False if t.lhs[s].position != 0: return False if t.rhs[s].position != 0: return False return True
Tests whether store `s` is a cell, that is, it uses exactly one cell, and there can take on only a finite number of states).
def wait_for_notification(self, handle: int, delegate, notification_timeout: float): """Listen for characteristics changes from a BLE address. @param: mac - MAC address in format XX:XX:XX:XX:XX:XX @param: handle - BLE characteristics handle in format 0xXX a value of 0x0100 is written to register for listening @param: delegate - gatttool receives the --listen argument and the delegate object's handleNotification is called for every returned row @param: notification_timeout """ if not self.is_connected(): raise BluetoothBackendException('Not connected to any device.') attempt = 0 delay = 10 _LOGGER.debug("Enter write_ble (%s)", current_thread()) while attempt <= self.retries: cmd = "gatttool --device={} --addr-type={} --char-write-req -a {} -n {} --adapter={} --listen".format( self._mac, self.address_type, self.byte_to_handle(handle), self.bytes_to_string(self._DATA_MODE_LISTEN), self.adapter) _LOGGER.debug("Running gatttool with a timeout of %d: %s", notification_timeout, cmd) with Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid) as process: try: result = process.communicate(timeout=notification_timeout)[0] _LOGGER.debug("Finished gatttool") except TimeoutExpired: # send signal to the process group, because listening always hangs os.killpg(process.pid, signal.SIGINT) result = process.communicate()[0] _LOGGER.debug("Listening stopped forcefully after timeout.") result = result.decode("utf-8").strip(' \n\t') if "Write Request failed" in result: raise BluetoothBackendException('Error writing handle to sensor: {}'.format(result)) _LOGGER.debug("Got %s from gatttool", result) # Parse the output to determine success if "successfully" in result: _LOGGER.debug("Exit write_ble with result (%s)", current_thread()) # extract useful data. for element in self.extract_notification_payload(result): delegate.handleNotification(handle, bytes([int(x, 16) for x in element.split()])) return True attempt += 1 _LOGGER.debug("Waiting for %s seconds before retrying", delay) if attempt < self.retries: time.sleep(delay) delay *= 2 raise BluetoothBackendException("Exit write_ble, no data ({})".format(current_thread()))
Listen for characteristics changes from a BLE address. @param: mac - MAC address in format XX:XX:XX:XX:XX:XX @param: handle - BLE characteristics handle in format 0xXX a value of 0x0100 is written to register for listening @param: delegate - gatttool receives the --listen argument and the delegate object's handleNotification is called for every returned row @param: notification_timeout
def iter_list(self, id, *args, **kwargs): """Get a list of attachments. Whereas ``list`` fetches a single page of attachments according to its ``limit`` and ``page`` arguments, ``iter_list`` returns all attachments by internally making successive calls to ``list``. :param id: Device ID as an int. :param args: Arguments that ``list`` takes. :param kwargs: Optional arguments that ``list`` takes. :return: :class:`attachments.Attachment <attachments.Attachment>` list """ l = partial(self.list, id) return self.service.iter_list(l, *args, **kwargs)
Get a list of attachments. Whereas ``list`` fetches a single page of attachments according to its ``limit`` and ``page`` arguments, ``iter_list`` returns all attachments by internally making successive calls to ``list``. :param id: Device ID as an int. :param args: Arguments that ``list`` takes. :param kwargs: Optional arguments that ``list`` takes. :return: :class:`attachments.Attachment <attachments.Attachment>` list
def __compare_parameters(self, width, height, zoom, parameters): """Compare parameters for equality Checks if a cached image is existing, the the dimensions agree and finally if the properties are equal. If so, True is returned, else False, :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, False else """ # Deactivated caching if not global_gui_config.get_config_value('ENABLE_CACHING', True): return False # Empty cache if not self.__image: return False # Changed image size if self.__width != width or self.__height != height: return False # Current zoom greater then prepared zoom if zoom > self.__zoom * self.__zoom_multiplicator: return False # Current zoom much smaller than prepared zoom, causes high memory usage and imperfect anti-aliasing if zoom < self.__zoom / self.__zoom_multiplicator: return False # Changed drawing parameter for key in parameters: try: if key not in self.__last_parameters or self.__last_parameters[key] != parameters[key]: return False except (AttributeError, ValueError): # Some values cannot be compared and raise an exception on comparison (e.g. numpy.ndarray). In this # case, just return False and do not cache. try: # Catch at least the ndarray-case, as this could occure relatively often import numpy if isinstance(self.__last_parameters[key], numpy.ndarray): return numpy.array_equal(self.__last_parameters[key], parameters[key]) except ImportError: return False return False return True
Compare parameters for equality Checks whether a cached image exists, whether the dimensions agree and finally whether the properties are equal. If so, True is returned, else False. :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, otherwise False
def tointerval(s): """ If string, then convert to an interval; otherwise just return the input """ if isinstance(s, basestring): m = coord_re.search(s) if m.group('strand'): return pybedtools.create_interval_from_list([ m.group('chrom'), m.group('start'), m.group('stop'), '.', '0', m.group('strand')]) else: return pybedtools.create_interval_from_list([ m.group('chrom'), m.group('start'), m.group('stop'), ]) return s
If string, then convert to an interval; otherwise just return the input
def translate(term=None, phrase=None, api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None): """ Shorthand for creating a Giphy api wrapper with the given api key and then calling the translate method. """ return Giphy(api_key=api_key, strict=strict).translate( term=term, phrase=phrase, rating=rating)
Shorthand for creating a Giphy api wrapper with the given api key and then calling the translate method.
def absent(self, name, rdtype=None): """Require that an owner name (and optionally an rdata type) does not exist as a prerequisite to the execution of the update.""" if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) if rdtype is None: rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, dns.rdatatype.ANY, dns.rdatatype.NONE, None, True, True) else: if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, rdtype, dns.rdatatype.NONE, None, True, True)
Require that an owner name (and optionally an rdata type) does not exist as a prerequisite to the execution of the update.
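A typical use of the prerequisite above in a dnspython dynamic update; the zone, record name and server address are placeholders:
import dns.update
import dns.query

update = dns.update.Update('example.com')
update.absent('host1')                        # only apply the update if host1.example.com does not exist
update.add('host1', 300, 'A', '192.0.2.10')
response = dns.query.tcp(update, '192.0.2.53')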
def get_task_df(self): ''' Returns ------- ''' term_time_df = self._get_term_time_df() terms_to_include = ( term_time_df .groupby('term')['top'] .sum() .sort_values(ascending=False) .iloc[:self.num_terms_to_include].index ) task_df = ( term_time_df[term_time_df.term.isin(terms_to_include)][['time', 'term']] .groupby('term') .apply(lambda x: pd.Series(self._find_sequences(x['time']))) .reset_index() .rename({0: 'sequence'}, axis=1) .reset_index() .assign(start=lambda x: x['sequence'].apply(lambda x: x[0])) .assign(end=lambda x: x['sequence'].apply(lambda x: x[1])) [['term', 'start', 'end']] ) return task_df
Returns ------- task_df : pd.DataFrame One row per term sequence found by _find_sequences, with columns 'term', 'start' and 'end'.
def adjustText(self): """ Updates the text based on the current format options. """ pos = self.cursorPosition() self.blockSignals(True) super(XLineEdit, self).setText(self.formatText(self.text())) self.setCursorPosition(pos) self.blockSignals(False)
Updates the text based on the current format options.
def to_xdr_object(self): """Creates an XDR Memo object for a transaction with MEMO_TEXT.""" return Xdr.types.Memo(type=Xdr.const.MEMO_TEXT, text=self.text)
Creates an XDR Memo object for a transaction with MEMO_TEXT.
def write_file(self, filename): """ Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = ProbModelXMLWriter(model) >>> writer.write_file(test_file) """ writer = self.__str__()[:-1].decode('utf-8') with open(filename, 'w') as fout: fout.write(writer)
Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = ProbModelXMLWriter(model) >>> writer.write_file(test_file)
def waypoint_request_list_send(self): '''wrapper for waypoint_request_list_send''' if self.mavlink10(): self.mav.mission_request_list_send(self.target_system, self.target_component) else: self.mav.waypoint_request_list_send(self.target_system, self.target_component)
wrapper for waypoint_request_list_send
def _install_packages(path, packages): """Install all packages listed to the target directory. Ignores any package that includes Python itself and python-lambda as well since its only needed for deploying and not running the code :param str path: Path to copy installed pip packages to. :param list packages: A list of packages to be installed via pip. """ def _filter_blacklist(package): blacklist = ['-i', '#', 'Python==', 'python-lambda=='] return all(package.startswith(entry) is False for entry in blacklist) filtered_packages = filter(_filter_blacklist, packages) for package in filtered_packages: if package.startswith('-e '): package = package.replace('-e ', '') print('Installing {package}'.format(package=package)) subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-t', path, '--ignore-installed']) print ('Install directory contents are now: {directory}'.format(directory=os.listdir(path)))
Install all packages listed to the target directory. Ignores any package that includes Python itself and python-lambda as well since its only needed for deploying and not running the code :param str path: Path to copy installed pip packages to. :param list packages: A list of packages to be installed via pip.
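A hedged call to the helper above; the vendor directory and requirement strings are made up, and under the hood it shells out to "python -m pip install -t <path>":
requirements = [
    'requests==2.31.0',                                     # plain pins are installed as-is
    '-e git+https://github.com/example/pkg.git#egg=pkg',    # the '-e ' prefix is stripped before installing
    'python-lambda==3.2.0',                                 # filtered out by the blacklist
]
_install_packages('./build/vendor', requirements)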
def isMultiContract(self, contract): """ tells if is this contract has sub-contract with expiries/strikes/sides """ if contract.m_secType == "FUT" and contract.m_expiry == "": return True if contract.m_secType in ["OPT", "FOP"] and \ (contract.m_expiry == "" or contract.m_strike == "" or contract.m_right == ""): return True tickerId = self.tickerId(contract) if tickerId in self.contract_details and \ len(self.contract_details[tickerId]["contracts"]) > 1: return True return False
tells if is this contract has sub-contract with expiries/strikes/sides
def kill_all(job_queue, reason='None given', states=None): """Terminates/cancels all RUNNING, RUNNABLE, and STARTING jobs.""" if states is None: states = ['STARTING', 'RUNNABLE', 'RUNNING'] batch = boto3.client('batch') runnable = batch.list_jobs(jobQueue=job_queue, jobStatus='RUNNABLE') job_info = runnable.get('jobSummaryList') if job_info: job_ids = [job['jobId'] for job in job_info] # Cancel jobs for job_id in job_ids: batch.cancel_job(jobId=job_id, reason=reason) res_list = [] for status in states: running = batch.list_jobs(jobQueue=job_queue, jobStatus=status) job_info = running.get('jobSummaryList') if job_info: job_ids = [job['jobId'] for job in job_info] for job_id in job_ids: logger.info('Killing %s' % job_id) res = batch.terminate_job(jobId=job_id, reason=reason) res_list.append(res) return res_list
Terminates/cancels all RUNNING, RUNNABLE, and STARTING jobs.
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore): """Initialize kvstore""" for idx, param_on_devs in enumerate(param_arrays): name = param_names[idx] kvstore.init(name, arg_params[name]) if update_on_kvstore: kvstore.pull(name, param_on_devs, priority=-idx)
Initialize kvstore
def get_languages(self): """ Get the list of languages we need to start servers and create clients for. """ languages = ['python'] all_options = CONF.options(self.CONF_SECTION) for option in all_options: if option in [l.lower() for l in LSP_LANGUAGES]: languages.append(option) return languages
Get the list of languages we need to start servers and create clients for.
def copy_file(self):
    share_name = self._create_share()
    directory_name = self._create_directory(share_name)

    source_file_name = self._get_file_reference()
    self.service.create_file(share_name, directory_name, source_file_name, 512)

    # Basic
    # Copy the file from the directory to the root of the share
    source = self.service.make_file_url(share_name, directory_name, source_file_name)
    copy = self.service.copy_file(share_name, None, 'file1copy', source)

    # Poll for copy completion
    count = 0
    while copy.status != 'success':
        count = count + 1
        if count > 5:
            print('Timed out waiting for async copy to complete.')
            break
        time.sleep(30)
        copy = self.service.get_file_properties(share_name, directory_name, 'file1copy').properties.copy

    # With SAS from a remote account to local file
    # Commented out as remote share, directory, file, and sas would need to be created
    '''
    source_file_url = self.service.make_file_url(
        remote_share_name,
        remote_directory_name,
        remote_file_name,
        sas_token=remote_sas_token,
    )
    copy = self.service.copy_file(destination_sharename,
                                  destination_directory_name,
                                  destination_file_name,
                                  source_file_url)
    '''

    # Abort copy
    # Commented out as this involves timing the abort to be sent while the copy is still running
    # Abort copy is useful to do along with polling
    # self.service.abort_copy_file(share_name, dir_name, file_name, copy.id)

    self.service.delete_share(share_name)
source_file_url = self.service.make_file_url( remote_share_name, remote_directory_name, remote_file_name, sas_token=remote_sas_token, ) copy = self.service.copy_file(destination_sharename, destination_directory_name, destination_file_name, source_file_url)
def load_keys(self, issuer, jwks_uri='', jwks=None, replace=False):
    """
    Fetch keys from another server

    :param jwks_uri: A URL pointing to a site that will return a JWKS
    :param jwks: A dictionary representation of a JWKS
    :param issuer: The provider URL
    :param replace: If all previously gathered keys from this provider
        should be replaced.
    :return: Dictionary with usage as key and keys as values
    """
    logger.debug("Initiating key bundle for issuer: %s" % issuer)

    if replace or issuer not in self.issuer_keys:
        self.issuer_keys[issuer] = []

    if jwks_uri:
        self.add_url(issuer, jwks_uri)
    elif jwks:
        # jwks should only be considered if no jwks_uri is present
        _keys = jwks['keys']
        self.issuer_keys[issuer].append(self.keybundle_cls(_keys))
Fetch keys from another server

:param jwks_uri: A URL pointing to a site that will return a JWKS
:param jwks: A dictionary representation of a JWKS
:param issuer: The provider URL
:param replace: If all previously gathered keys from this provider
    should be replaced.
:return: Dictionary with usage as key and keys as values
def run_cmd(cmd, remote, rootdir='', workdir='', ignore_exit_code=False, ssh='ssh'): r'''Run the given cmd in the given workdir, either locally or remotely, and return the combined stdout/stderr Parameters: cmd (list of str or str): Command to execute, as list consisting of the command, and options. Alternatively, the command can be given a single string, which will then be executed as a shell command. Only use shell commands when necessary, e.g. when the command involves a pipe. remote (None or str): If None, run command locally. Otherwise, run on the given host (via SSH) rootdir (str, optional): Local or remote root directory. The `workdir` variable is taken relative to `rootdir`. If not specified, effectively the current working directory is used as the root for local commands, and the home directory for remote commands. Note that `~` may be used to indicate the home directory locally or remotely. workdir (str, optional): Local or remote directory from which to run the command, relative to `rootdir`. If `rootdir` is empty, `~` may be used to indicate the home directory. ignore_exit_code (boolean, optional): By default, `subprocess.CalledProcessError` will be raised if the call has an exit code other than 0. This exception can be supressed by passing `ignore_exit_code=False` ssh (str, optional): The executable to be used for ssh. If not a full path, the executable must be in ``$PATH`` Example: >>> import tempfile, os, shutil >>> tempfolder = tempfile.mkdtemp() >>> scriptfile = os.path.join(tempfolder, 'test.sh') >>> with open(scriptfile, 'w') as script_fh: ... script_fh.writelines(["#!/bin/bash\n", "echo Hello $1\n"]) >>> set_executable(scriptfile) >>> run_cmd(['./test.sh', 'World'], remote=None, workdir=tempfolder) 'Hello World\n' >>> run_cmd("./test.sh World | tr '[:upper:]' '[:lower:]'", remote=None, ... workdir=tempfolder) 'hello world\n' >>> shutil.rmtree(tempfolder) ''' logger = logging.getLogger(__name__) workdir = os.path.join(rootdir, workdir) if type(cmd) in [list, tuple]: use_shell = False else: cmd = str(cmd) use_shell = True try: if remote is None: # run locally workdir = os.path.expanduser(workdir) if use_shell: logger.debug("COMMAND: %s", cmd) else: logger.debug("COMMAND: %s", " ".join([quote(part) for part in cmd])) if workdir == '': response = sp.check_output(cmd, stderr=sp.STDOUT, shell=use_shell) else: response = sp.check_output(cmd, stderr=sp.STDOUT, cwd=workdir, shell=use_shell) else: # run remotely if not use_shell: cmd = " ".join(cmd) if workdir == '': cmd = [ssh, remote, cmd] else: cmd = [ssh, remote, 'cd %s && %s' % (workdir, cmd)] logger.debug("COMMAND: %s", " ".join([quote(part) for part in cmd])) response = sp.check_output(cmd, stderr=sp.STDOUT) except sp.CalledProcessError as e: if ignore_exit_code: response = e.output else: raise if sys.version_info >= (3, 0): # For Python 3, we should return a unicode string, so that the backends # can safely assume that string operations such as regex matching are # possible. response = response.decode(CMD_RESPONSE_ENCODING) if logger.getEffectiveLevel() <= logging.DEBUG: if "\n" in response: if len(response.splitlines()) == 1: logger.debug("RESPONSE: %s", response) else: logger.debug("RESPONSE: ---\n%s\n---", response) else: logger.debug("RESPONSE: '%s'", response) return response
r'''Run the given cmd in the given workdir, either locally or remotely, and
return the combined stdout/stderr

Parameters:
    cmd (list of str or str): Command to execute, as list consisting of the
        command, and options. Alternatively, the command can be given a
        single string, which will then be executed as a shell command. Only
        use shell commands when necessary, e.g. when the command involves a
        pipe.
    remote (None or str): If None, run command locally. Otherwise, run on
        the given host (via SSH)
    rootdir (str, optional): Local or remote root directory. The `workdir`
        variable is taken relative to `rootdir`. If not specified,
        effectively the current working directory is used as the root for
        local commands, and the home directory for remote commands. Note
        that `~` may be used to indicate the home directory locally or
        remotely.
    workdir (str, optional): Local or remote directory from which to run
        the command, relative to `rootdir`. If `rootdir` is empty, `~` may
        be used to indicate the home directory.
    ignore_exit_code (boolean, optional): By default,
        `subprocess.CalledProcessError` will be raised if the call has an
        exit code other than 0. This exception can be suppressed by passing
        `ignore_exit_code=True`
    ssh (str, optional): The executable to be used for ssh. If not a full
        path, the executable must be in ``$PATH``

Example:

    >>> import tempfile, os, shutil
    >>> tempfolder = tempfile.mkdtemp()
    >>> scriptfile = os.path.join(tempfolder, 'test.sh')
    >>> with open(scriptfile, 'w') as script_fh:
    ...     script_fh.writelines(["#!/bin/bash\n", "echo Hello $1\n"])
    >>> set_executable(scriptfile)
    >>> run_cmd(['./test.sh', 'World'], remote=None, workdir=tempfolder)
    'Hello World\n'
    >>> run_cmd("./test.sh World | tr '[:upper:]' '[:lower:]'", remote=None,
    ...         workdir=tempfolder)
    'hello world\n'
    >>> shutil.rmtree(tempfolder)
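A small illustration of how the remote invocation is assembled before being handed to subprocess; the host and directory names below are invented for the example:

from shlex import quote  # the same quoting used for the debug log

cmd = ['./test.sh', 'World']
remote = 'user@example.com'      # hypothetical host
workdir = '~/jobs/run1'          # hypothetical remote directory

shell_cmd = ' '.join(cmd)
full_cmd = ['ssh', remote, 'cd %s && %s' % (workdir, shell_cmd)]
print(' '.join(quote(part) for part in full_cmd))
# ssh user@example.com 'cd ~/jobs/run1 && ./test.sh World'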
def get_object_references(tb, source, max_string_length=1000): """ Find the values of referenced attributes of objects within the traceback scope. :param tb: traceback :return: list of tuples containing (variable name, value) """ global obj_ref_regex referenced_attr = set() for line in source.split('\n'): referenced_attr.update(set(re.findall(obj_ref_regex, line))) referenced_attr = sorted(referenced_attr) info = [] for attr in referenced_attr: v = string_variable_lookup(tb, attr) if v is not ValueError: ref_string = format_reference(v, max_string_length=max_string_length) info.append((attr, ref_string)) return info
Find the values of referenced attributes of objects within the traceback scope. :param tb: traceback :return: list of tuples containing (variable name, value)
def get_context_files(data): """Retrieve pre-installed annotation files for annotating genome context. """ ref_file = dd.get_ref_file(data) all_files = [] for ext in [".bed.gz"]: all_files += sorted(glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir, "coverage", "problem_regions", "*", "*%s" % ext)))) return sorted(all_files)
Retrieve pre-installed annotation files for annotating genome context.
def perform_permissions_check(self, user, obj, perms): """ Performs the permissions check. """ return self.request.forum_permission_handler.can_update_topics_to_announces(obj, user)
Performs the permissions check.
def add_cell_code(self, cell_str, pos=None): """ Add Python cell :param cell_str: cell content :return: """ cell_str = cell_str.strip() logging.debug("add_cell_code: {}".format(cell_str)) cell = nbf.v4.new_code_cell(cell_str) if pos is None: self.nb['cells'].append(cell) else: self.nb['cells'].insert(pos, cell)
Add Python cell :param cell_str: cell content :return:
def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]: """ Returns a list of tokens for each line in path up to a limit. :param path: Path to files containing sentences. :param limit: How many lines to read from path. :return: Iterator over lists of words. """ with smart_open(path) as indata: for i, line in enumerate(indata): if limit is not None and i == limit: break yield list(get_tokens(line))
Returns a list of tokens for each line in path up to a limit. :param path: Path to files containing sentences. :param limit: How many lines to read from path. :return: Iterator over lists of words.
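Since smart_open and get_tokens are defined elsewhere in the module, here is a self-contained sketch of the same limiting behaviour using plain built-ins and whitespace tokenisation in place of get_tokens:

from typing import Iterator, List, Optional

def read_content_sketch(path: str, limit: Optional[int] = None) -> Iterator[List[str]]:
    with open(path, encoding='utf-8') as indata:
        for i, line in enumerate(indata):
            if limit is not None and i == limit:
                break
            yield line.split()

# Hypothetical usage: read at most two lines from 'corpus.txt'.
# for tokens in read_content_sketch('corpus.txt', limit=2):
#     print(tokens)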
def dump_json(data, indent=None): """ :param list | dict data: :param Optional[int] indent: :rtype: unicode """ return json.dumps(data, indent=indent, ensure_ascii=False, sort_keys=True, separators=(',', ': '))
:param list | dict data: :param Optional[int] indent: :rtype: unicode
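A quick check of what those formatting choices produce (sorted keys, non-ASCII preserved, compact comma separator):

import json

data = {'b': 'æøå', 'a': [1, 2]}
print(json.dumps(data, indent=None, ensure_ascii=False,
                 sort_keys=True, separators=(',', ': ')))
# {"a": [1,2],"b": "æøå"}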
def connect_pores(network, pores1, pores2, labels=[], add_conns=True): r''' Returns the possible connections between two group of pores, and optionally makes the connections. See ``Notes`` for advanced usage. Parameters ---------- network : OpenPNM Network Object pores1 : array_like The first group of pores on the network pores2 : array_like The second group of pores on the network labels : list of strings The labels to apply to the new throats. This argument is only needed if ``add_conns`` is True. add_conns : bool Indicates whether the connections should be added to the supplied network (default is True). Otherwise, the connections are returned as an Nt x 2 array that can be passed directly to ``extend``. Notes ----- (1) The method also works if ``pores1`` and ``pores2`` are list of lists, in which case it consecutively connects corresponding members of the two lists in a 1-to-1 fashion. Example: pores1 = [[0, 1], [2, 3]] and pores2 = [[5], [7, 9]] leads to creation of the following connections: 0 --> 5 2 --> 7 3 --> 7 1 --> 5 2 --> 9 3 --> 9 (2) If you want to use the batch functionality, make sure that each element within ``pores1`` and ``pores2`` are of type list or ndarray. (3) It creates the connections in a format which is acceptable by the default OpenPNM connection ('throat.conns') and either adds them to the network or returns them. Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> pn.Nt 300 >>> op.topotools.connect_pores(network=pn, pores1=[22, 32], ... pores2=[16, 80, 68]) >>> pn.Nt 306 >>> pn['throat.conns'][300:306] array([[16, 22], [22, 80], [22, 68], [16, 32], [32, 80], [32, 68]]) ''' # Assert that `pores1` and `pores2` are list of lists try: len(pores1[0]) except (TypeError, IndexError): pores1 = [pores1] try: len(pores2[0]) except (TypeError, IndexError): pores2 = [pores2] if len(pores1) != len(pores2): raise Exception('Running in batch mode! pores1 and pores2 must be' + \ ' of the same length.') arr1, arr2 = [], [] for ps1, ps2 in zip(pores1, pores2): size1 = sp.size(ps1) size2 = sp.size(ps2) arr1.append(sp.repeat(ps1, size2)) arr2.append(sp.tile(ps2, size1)) conns = sp.vstack([sp.concatenate(arr1), sp.concatenate(arr2)]).T if add_conns: extend(network=network, throat_conns=conns, labels=labels) else: return conns
r'''
Returns the possible connections between two groups of pores, and optionally
makes the connections. See ``Notes`` for advanced usage.

Parameters
----------
network : OpenPNM Network Object

pores1 : array_like
    The first group of pores on the network

pores2 : array_like
    The second group of pores on the network

labels : list of strings
    The labels to apply to the new throats.  This argument is only needed
    if ``add_conns`` is True.

add_conns : bool
    Indicates whether the connections should be added to the supplied
    network (default is True).  Otherwise, the connections are returned
    as an Nt x 2 array that can be passed directly to ``extend``.

Notes
-----
(1) The method also works if ``pores1`` and ``pores2`` are list of lists,
in which case it consecutively connects corresponding members of the two
lists in a 1-to-1 fashion. Example: pores1 = [[0, 1], [2, 3]] and
pores2 = [[5], [7, 9]] leads to creation of the following connections:

    0 --> 5     2 --> 7     3 --> 7
    1 --> 5     2 --> 9     3 --> 9

(2) If you want to use the batch functionality, make sure that each element
within ``pores1`` and ``pores2`` are of type list or ndarray.

(3) It creates the connections in a format which is acceptable by the
default OpenPNM connection ('throat.conns') and either adds them to the
network or returns them.

Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> pn.Nt
300
>>> op.topotools.connect_pores(network=pn, pores1=[22, 32],
...                            pores2=[16, 80, 68])
>>> pn.Nt
306
>>> pn['throat.conns'][300:306]
array([[16, 22],
       [22, 80],
       [22, 68],
       [16, 32],
       [32, 80],
       [32, 68]])
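As a sketch of the batch mode described in Note (1), assuming openpnm is installed; with add_conns=False the pairings are returned instead of being added to the network:

import openpnm as op

pn = op.network.Cubic(shape=[5, 5, 5])
conns = op.topotools.connect_pores(network=pn,
                                   pores1=[[0, 1], [2, 3]],
                                   pores2=[[5], [7, 9]],
                                   add_conns=False)
print(conns)
# [[0 5]
#  [1 5]
#  [2 7]
#  [2 9]
#  [3 7]
#  [3 9]]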
def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None): """ Returns a description of each Auto Scaling instance in the instance_ids list. If a list is not provided, the service returns the full details of all instances up to a maximum of fifty. This action supports pagination by returning a token if there are more pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter. :type instance_ids: list :param instance_ids: List of Autoscaling Instance IDs which should be searched for. :type max_records: int :param max_records: Maximum number of results to return. :rtype: list :returns: List of :class:`boto.ec2.autoscale.activity.Activity` objects. """ params = {} if instance_ids: self.build_list_params(params, instance_ids, 'InstanceIds') if max_records: params['MaxRecords'] = max_records if next_token: params['NextToken'] = next_token return self.get_list('DescribeAutoScalingInstances', params, [('member', Instance)])
Returns a description of each Auto Scaling instance in the instance_ids list. If a list is not provided, the service returns the full details of all instances up to a maximum of fifty. This action supports pagination by returning a token if there are more pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter. :type instance_ids: list :param instance_ids: List of Autoscaling Instance IDs which should be searched for. :type max_records: int :param max_records: Maximum number of results to return. :rtype: list :returns: List of :class:`boto.ec2.autoscale.activity.Activity` objects.
def dump_tables_to_tskit(pop): """ Converts fwdpy11.TableCollection to an tskit.TreeSequence """ node_view = np.array(pop.tables.nodes, copy=True) node_view['time'] -= node_view['time'].max() node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0 edge_view = np.array(pop.tables.edges, copy=False) mut_view = np.array(pop.tables.mutations, copy=False) tc = tskit.TableCollection(pop.tables.genome_length) # We must initialize population and individual # tables before we can do anything else. # Attempting to set population to anything # other than -1 in an tskit.NodeTable will # raise an exception if the PopulationTable # isn't set up. _initializePopulationTable(node_view, tc) node_to_individual = _initializeIndividualTable(pop, tc) individual = [-1 for i in range(len(node_view))] for k, v in node_to_individual.items(): individual[k] = v flags = [1]*2*pop.N + [0]*(len(node_view) - 2*pop.N) # Bug fixed in 0.3.1: add preserved nodes to samples list for i in pop.tables.preserved_nodes: flags[i] = 1 tc.nodes.set_columns(flags=flags, time=node_view['time'], population=node_view['population'], individual=individual) tc.edges.set_columns(left=edge_view['left'], right=edge_view['right'], parent=edge_view['parent'], child=edge_view['child']) mpos = np.array([pop.mutations[i].pos for i in mut_view['key']]) ancestral_state = np.zeros(len(mut_view), dtype=np.int8)+ord('0') ancestral_state_offset = np.arange(len(mut_view)+1, dtype=np.uint32) tc.sites.set_columns(position=mpos, ancestral_state=ancestral_state, ancestral_state_offset=ancestral_state_offset) derived_state = np.zeros(len(mut_view), dtype=np.int8)+ord('1') md, mdo = _generate_mutation_metadata(pop) tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32), node=mut_view['node'], derived_state=derived_state, derived_state_offset=ancestral_state_offset, metadata=md, metadata_offset=mdo) return tc.tree_sequence()
Converts fwdpy11.TableCollection to a tskit.TreeSequence
def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs): """Recovers the Entrez data from a prior NCBI webhistory search, in batches of defined size, using Efetch. Returns all results as a list. - record: Entrez webhistory record - expected: number of expected search returns - batchsize: how many search returns to retrieve in a batch - *fnargs: arguments to Efetch - **fnkwargs: keyword arguments to Efetch """ results = [] for start in range(0, expected, batchsize): batch_handle = entrez_retry( Entrez.efetch, retstart=start, retmax=batchsize, webenv=record["WebEnv"], query_key=record["QueryKey"], *fnargs, **fnkwargs) batch_record = Entrez.read(batch_handle, validate=False) results.extend(batch_record) return results
Recovers the Entrez data from a prior NCBI webhistory search, in batches of defined size, using Efetch. Returns all results as a list. - record: Entrez webhistory record - expected: number of expected search returns - batchsize: how many search returns to retrieve in a batch - *fnargs: arguments to Efetch - **fnkwargs: keyword arguments to Efetch
def vnic_compose_empty(device=None):
    """
    Compose empty vNIC for next attaching to a network

    :param device: <vim.vm.device.VirtualVmxnet3 or None> Device for which this 'spec' will be composed.
                   If 'None', a new device will be composed.
                   The 'Operation' (edit/add) depends on whether the device existed.
    :return: <vim.vm.device.VirtualDeviceSpec>
    """
    nicspec = vim.vm.device.VirtualDeviceSpec()

    if device:
        nicspec.device = device
        nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    else:
        nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nicspec.device = vim.vm.device.VirtualVmxnet3()
        nicspec.device.wakeOnLanEnabled = True
        nicspec.device.deviceInfo = vim.Description()

    nicspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nicspec.device.connectable.startConnected = True
    nicspec.device.connectable.allowGuestControl = True
    return nicspec
Compose empty vNIC for next attaching to a network

:param device: <vim.vm.device.VirtualVmxnet3 or None> Device for which this 'spec' will be composed.
               If 'None', a new device will be composed.
               The 'Operation' (edit/add) depends on whether the device existed.
:return: <vim.vm.device.VirtualDeviceSpec>
def _process_key_val(self, instance, key, val): ''' Logic to let the plugin instance process the redis key/val Split out for unit testing @param instance: the plugin instance @param key: the redis key @param val: the key value from redis ''' if instance.check_precondition(key, val): combined = '{k}:{v}'.format(k=key, v=val) self._increment_total_stat(combined) self._increment_plugin_stat( instance.__class__.__name__, combined) instance.handle(key, val) self.redis_conn.delete(key) failkey = self._get_fail_key(key) if self.redis_conn.exists(failkey): self.redis_conn.delete(failkey)
Logic to let the plugin instance process the redis key/val Split out for unit testing @param instance: the plugin instance @param key: the redis key @param val: the key value from redis
def get_package_version(self): """ Get the version of the package :return: """ output = subprocess.check_output([ '{}'.format(self.python), 'setup.py', '--version', ]).decode() return output.rstrip()
Get the version of the package :return:
def _to_args(x): """Convert to args representation""" if not isinstance(x, (list, tuple, np.ndarray)): x = [x] return x
Convert to args representation
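The helper is small enough to restate verbatim for a quick demonstration of the wrapping behaviour: scalars become single-element lists, while sequences and arrays pass through unchanged.

import numpy as np

def _to_args(x):
    if not isinstance(x, (list, tuple, np.ndarray)):
        x = [x]
    return x

print(_to_args(3.5))           # [3.5]
print(_to_args([1, 2]))        # [1, 2]
print(_to_args(np.arange(3)))  # [0 1 2]  (ndarray passed through unchanged)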
def precision_recall(y_true, y_score, ax=None): """ Plot precision-recall curve. Parameters ---------- y_true : array-like, shape = [n_samples] Correct target values (ground truth). y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary classification or [n_samples, n_classes] for multiclass Target scores (estimator predictions). ax : matplotlib Axes Axes object to draw the plot onto, otherwise uses current Axes Notes ----- It is assumed that the y_score parameter columns are in order. For example, if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score must countain the scores for class 0, second column for class 1 and so on. Returns ------- ax: matplotlib Axes Axes containing the plot Examples -------- .. plot:: ../../examples/precision_recall.py """ if any((val is None for val in (y_true, y_score))): raise ValueError('y_true and y_score are needed to plot ' 'Precision-Recall') if ax is None: ax = plt.gca() # get the number of classes from y_score y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score) if y_score_is_vector: n_classes = 2 else: _, n_classes = y_score.shape # check data shape? if n_classes > 2: # convert y_true to binary format y_true_bin = label_binarize(y_true, classes=np.unique(y_true)) _precision_recall_multi(y_true_bin, y_score, ax=ax) for i in range(n_classes): _precision_recall(y_true_bin[:, i], y_score[:, i], ax=ax) else: if y_score_is_vector: _precision_recall(y_true, y_score, ax) else: _precision_recall(y_true, y_score[:, 1], ax) # raise error if n_classes = 1? return ax
Plot precision-recall curve.

Parameters
----------
y_true : array-like, shape = [n_samples]
    Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
    classification or [n_samples, n_classes] for multiclass
    Target scores (estimator predictions).
ax : matplotlib Axes
    Axes object to draw the plot onto, otherwise uses current Axes

Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.

Returns
-------
ax: matplotlib Axes
    Axes containing the plot

Examples
--------
.. plot:: ../../examples/precision_recall.py
def bind( self, server_name, script_name=None, subdomain=None, url_scheme="http", default_method="GET", path_info=None, query_args=None, ): """Return a new :class:`MapAdapter` with the details specified to the call. Note that `script_name` will default to ``'/'`` if not further specified or `None`. The `server_name` at least is a requirement because the HTTP RFC requires absolute URLs for redirects and so all redirect exceptions raised by Werkzeug will contain the full canonical URL. If no path_info is passed to :meth:`match` it will use the default path info passed to bind. While this doesn't really make sense for manual bind calls, it's useful if you bind a map to a WSGI environment which already contains the path info. `subdomain` will default to the `default_subdomain` for this map if no defined. If there is no `default_subdomain` you cannot use the subdomain feature. .. versionadded:: 0.7 `query_args` added .. versionadded:: 0.8 `query_args` can now also be a string. .. versionchanged:: 0.15 ``path_info`` defaults to ``'/'`` if ``None``. """ server_name = server_name.lower() if self.host_matching: if subdomain is not None: raise RuntimeError("host matching enabled and a subdomain was provided") elif subdomain is None: subdomain = self.default_subdomain if script_name is None: script_name = "/" if path_info is None: path_info = "/" try: server_name = _encode_idna(server_name) except UnicodeError: raise BadHost() return MapAdapter( self, server_name, script_name, subdomain, url_scheme, path_info, default_method, query_args, )
Return a new :class:`MapAdapter` with the details specified to the call.
Note that `script_name` will default to ``'/'`` if not further specified
or `None`.  The `server_name` at least is a requirement because the HTTP
RFC requires absolute URLs for redirects and so all redirect exceptions
raised by Werkzeug will contain the full canonical URL.

If no path_info is passed to :meth:`match` it will use the default path
info passed to bind.  While this doesn't really make sense for manual
bind calls, it's useful if you bind a map to a WSGI environment which
already contains the path info.

`subdomain` will default to the `default_subdomain` for this map if not
defined.  If there is no `default_subdomain` you cannot use the subdomain
feature.

.. versionadded:: 0.7
   `query_args` added

.. versionadded:: 0.8
   `query_args` can now also be a string.

.. versionchanged:: 0.15
    ``path_info`` defaults to ``'/'`` if ``None``.
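A typical round trip with Werkzeug's public routing API, using a made-up rule and host name:

from werkzeug.routing import Map, Rule

url_map = Map([Rule('/about', endpoint='about')])
adapter = url_map.bind('example.com', url_scheme='https')
print(adapter.match('/about'))  # ('about', {})
print(adapter.build('about'))   # '/about'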
def dispatch_request(self): """Modified version of Flask.dispatch_request to call process_view.""" req = _request_ctx_stack.top.request app = current_app # Return flask's default options response. See issue #40 if req.method == "OPTIONS": return app.make_default_options_response() if req.routing_exception is not None: app.raise_routing_exception(req) # The routing rule has some handy attributes to extract how Flask found # this endpoint rule = req.url_rule # Wrap the real view_func view_func = self.wrap_view_func( app, rule, req, app.view_functions[rule.endpoint], req.view_args ) return view_func(**req.view_args)
Modified version of Flask.dispatch_request to call process_view.
def autoencoder_range(rhp): """Tuning grid of the main autoencoder params.""" rhp.set_float("dropout", 0.01, 0.3) rhp.set_float("gan_loss_factor", 0.01, 0.1) rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE) rhp.set_discrete("bottleneck_warmup_steps", [200, 2000]) rhp.set_float("gumbel_temperature", 0, 1) rhp.set_float("gumbel_noise_factor", 0, 0.5)
Tuning grid of the main autoencoder params.
def array_from_nested_dictionary( nested_dict, array_fn, dtype="float32", square_result=False): """ Parameters ---------- nested_dict : dict Dictionary which contains dictionaries array_fn : function Takes shape and dtype as arguments, returns empty array. dtype : dtype NumPy dtype of result array square_result : bool Combine keys from outer and inner dictionaries. Returns array and sorted lists of the outer and inner keys. """ if square_result: outer_key_indices = inner_key_indices = flattened_nested_key_indices( nested_dict) else: outer_key_indices, inner_key_indices = nested_key_indices( nested_dict) n_rows = len(outer_key_indices) n_cols = len(inner_key_indices) shape = (n_rows, n_cols) result = array_fn(shape, dtype) for outer_key, sub_dictionary in nested_dict.items(): i = outer_key_indices[outer_key] for inner_key, value in sub_dictionary.items(): j = inner_key_indices[inner_key] result[i, j] = value outer_key_list = index_dict_to_sorted_list(outer_key_indices) inner_key_list = index_dict_to_sorted_list(inner_key_indices) return result, outer_key_list, inner_key_list
Parameters ---------- nested_dict : dict Dictionary which contains dictionaries array_fn : function Takes shape and dtype as arguments, returns empty array. dtype : dtype NumPy dtype of result array square_result : bool Combine keys from outer and inner dictionaries. Returns array and sorted lists of the outer and inner keys.
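A hypothetical call, assuming the function is importable from its defining module (not shown here) and using numpy.zeros as array_fn; the sample and gene names are invented for illustration:

import numpy as np

nested = {'sample2': {'geneA': 1.0},
          'sample1': {'geneA': 2.0, 'geneB': 3.0}}
result, outer_keys, inner_keys = array_from_nested_dictionary(nested, np.zeros)
# Per the docstring, outer_keys and inner_keys come back sorted
# (['sample1', 'sample2'] and ['geneA', 'geneB']); result is a 2x2 float32
# array holding the supplied values, with missing entries left at zero
# because np.zeros was used as array_fn.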
def init(): """Execute init tasks for all components (virtualenv, pip).""" if not os.path.isdir('venv'): print(cyan('\nCreating the virtual env...')) local('pyvenv-3.4 venv') print(green('Virtual env created.')) print(green('Virtual Environment ready.'))
Execute init tasks for all components (virtualenv, pip).
def post(request): """Returns a serialized object""" data = request.POST or json.loads(request.body)['body'] guid = data.get('guid', None) res = Result() if guid: obj = getObjectsFromGuids([guid,])[0] comment = Comment() comment.comment = data.get('comment', 'No comment') comment.user = request.user comment.user_name = request.user.get_full_name() comment.user_email = request.user.email comment.content_object = obj # For our purposes, we never have more than one site comment.site_id = 1 comment.save() obj.comment_count += 1 obj.save() emailComment(comment, obj, request) res.append(commentToJson(comment)) return JsonResponse(res.asDict())
Returns a serialized object
def get_interface_name():
    """
    Returns the name of the first interface that is neither link-local nor loopback.
    """
    interface_name = ''
    interfaces = psutil.net_if_addrs()
    for name, details in interfaces.items():
        for detail in details:
            if detail.family == socket.AF_INET:
                ip_address = ipaddress.ip_address(detail.address)
                if not (ip_address.is_link_local or ip_address.is_loopback):
                    interface_name = name
                    break
    return interface_name
Returns the name of the first interface that is neither link-local nor loopback.
def get_content_of_file(self, project, repository, filename, at=None, markup=None): """ Retrieve the raw content for a file path at a specified revision. The authenticated user must have REPO_READ permission for the specified repository to call this resource. :param project: :param repository: :param filename: :param at: OPTIONAL ref string :param markup: if present or "true", triggers the raw content to be markup-rendered and returned as HTML; otherwise, if not specified, or any value other than "true", the content is streamed without markup :return: """ headers = self.form_token_headers url = 'projects/{project}/repos/{repository}/raw/{filename}/'.format(project=project, repository=repository, filename=filename) params = {} if at is not None: params['at'] = at if markup is not None: params['markup'] = markup return self.get(url, params=params, not_json_response=True, headers=headers)
Retrieve the raw content for a file path at a specified revision. The authenticated user must have REPO_READ permission for the specified repository to call this resource. :param project: :param repository: :param filename: :param at: OPTIONAL ref string :param markup: if present or "true", triggers the raw content to be markup-rendered and returned as HTML; otherwise, if not specified, or any value other than "true", the content is streamed without markup :return:
def register_identity(self, id_stmt): """Register `id_stmt` with its base identity, if any. """ bst = id_stmt.search_one("base") if bst: bder = self.identity_deps.setdefault(bst.i_identity, []) bder.append(id_stmt)
Register `id_stmt` with its base identity, if any.
def auth(self, auth_key): """ Performs the initial authentication on connect """ if self._socket is None: self._socket = self._connect() return self.call('auth', {"AuthKey": auth_key}, expect_body=False)
Performs the initial authentication on connect
def extract_zipdir(zip_file): """ Extract contents of zip file into subfolder in parent directory. Parameters ---------- zip_file : str Path to zip file Returns ------- str : folder where the zip was extracted """ if not os.path.exists(zip_file): raise ValueError('{} does not exist'.format(zip_file)) directory = os.path.dirname(zip_file) filename = os.path.basename(zip_file) dirpath = os.path.join(directory, filename.replace('.zip', '')) with zipfile.ZipFile(zip_file, 'r', zipfile.ZIP_DEFLATED) as zipf: zipf.extractall(dirpath) return dirpath
Extract contents of zip file into subfolder in parent directory. Parameters ---------- zip_file : str Path to zip file Returns ------- str : folder where the zip was extracted
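A usage sketch that builds a throwaway zip in a temporary directory; extract_zipdir itself is assumed to be importable from its module (import not shown):

import os, tempfile, zipfile

tmp = tempfile.mkdtemp()
zip_path = os.path.join(tmp, 'bundle.zip')
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
    zipf.writestr('readme.txt', 'hello')

out_dir = extract_zipdir(zip_path)
print(out_dir)              # .../bundle
print(os.listdir(out_dir))  # ['readme.txt']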
def remove(name=None, pkgs=None, purge=False, **kwargs): ''' Remove a single package with pkg_delete Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = [x.split('--')[0] for x in __salt__['pkg_resource.parse_targets'](name, pkgs)[0]] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = ['pkg_delete', '-Ix', '-Ddependencies'] if purge: cmd.append('-cqq') cmd.extend(targets) out = __salt__['cmd.run_all']( cmd, python_shell=False, output_loglevel='trace' ) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret
Remove a single package with pkg_delete Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
def streaming_market_data_filter(fields=None, ladder_levels=None): """ :param list fields: EX_BEST_OFFERS_DISP, EX_BEST_OFFERS, EX_ALL_OFFERS, EX_TRADED, EX_TRADED_VOL, EX_LTP, EX_MARKET_DEF, SP_TRADED, SP_PROJECTED :param int ladder_levels: 1->10 :return: dict """ args = locals() return { to_camel_case(k): v for k, v in args.items() if v is not None }
:param list fields: EX_BEST_OFFERS_DISP, EX_BEST_OFFERS, EX_ALL_OFFERS, EX_TRADED, EX_TRADED_VOL, EX_LTP, EX_MARKET_DEF, SP_TRADED, SP_PROJECTED :param int ladder_levels: 1->10 :return: dict
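Assuming the library's to_camel_case helper behaves as its name suggests, a call like the following should come back with camelCased keys and None-valued arguments dropped:

market_data_filter = streaming_market_data_filter(
    fields=['EX_BEST_OFFERS', 'EX_TRADED'],
    ladder_levels=3,
)
# {'fields': ['EX_BEST_OFFERS', 'EX_TRADED'], 'ladderLevels': 3}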
def _predicted(self): """The predicted values of y ('yhat').""" return np.squeeze( np.matmul(self.xwins, np.expand_dims(self.solution, axis=-1)) )
The predicted values of y ('yhat').
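The same matmul/squeeze on toy arrays, under the assumption that xwins is shaped (n_windows, n_obs, n_features) and solution holds one coefficient vector per window; the property is effectively yhat = X @ beta computed window by window:

import numpy as np

xwins = np.arange(12.0).reshape(2, 3, 2)   # 2 windows, 3 observations, 2 features
solution = np.ones((2, 2))                 # one coefficient vector per window
yhat = np.squeeze(np.matmul(xwins, np.expand_dims(solution, axis=-1)))
print(yhat.shape)                          # (2, 3)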
def get_apps_json(self, url, timeout, auth, acs_url, ssl_verify, tags, group): """ The dictionary containing the apps is cached during collection and reset at every `check()` call. """ if self.apps_response is not None: return self.apps_response # Marathon apps if group is None: # embed=apps.counts is not a required parameter but will be in the future: # http://mesosphere.github.io/marathon/1.4/docs/rest-api.html#get-v2apps marathon_path = urljoin(url, "v2/apps?embed=apps.counts") else: marathon_path = urljoin( url, "v2/groups/{}?embed=group.groups".format(group) + "&embed=group.apps&embed=group.apps.counts" ) self.apps_response = self.get_json(marathon_path, timeout, auth, acs_url, ssl_verify, tags) return self.apps_response
The dictionary containing the apps is cached during collection and reset at every `check()` call.
def horiz_string(*args, **kwargs): """ Horizontally concatenates strings reprs preserving indentation Concats a list of objects ensuring that the next item in the list is all the way to the right of any previous items. Args: *args: list of strings to concat **kwargs: precision, sep CommandLine: python -m utool.util_str --test-horiz_string Example1: >>> # ENABLE_DOCTEST >>> # Pretty printing of matrices demo / test >>> import utool >>> import numpy as np >>> # Wouldn't it be nice if we could print this operation easily? >>> B = np.array(((1, 2), (3, 4))) >>> C = np.array(((5, 6), (7, 8))) >>> A = B.dot(C) >>> # Eg 1: >>> result = (utool.hz_str('A = ', A, ' = ', B, ' * ', C)) >>> print(result) A = [[19 22] = [[1 2] * [[5 6] [43 50]] [3 4]] [7 8]] Exam2: >>> # Eg 2: >>> str_list = ['A = ', str(B), ' * ', str(C)] >>> horizstr = (utool.horiz_string(*str_list)) >>> result = (horizstr) >>> print(result) A = [[1 2] * [[5 6] [3 4]] [7 8]] """ import unicodedata precision = kwargs.get('precision', None) sep = kwargs.get('sep', '') if len(args) == 1 and not isinstance(args[0], six.string_types): val_list = args[0] else: val_list = args val_list = [unicodedata.normalize('NFC', ensure_unicode(val)) for val in val_list] all_lines = [] hpos = 0 # for each value in the list or args for sx in range(len(val_list)): # Ensure value is a string val = val_list[sx] str_ = None if precision is not None: # Hack in numpy precision if util_type.HAVE_NUMPY: try: if isinstance(val, np.ndarray): str_ = np.array_str(val, precision=precision, suppress_small=True) except ImportError: pass if str_ is None: str_ = six.text_type(val_list[sx]) # continue with formating lines = str_.split('\n') line_diff = len(lines) - len(all_lines) # Vertical padding if line_diff > 0: all_lines += [' ' * hpos] * line_diff # Add strings for lx, line in enumerate(lines): all_lines[lx] += line hpos = max(hpos, len(all_lines[lx])) # Horizontal padding for lx in range(len(all_lines)): hpos_diff = hpos - len(all_lines[lx]) all_lines[lx] += ' ' * hpos_diff + sep all_lines = [line.rstrip(' ') for line in all_lines] ret = '\n'.join(all_lines) return ret
Horizontally concatenates strings reprs preserving indentation Concats a list of objects ensuring that the next item in the list is all the way to the right of any previous items. Args: *args: list of strings to concat **kwargs: precision, sep CommandLine: python -m utool.util_str --test-horiz_string Example1: >>> # ENABLE_DOCTEST >>> # Pretty printing of matrices demo / test >>> import utool >>> import numpy as np >>> # Wouldn't it be nice if we could print this operation easily? >>> B = np.array(((1, 2), (3, 4))) >>> C = np.array(((5, 6), (7, 8))) >>> A = B.dot(C) >>> # Eg 1: >>> result = (utool.hz_str('A = ', A, ' = ', B, ' * ', C)) >>> print(result) A = [[19 22] = [[1 2] * [[5 6] [43 50]] [3 4]] [7 8]] Exam2: >>> # Eg 2: >>> str_list = ['A = ', str(B), ' * ', str(C)] >>> horizstr = (utool.horiz_string(*str_list)) >>> result = (horizstr) >>> print(result) A = [[1 2] * [[5 6] [3 4]] [7 8]]
def thread_function(self): """Thread function.""" self.__subscribed = True url = SUBSCRIBE_ENDPOINT + "?token=" + self._session_token data = self._session.query(url, method='GET', raw=True, stream=True) if not data or not data.ok: _LOGGER.debug("Did not receive a valid response. Aborting..") return None self.__sseclient = sseclient.SSEClient(data) try: for event in (self.__sseclient).events(): if not self.__subscribed: break data = json.loads(event.data) if data.get('status') == "connected": _LOGGER.debug("Successfully subscribed this base station") elif data.get('action'): action = data.get('action') resource = data.get('resource') if action == "logout": _LOGGER.debug("Logged out by some other entity") self.__subscribed = False break elif action == "is" and "subscriptions/" not in resource: self.__events.append(data) self.__event_handle.set() except TypeError as error: _LOGGER.debug("Got unexpected error: %s", error) return None return True
Thread function.
def set_base_headers(self, hdr): """Set metadata in FITS headers.""" hdr['NUMXVER'] = (__version__, 'Numina package version') hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name') hdr['NUMRVER'] = (self.__version__, 'Numina recipe version') return hdr
Set metadata in FITS headers.
def removeThing(self, thingTypeId, thingId): """ Delete an existing thing. It accepts thingTypeId (string) and thingId (string) as parameters In case of failure it throws APIException """ thingUrl = ApiClient.thingUrl % (self.host, thingTypeId, thingId) r = requests.delete(thingUrl, auth=self.credentials, verify=self.verify) status = r.status_code if status == 204: self.logger.debug("Thing was successfully removed") return True elif status == 401: raise ibmiotf.APIException(401, "The authentication token is empty or invalid", None) elif status == 403: raise ibmiotf.APIException(403, "The authentication method is invalid or the api key used does not exist", None) elif status == 404: raise ibmiotf.APIException(404, "A thing type or thing instance with the specified id does not exist.", None) elif status == 409: raise ibmiotf.APIException(409, "The thing instance is aggregated into another thing instance.", None) elif status == 500: raise ibmiotf.APIException(500, "Unexpected error", None) else: raise ibmiotf.APIException(None, "Unexpected error", None)
Delete an existing thing. It accepts thingTypeId (string) and thingId (string) as parameters In case of failure it throws APIException
def enc_setup(self, enc_alg, msg, auth_data=b'', key=None, iv=""): """ Encrypt JWE content. :param enc_alg: The JWE "enc" value specifying the encryption algorithm :param msg: The plain text message :param auth_data: Additional authenticated data :param key: Key (CEK) :return: Tuple (ciphertext, tag), both as bytes """ iv = self._generate_iv(enc_alg, iv) if enc_alg in ["A192GCM", "A128GCM", "A256GCM"]: aes = AES_GCMEncrypter(key=key) ctx, tag = split_ctx_and_tag(aes.encrypt(msg, iv, auth_data)) elif enc_alg in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]: aes = AES_CBCEncrypter(key=key) ctx, tag = aes.encrypt(msg, iv, auth_data) else: raise NotSupportedAlgorithm(enc_alg) return ctx, tag, aes.key
Encrypt JWE content. :param enc_alg: The JWE "enc" value specifying the encryption algorithm :param msg: The plain text message :param auth_data: Additional authenticated data :param key: Key (CEK) :return: Tuple (ciphertext, tag), both as bytes
def create(cls, zmq_context, endpoint): """Create new server transport. Instead of creating the socket yourself, you can call this function and merely pass the :py:class:`zmq.core.context.Context` instance. By passing a context imported from :py:mod:`zmq.green`, you can use green (gevent) 0mq sockets as well. :param zmq_context: A 0mq context. :param endpoint: The endpoint clients will connect to. """ socket = zmq_context.socket(zmq.ROUTER) socket.bind(endpoint) return cls(socket)
Create new server transport. Instead of creating the socket yourself, you can call this function and merely pass the :py:class:`zmq.core.context.Context` instance. By passing a context imported from :py:mod:`zmq.green`, you can use green (gevent) 0mq sockets as well. :param zmq_context: A 0mq context. :param endpoint: The endpoint clients will connect to.
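Hypothetical usage; the class name and endpoint below are illustrative rather than taken from the source, and assume the enclosing class is a 0mq ROUTER-based server transport:

import zmq

ctx = zmq.Context()
transport = ZmqServerTransport.create(ctx, 'tcp://127.0.0.1:5001')
# The returned transport wraps a ROUTER socket already bound to the endpoint;
# pass a zmq.green context instead for gevent-friendly sockets.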
def formfield_for_dbfield(self, db_field, **kwargs): """ Allow formfield_overrides to contain field names too. """ overrides = self.formfield_overrides.get(db_field.name) if overrides: kwargs.update(overrides) field = super(AbstractEntryBaseAdmin, self).formfield_for_dbfield(db_field, **kwargs) # Pass user to the form. if db_field.name == 'author': field.user = kwargs['request'].user return field
Allow formfield_overrides to contain field names too.
def email_action_view(self, id, action): """ Perform action 'action' on UserEmail object 'id' """ # Retrieve UserEmail by id user_email = self.db_manager.get_user_email_by_id(id=id) # Users may only change their own UserEmails if not user_email or user_email.user_id != current_user.id: return self.unauthorized_view() # Delete UserEmail if action == 'delete': # Primary UserEmail can not be deleted if user_email.is_primary: return self.unauthorized_view() # Delete UserEmail self.db_manager.delete_object(user_email) self.db_manager.commit() # Set UserEmail.is_primary elif action == 'make-primary': # Disable previously primary emails user_emails = self.db_manager.find_user_emails(current_user) for other_user_email in user_emails: if other_user_email.is_primary: other_user_email.is_primary=False self.db_manager.save_object(other_user_email) # Enable current primary email user_email.is_primary=True self.db_manager.save_object(user_email) self.db_manager.commit() # Send confirm email elif action == 'confirm': self._send_confirm_email_email(user_email.user, user_email) else: return self.unauthorized_view() return redirect(url_for('user.manage_emails'))
Perform action 'action' on UserEmail object 'id'
def read_table(self): """ Read an AMQP table, and return as a Python dictionary. """ self.bitcount = self.bits = 0 tlen = unpack('>I', self.input.read(4))[0] table_data = AMQPReader(self.input.read(tlen)) result = {} while table_data.input.tell() < tlen: name = table_data.read_shortstr() ftype = ord(table_data.input.read(1)) if ftype == 83: # 'S' val = table_data.read_longstr() elif ftype == 73: # 'I' val = unpack('>i', table_data.input.read(4))[0] elif ftype == 68: # 'D' d = table_data.read_octet() n = unpack('>i', table_data.input.read(4))[0] val = Decimal(n) / Decimal(10 ** d) elif ftype == 84: # 'T' val = table_data.read_timestamp() elif ftype == 70: # 'F' val = table_data.read_table() # recurse else: raise ValueError('Unknown table item type: %s' % repr(ftype)) result[name] = val return result
Read an AMQP table, and return as a Python dictionary.