Columns: code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
def _expand_scheduledict(scheduledict):
    """Converts a dict of items, some of which are scalar and some of which
    are lists, to a list of dicts with scalar items."""
    result = []

    def f(d):
        nonlocal result
        d2 = {}
        # Normalise cron-style string values into concrete values.
        for k, v in d.items():
            if isinstance(v, str) and _cronslash(v, k) is not None:
                d[k] = _cronslash(v, k)
        # Collect the scalar (non-iterable) items.
        for k, v in d.items():
            if isinstance(v, Iterable):
                continue
            else:
                d2[k] = v
        # If every item is scalar, this dict is fully expanded.
        if len(d2.keys()) == len(d.keys()):
            result.append(d2)
            return
        # Otherwise expand the first iterable item and recurse.
        for k, v in d.items():
            if isinstance(v, Iterable):
                for i in v:
                    dprime = dict(**d)
                    dprime[k] = i
                    f(dprime)
                break

    f(scheduledict)
    return result
Converts a dict of items, some of which are scalar and some of which are lists, to a list of dicts with scalar items.
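A small usage sketch for the helper above; plain integer values are used so the cron-style string handling (via _cronslash) is not exercised, and Iterable is assumed to be imported in the surrounding module.

schedule = {'minute': [0, 30], 'hour': 9}
_expand_scheduledict(schedule)
# -> [{'minute': 0, 'hour': 9}, {'minute': 30, 'hour': 9}]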
def get_ecf_props(ep_id, ep_id_ns, rsvc_id=None, ep_ts=None):
    """
    Prepares the ECF properties

    :param ep_id: Endpoint ID
    :param ep_id_ns: Namespace of the Endpoint ID
    :param rsvc_id: Remote service ID
    :param ep_ts: Timestamp of the endpoint
    :return: A dictionary of ECF properties
    """
    results = {}
    if not ep_id:
        raise ArgumentError("ep_id", "ep_id must be a valid endpoint id")
    results[ECF_ENDPOINT_ID] = ep_id
    if not ep_id_ns:
        raise ArgumentError("ep_id_ns", "ep_id_ns must be a valid namespace")
    results[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = ep_id_ns
    if not rsvc_id:
        rsvc_id = get_next_rsid()
    results[ECF_RSVC_ID] = rsvc_id
    if not ep_ts:
        ep_ts = time_since_epoch()
    results[ECF_ENDPOINT_TIMESTAMP] = ep_ts
    return results
Prepares the ECF properties :param ep_id: Endpoint ID :param ep_id_ns: Namespace of the Endpoint ID :param rsvc_id: Remote service ID :param ep_ts: Timestamp of the endpoint :return: A dictionary of ECF properties
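A minimal usage sketch for the function above; the ECF_* keys, get_next_rsid() and time_since_epoch() are names assumed to come from the same module.

# Only the two mandatory arguments are supplied; the remote service id and
# timestamp fall back to get_next_rsid() and time_since_epoch().
props = get_ecf_props("my.endpoint.id", "ecf.namespace.uuid")
# props maps ECF_ENDPOINT_ID, ECF_ENDPOINT_CONTAINERID_NAMESPACE,
# ECF_RSVC_ID and ECF_ENDPOINT_TIMESTAMP to their values.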
def inspect(self, tab_width=2, ident_char='-'):
    """
    Inspects a project file structure based on the instance folder property.

    :param tab_width: width size for subfolders and files.
    :param ident_char: char to be used to show indentation level

    Returns
        A string containing the project structure.
    """
    startpath = self.path
    output = []
    for (root, dirs, files) in os.walk(startpath):
        level = root.replace(startpath, '').count(os.sep)
        indent = ident_char * tab_width * level
        if level == 0:
            output.append('{}{}/'.format(indent, os.path.basename(root)))
        else:
            output.append('|{}{}/'.format(indent, os.path.basename(root)))
        subindent = ident_char * tab_width * (level + 1)
        for f in files:
            output.append('|{}{}'.format(subindent, f))
    return '\n'.join(output)
Inspects a project file structure based on the instance folder property. :param tab_width: width size for subfolders and files. :param ident_char: char to be used to show indentation level Returns A string containing the project structure.
def hclust_linearize(U):
    """Sorts the rows of a matrix by hierarchical clustering.

    Parameters:
        U (ndarray) : matrix of data

    Returns:
        prm (ndarray) : permutation of the rows
    """
    from scipy.cluster import hierarchy
    Z = hierarchy.ward(U)
    return hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, U))
Sorts the rows of a matrix by hierarchical clustering. Parameters: U (ndarray) : matrix of data Returns: prm (ndarray) : permutation of the rows
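A short usage sketch of the function above; the data and shapes are illustrative only.

import numpy as np

U = np.random.rand(20, 5)      # 20 observations, 5 features
order = hclust_linearize(U)    # permutation of the 20 row indices
U_sorted = U[order]            # rows reordered so similar rows sit next to each other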
def calc_surfdist(surface, labels, annot, reg, origin, target):
    import nibabel as nib
    import numpy as np
    import os
    from surfdist import load, utils, surfdist
    import csv
    """
    inputs:
        surface - surface file (e.g. lh.pial, with full path)
        labels - label file (e.g. lh.cortex.label, with full path)
        annot - annot file (e.g. lh.aparc.a2009s.annot, with full path)
        reg - registration file (lh.sphere.reg)
        origin - the label from which we calculate distances
        target - target surface (e.g. fsaverage4)
    """

    # Load stuff
    surf = nib.freesurfer.read_geometry(surface)
    cort = np.sort(nib.freesurfer.read_label(labels))
    src = load.load_freesurfer_label(annot, origin, cort)

    # Calculate distances
    dist = surfdist.dist_calc(surf, cort, src)

    # Project distances to target
    trg = nib.freesurfer.read_geometry(target)[0]
    native = nib.freesurfer.read_geometry(reg)[0]
    idx_trg_to_native = utils.find_node_match(trg, native)[0]

    # Get indices in trg space
    distt = dist[idx_trg_to_native]

    # Write to file and return file handle
    filename = os.path.join(os.getcwd(), 'distances.csv')
    distt.tofile(filename, sep=",")

    return filename
inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4)
def signOp(self, op: Dict, identifier: Identifier = None) -> Request:
    """
    Signs the message if a signer is configured

    :param identifier: signing identifier; if not supplied the default for
        the wallet is used.
    :param op: Operation to be signed
    :return: a signed Request object
    """
    request = Request(operation=op, protocolVersion=CURRENT_PROTOCOL_VERSION)
    return self.signRequest(request, identifier)
Signs the message if a signer is configured :param identifier: signing identifier; if not supplied the default for the wallet is used. :param op: Operation to be signed :return: a signed Request object
def add_wic(self, old_wic, wic):
    """
    Convert the old style WIC slot to a new style WIC slot and add the WIC
    to the node properties

    :param str old_wic: Old WIC slot
    :param str wic: WIC name
    """
    new_wic = 'wic' + old_wic[-1]
    self.node['properties'][new_wic] = wic
Convert the old style WIC slot to a new style WIC slot and add the WIC to the node properties :param str old_wic: Old WIC slot :param str wic: WIC name
def ckchol(M):
    """
    CKCHOL
    This function computes the Cholesky decomposition of the matrix if it's
    positive-definite; else it returns the identity matrix. It was written
    to handle the "matrix must be positive definite" error in linalg.cholesky.

    Version: 2011may03
    """
    from numpy import linalg, matrix, eye, size

    try:
        # First, try the Cholesky decomposition
        output = linalg.cholesky(M)
    except:
        # If not, just return garbage
        print('WARNING: Cholesky failed, so returning (invalid) identity matrix!')
        output = matrix(eye(size(M, 0)))
    return output
CKCHOL This function computes the Cholesky decomposition of the matrix if it's positive-definite; else it returns the identity matrix. It was written to handle the "matrix must be positive definite" error in linalg.cholesky. Version: 2011may03
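A short illustration of the fallback behaviour described above; the matrices are made up for the example.

import numpy as np

A = np.array([[4., 2.], [2., 3.]])   # positive definite
L = ckchol(A)                        # lower-triangular factor, so L @ L.T reproduces A

B = np.array([[1., 2.], [2., 1.]])   # not positive definite
I = ckchol(B)                        # prints the warning and returns the 2x2 identity matrix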
def set_monitoring_transaction_name(name, group=None, priority=None):
    """
    Sets the transaction name for monitoring.

    This is not cached, and only supports reporting to New Relic.
    """
    if not newrelic:
        return
    newrelic.agent.set_transaction_name(name, group, priority)
Sets the transaction name for monitoring. This is not cached, and only supports reporting to New Relic.
def spoolable(*, pre_condition=True, body_params=()):
    """
    Decorates a function to make it spoolable using uWSGI, but if no spooling mechanism is available,
    the function is called synchronously. All decorated function arguments must be picklable and
    the first annotated with `Context` will receive an object that defines the current execution state.

    Return values are always ignored and all exceptions are caught in spooled mode.

    :param pre_condition: additional condition needed to use spooler
    :param body_params: parameter names that can have large values and should use spooler body
    """
    def decorator(func):
        context_name = None
        keyword_kinds = {inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
        invalid_body_params = set(body_params)
        for name, parameter in inspect.signature(func).parameters.items():
            if parameter.kind not in keyword_kinds:
                continue
            if not context_name and parameter.annotation is Context:
                context_name = name
            elif name in invalid_body_params:
                invalid_body_params.remove(name)
        if invalid_body_params:
            raise TypeError('Spoolable task body_params must be keyword arguments')

        task = Task(func, context_name=context_name, pre_condition=pre_condition, body_params=body_params)
        spooler.register(task)
        return task

    return decorator
Decorates a function to make it spoolable using uWSGI, but if no spooling mechanism is available, the function is called synchronously. All decorated function arguments must be picklable and the first annotated with `Context` will receive an object that defines the current execution state. Return values are always ignored and all exceptions are caught in spooled mode. :param pre_condition: additional condition needed to use spooler :param body_params: parameter names that can have large values and should use spooler body
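A hypothetical usage sketch of the decorator above; `Context`, `Task` and `spooler` come from the surrounding module, and the task name and parameters here are invented for illustration.

@spoolable(body_params=('payload',))
def send_report(recipient_id, payload, context: Context = None):
    # `payload` may be large, so it travels in the spooler body; `context`
    # receives the execution-state object because of its Context annotation.
    ...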
def _advance_window(self):
    """Update values in current window and the current window means and variances."""
    x_to_remove, y_to_remove = self._x_in_window[0], self._y_in_window[0]
    self._window_bound_lower += 1
    self._update_values_in_window()
    x_to_add, y_to_add = self._x_in_window[-1], self._y_in_window[-1]

    self._remove_observation(x_to_remove, y_to_remove)
    self._add_observation(x_to_add, y_to_add)
Update values in current window and the current window means and variances.
def add_stack_frame(self, stack_frame):
    """Add StackFrame to frames list."""
    if len(self.stack_frames) >= MAX_FRAMES:
        self.dropped_frames_count += 1
    else:
        self.stack_frames.append(stack_frame.format_stack_frame_json())
Add StackFrame to frames list.
def get_insertions(aln_df): """Get a list of tuples indicating the first and last residues of a insertion region, as well as the length of the insertion. If the first tuple is: (-1, 1) that means the insertion is at the beginning of the original protein (X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein Examples: # Insertion at beginning, length 3 >>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}} >>> my_alignment = pd.DataFrame.from_dict(test) >>> get_insertions(my_alignment) [((-1, 1.0), 3)] Args: aln_df (DataFrame): Alignment DataFrame Returns: list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length) """ insertion_df = aln_df[aln_df['type'] == 'insertion'] # if not insertion_df.empty: # don't need to do this for insertions # insertion_df['id_a_pos'] = insertion_df['id_a_pos'].astype(int) insertions = [] for k, g in groupby(insertion_df.index, key=lambda n, c=count(): n - next(c)): tmp = list(g) insertion_indices = (min(tmp), max(tmp)) insertion_start = insertion_indices[0] - 1 insertion_end = insertion_indices[1] + 1 # Checking if insertion is at the beginning or end if insertion_start < 0: insertion_start = insertion_indices[0] insertion_length = insertion_end - insertion_start elif insertion_end >= len(aln_df): insertion_end = insertion_indices[1] insertion_length = insertion_end - insertion_start else: insertion_length = insertion_end - insertion_start - 1 id_a_pos_insertion_start = aln_df.ix[insertion_start].id_a_pos id_a_pos_insertion_end = aln_df.ix[insertion_end].id_a_pos # Checking if insertion is at the beginning or end if np.isnan(id_a_pos_insertion_start) and id_a_pos_insertion_end == 1: insertion_region = (-1, id_a_pos_insertion_end) elif np.isnan(id_a_pos_insertion_end): insertion_region = (id_a_pos_insertion_start, float('Inf')) else: insertion_region = (id_a_pos_insertion_start, id_a_pos_insertion_end) # Logging where the insertion is if insertion_region[0] == -1: log.debug('Insertion of length {} at beginning'.format(insertion_length)) elif insertion_region[1] == float('Inf'): log.debug('Insertion of length {} at end'.format(insertion_length)) else: log.debug('Insertion of length {} at residues {}'.format(insertion_length, insertion_region)) to_append = (insertion_region, insertion_length) insertions.append(to_append) return insertions
Get a list of tuples indicating the first and last residues of an insertion region, as well as the length of the insertion. If the first tuple is: (-1, 1) that means the insertion is at the beginning of the original protein (X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein Examples: # Insertion at beginning, length 3 >>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}} >>> my_alignment = pd.DataFrame.from_dict(test) >>> get_insertions(my_alignment) [((-1, 1.0), 3)] Args: aln_df (DataFrame): Alignment DataFrame Returns: list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length)
def on_update(self, value, *args, **kwargs):
    """
    Inform the parent of progress.

    :param value: The value of this subprogresscallback
    :param args: Extra positional arguments
    :param kwargs: Extra keyword arguments
    """
    parent_value = self._parent_min

    if self._max != self._min:
        sub_progress = (value - self._min) / (self._max - self._min)
        parent_value = self._parent_min + sub_progress * (self._parent_max - self._parent_min)

    self._parent.update(parent_value, *args, **kwargs)
Inform the parent of progress. :param value: The value of this subprogresscallback :param args: Extra positional arguments :param kwargs: Extra keyword arguments
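A worked instance of the rescaling formula used above, with made-up ranges.

# Sub-callback covers [0, 10]; its parent range is [50, 100].
value, sub_min, sub_max = 2.5, 0, 10
parent_min, parent_max = 50, 100
sub_progress = (value - sub_min) / (sub_max - sub_min)                 # 0.25
parent_value = parent_min + sub_progress * (parent_max - parent_min)   # 62.5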
def getExperimentDescriptionInterfaceFromModule(module):
    """
    :param module: imported description.py module

    :returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`)
              represents the experiment description
    """
    result = module.descriptionInterface
    assert isinstance(result, exp_description_api.DescriptionIface), \
        "expected DescriptionIface-based instance, but got %s" % type(result)

    return result
:param module: imported description.py module :returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`) represents the experiment description
def colorize(text='', opts=(), **kwargs): """ Returns your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Returns the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print colorize('first line', fg='red', opts=('noreset',)) print 'this should be red too' print colorize('and so should this') print 'this should not be red' """ code_list = [] if text == '' and len(opts) == 1 and opts[0] == 'reset': return '\x1b[%sm' % RESET for k, v in kwargs.iteritems(): if k == 'fg': code_list.append(foreground[v]) elif k == 'bg': code_list.append(background[v]) for o in opts: if o in opt_dict: code_list.append(opt_dict[o]) if 'noreset' not in opts: text = text + '\x1b[%sm' % RESET return ('\x1b[%sm' % ';'.join(code_list)) + text
Returns your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Returns the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print colorize('first line', fg='red', opts=('noreset',)) print 'this should be red too' print colorize('and so should this') print 'this should not be red'
def import_patch(self, patch_name, new_name=None):
    """ Import patch into the patch queue
        The patch is inserted as the next unapplied patch.
    """
    if new_name:
        dir_name = os.path.dirname(new_name)
        name = os.path.basename(new_name)
        dest_dir = self.quilt_patches + Directory(dir_name)
        dest_dir.create()
    else:
        name = os.path.basename(patch_name)
        dest_dir = self.quilt_patches
    patch_file = File(patch_name)
    dest_file = dest_dir + File(name)
    patch_file.copy(dest_file)
    self._import_patches([name])
Import patch into the patch queue The patch is inserted as the next unapplied patch.
def print_hex(self, value, justify_right=True):
    """Print a numeric value in hexadecimal.  Value should be from 0 to FFFF.
    """
    if value < 0 or value > 0xFFFF:
        # Ignore out of range values.
        return
    self.print_str('{0:X}'.format(value), justify_right)
Print a numeric value in hexadecimal. Value should be from 0 to FFFF.
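A quick illustration of the range check and the formatting the method above relies on.

'{0:X}'.format(0xBEEF)   # 'BEEF' -- the string handed to print_str
'{0:X}'.format(255)      # 'FF'
# Values below 0 or above 0xFFFF are silently ignored by print_hex.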
def interp(mapping, x):
    """Compute the piecewise linear interpolation given by mapping for input x.

    >>> interp(((1, 1), (2, 4)), 1.5)
    2.5
    """
    mapping = sorted(mapping)
    if len(mapping) == 1:
        xa, ya = mapping[0]
        if xa == x:
            return ya
        return x
    for (xa, ya), (xb, yb) in zip(mapping[:-1], mapping[1:]):
        if xa <= x <= xb:
            return ya + float(x - xa) / (xb - xa) * (yb - ya)
    return x
Compute the piecewise linear interpolation given by mapping for input x. >>> interp(((1, 1), (2, 4)), 1.5) 2.5
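A few more calls showing the behaviour of the function above, including the fall-through case where x lies outside the mapping and is returned unchanged.

interp(((1, 1), (2, 4)), 1.5)        # 2.5  (halfway along the first segment)
interp(((1, 1), (2, 4), (4, 0)), 3)  # 2.0  (interpolated on the second segment)
interp(((1, 1), (2, 4)), 0.5)        # 0.5  (outside the mapping, so x is returned as-is)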
def append_sibling_field(self, linenum, indent, field_name, field_value):
    """
    :param linenum: The line number of the frame.
    :type linenum: int
    :param indent: The indentation level of the frame.
    :type indent: int
    :param field_name:
    :type field_name: str
    :param field_value:
    :type field_value: str
    """
    frame = self.current_frame()
    assert frame.indent is not None and frame.indent == indent
    self.pop_frame()
    self.append_child_field(linenum, indent, field_name, field_value)
:param linenum: The line number of the frame. :type linenum: int :param indent: The indentation level of the frame. :type indent: int :param field_name: :type field_name: str :param field_value: :type field_value: str
def create_external_table(self, external_project_dataset_table, schema_fields, source_uris, source_format='CSV', autodetect=False, compression='NONE', ignore_unknown_values=False, max_bad_records=0, skip_leading_rows=0, field_delimiter=',', quote_character=None, allow_quoted_newlines=False, allow_jagged_rows=False, src_fmt_configs=None, labels=None ): """ Creates a new external table in the dataset with the data in Google Cloud Storage. See here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource for more details about these parameters. :param external_project_dataset_table: The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery table name to create external table. If ``<project>`` is not included, project will be the project defined in the connection json. :type external_project_dataset_table: str :param schema_fields: The schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource :type schema_fields: list :param source_uris: The source Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild per-object name can be used. :type source_uris: list :param source_format: File format to export. :type source_format: str :param autodetect: Try to detect schema and format options automatically. Any option specified explicitly will be honored. :type autodetect: bool :param compression: [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. :type compression: str :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. :type ignore_unknown_values: bool :param max_bad_records: The maximum number of bad records that BigQuery can ignore when running the job. :type max_bad_records: int :param skip_leading_rows: Number of rows to skip when loading from a CSV. :type skip_leading_rows: int :param field_delimiter: The delimiter to use when loading from a CSV. :type field_delimiter: str :param quote_character: The value that is used to quote data sections in a CSV file. :type quote_character: str :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false). :type allow_quoted_newlines: bool :param allow_jagged_rows: Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. Only applicable when soure_format is CSV. 
:type allow_jagged_rows: bool :param src_fmt_configs: configure optional fields specific to the source format :type src_fmt_configs: dict :param labels: a dictionary containing labels for the table, passed to BigQuery :type labels: dict """ if src_fmt_configs is None: src_fmt_configs = {} project_id, dataset_id, external_table_id = \ _split_tablename(table_input=external_project_dataset_table, default_project_id=self.project_id, var_name='external_project_dataset_table') # bigquery only allows certain source formats # we check to make sure the passed source format is valid # if it's not, we raise a ValueError # Refer to this link for more details: # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat source_format = source_format.upper() allowed_formats = [ "CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS", "DATASTORE_BACKUP", "PARQUET" ] if source_format not in allowed_formats: raise ValueError("{0} is not a valid source format. " "Please use one of the following types: {1}" .format(source_format, allowed_formats)) compression = compression.upper() allowed_compressions = ['NONE', 'GZIP'] if compression not in allowed_compressions: raise ValueError("{0} is not a valid compression format. " "Please use one of the following types: {1}" .format(compression, allowed_compressions)) table_resource = { 'externalDataConfiguration': { 'autodetect': autodetect, 'sourceFormat': source_format, 'sourceUris': source_uris, 'compression': compression, 'ignoreUnknownValues': ignore_unknown_values }, 'tableReference': { 'projectId': project_id, 'datasetId': dataset_id, 'tableId': external_table_id, } } if schema_fields: table_resource['externalDataConfiguration'].update({ 'schema': { 'fields': schema_fields } }) self.log.info('Creating external table: %s', external_project_dataset_table) if max_bad_records: table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records # if following fields are not specified in src_fmt_configs, # honor the top-level params for backward-compatibility if 'skipLeadingRows' not in src_fmt_configs: src_fmt_configs['skipLeadingRows'] = skip_leading_rows if 'fieldDelimiter' not in src_fmt_configs: src_fmt_configs['fieldDelimiter'] = field_delimiter if 'quote_character' not in src_fmt_configs: src_fmt_configs['quote'] = quote_character if 'allowQuotedNewlines' not in src_fmt_configs: src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines if 'allowJaggedRows' not in src_fmt_configs: src_fmt_configs['allowJaggedRows'] = allow_jagged_rows src_fmt_to_param_mapping = { 'CSV': 'csvOptions', 'GOOGLE_SHEETS': 'googleSheetsOptions' } src_fmt_to_configs_mapping = { 'csvOptions': [ 'allowJaggedRows', 'allowQuotedNewlines', 'fieldDelimiter', 'skipLeadingRows', 'quote' ], 'googleSheetsOptions': ['skipLeadingRows'] } if source_format in src_fmt_to_param_mapping.keys(): valid_configs = src_fmt_to_configs_mapping[ src_fmt_to_param_mapping[source_format] ] src_fmt_configs = { k: v for k, v in src_fmt_configs.items() if k in valid_configs } table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[ source_format]] = src_fmt_configs if labels: table_resource['labels'] = labels try: self.service.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_resource ).execute(num_retries=self.num_retries) self.log.info('External table created successfully: %s', external_project_dataset_table) except HttpError as err: raise Exception( 'BigQuery job failed. Error was: {}'.format(err.content) )
Creates a new external table in the dataset with the data in Google Cloud Storage. See here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource for more details about these parameters. :param external_project_dataset_table: The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery table name to create external table. If ``<project>`` is not included, project will be the project defined in the connection json. :type external_project_dataset_table: str :param schema_fields: The schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource :type schema_fields: list :param source_uris: The source Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild per-object name can be used. :type source_uris: list :param source_format: File format to export. :type source_format: str :param autodetect: Try to detect schema and format options automatically. Any option specified explicitly will be honored. :type autodetect: bool :param compression: [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. :type compression: str :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. :type ignore_unknown_values: bool :param max_bad_records: The maximum number of bad records that BigQuery can ignore when running the job. :type max_bad_records: int :param skip_leading_rows: Number of rows to skip when loading from a CSV. :type skip_leading_rows: int :param field_delimiter: The delimiter to use when loading from a CSV. :type field_delimiter: str :param quote_character: The value that is used to quote data sections in a CSV file. :type quote_character: str :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false). :type allow_quoted_newlines: bool :param allow_jagged_rows: Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. Only applicable when soure_format is CSV. :type allow_jagged_rows: bool :param src_fmt_configs: configure optional fields specific to the source format :type src_fmt_configs: dict :param labels: a dictionary containing labels for the table, passed to BigQuery :type labels: dict
def get_notebook_status(self, name):
    """Get the running named Notebook status.

    :return: None if no notebook is running, otherwise context dictionary
    """
    context = comm.get_context(self.get_pid(name))
    if not context:
        return None
    return context
Get the running named Notebook status. :return: None if no notebook is running, otherwise context dictionary
def fit(self, X, y=None, **fit_params): """Fits the inverse covariance model according to the given training data and parameters. Parameters ----------- X : 2D ndarray, shape (n_features, n_features) Input data. Returns ------- self """ # quic-specific outputs self.opt_ = None self.cputime_ = None self.iters_ = None self.duality_gap_ = None # these must be updated upon self.fit() self.path_ = None self.sample_covariance_ = None self.lam_scale_ = None self.lam_ = None self.is_fitted_ = False X = check_array(X, ensure_min_features=2, estimator=self) X = as_float_array(X, copy=False, force_all_finite=False) self.init_coefs(X) # either use passed in path, or make our own path lam_1 = self.lam_scale_ lam_0 = 1e-2 * lam_1 if self.path is None: self.path_ = np.logspace(np.log10(lam_0), np.log10(lam_1), 100)[::-1] elif isinstance(self.path, int): self.path_ = np.logspace(np.log10(lam_0), np.log10(lam_1), self.path)[::-1] else: self.path_ = self.path self.path_ = _validate_path(self.path_) # fit along the path, temporarily populate # self.precision_, self.covariance_ with path values so we can use our # inherited selection function if self.method == "quic": (self.precision_, self.covariance_, _, _, _, _) = quic( self.sample_covariance_, self.lam * self.lam_scale_, mode="path", tol=self.tol, max_iter=self.max_iter, Theta0=self.Theta0, Sigma0=self.Sigma0, path=self.path_, msg=self.verbose, ) self.is_fitted_ = True else: raise NotImplementedError("Only method='quic' has been implemented.") # apply EBIC criteria best_lam_idx = self.ebic_select(gamma=self.gamma) self.lam_ = self.lam * self.lam_scale_ * self.path_[best_lam_idx] self.precision_ = self.precision_[best_lam_idx] self.covariance_ = self.covariance_[best_lam_idx] self.is_fitted_ = True return self
Fits the inverse covariance model according to the given training data and parameters. Parameters ----------- X : 2D ndarray, shape (n_features, n_features) Input data. Returns ------- self
def save_config(self):
    """
    Save configuration: opened projects & tree widget state.

    Also save whether dock widget is visible if a project is open.
    """
    self.set_option('recent_projects', self.recent_projects)
    self.set_option('expanded_state',
                    self.explorer.treewidget.get_expanded_state())
    self.set_option('scrollbar_position',
                    self.explorer.treewidget.get_scrollbar_position())
    if self.current_active_project and self.dockwidget:
        self.set_option('visible_if_project_open',
                        self.dockwidget.isVisible())
Save configuration: opened projects & tree widget state. Also save whether dock widget is visible if a project is open.
def task(ft):
    """ to create loading progress bar """
    ft.pack(expand=True, fill=BOTH, side=TOP)
    pb_hD = ttk.Progressbar(ft, orient='horizontal', mode='indeterminate')
    pb_hD.pack(expand=True, fill=BOTH, side=TOP)
    pb_hD.start(50)
    ft.mainloop()
to create loading progress bar
def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None):
    '''
    .. versionadded:: 2016.11.0

    Ensure the SAML provider with the specified name is absent.

    name (string)
        The name of the SAML provider.

    saml_metadata_document (string)
        The xml document of the SAML provider.

    region (string)
        Region to connect to.

    key (string)
        Secret key to be used.

    keyid (string)
        Access key to be used.

    profile (dict)
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    provider = __salt__['boto_iam.list_saml_providers'](region=region,
                                                        key=key, keyid=keyid,
                                                        profile=profile)
    if not provider:
        ret['comment'] = 'SAML provider {0} is absent.'.format(name)
        return ret
    if __opts__['test']:
        ret['comment'] = 'SAML provider {0} is set to be removed.'.format(name)
        ret['result'] = None
        return ret
    deleted = __salt__['boto_iam.delete_saml_provider'](name, region=region,
                                                        key=key, keyid=keyid,
                                                        profile=profile)
    if deleted is not False:
        ret['comment'] = 'SAML provider {0} was deleted.'.format(name)
        ret['changes']['old'] = name
        return ret
    ret['result'] = False
    ret['comment'] = 'SAML provider {0} failed to be deleted.'.format(name)
    return ret
.. versionadded:: 2016.11.0 Ensure the SAML provider with the specified name is absent. name (string) The name of the SAML provider. saml_metadata_document (string) The xml document of the SAML provider. region (string) Region to connect to. key (string) Secret key to be used. keyid (string) Access key to be used. profile (dict) A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
def start(self):
    """
    Starts the upload.

    :raises SbgError: If upload is not in PREPARING state.
    """
    if self._status == TransferState.PREPARING:
        super(Upload, self).start()
    else:
        raise SbgError(
            'Unable to start. Upload not in PREPARING state.'
        )
Starts the upload. :raises SbgError: If upload is not in PREPARING state.
async def create(self, query, *, dc=None): """Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. 
A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" } """ if "Token" in query: # in case of a full token object... query["Token"] = extract_attr(query["Token"], keys=["ID"]) response = await self._api.post("/v1/query", params={"dc": dc}, data=query) return response.body
Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. 
A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" }
def Set(self, interface_name, property_name, value, *args, **kwargs):
    '''Standard D-Bus API for setting a property value'''

    self.log('Set %s.%s%s' % (interface_name,
                              property_name,
                              self.format_args((value,))))

    try:
        iface_props = self.props[interface_name]
    except KeyError:
        raise dbus.exceptions.DBusException(
            'no such interface ' + interface_name,
            name=self.interface + '.UnknownInterface')

    if property_name not in iface_props:
        raise dbus.exceptions.DBusException(
            'no such property ' + property_name,
            name=self.interface + '.UnknownProperty')

    iface_props[property_name] = value

    self.EmitSignal('org.freedesktop.DBus.Properties',
                    'PropertiesChanged',
                    'sa{sv}as',
                    [interface_name,
                     dbus.Dictionary({property_name: value}, signature='sv'),
                     dbus.Array([], signature='s')])
Standard D-Bus API for setting a property value
def poke(self, context):
    """
    Check for message on subscribed channels and write to xcom the message with key ``message``

    An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}``

    :param context: the context object
    :type context: dict
    :return: ``True`` if message (with type 'message') is available or ``False`` if not
    """
    self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels)

    message = self.pubsub.get_message()
    self.log.info('Message %s from channel %s', message, self.channels)

    # Process only message types
    if message and message['type'] == 'message':
        context['ti'].xcom_push(key='message', value=message)
        self.pubsub.unsubscribe(self.channels)
        return True

    return False
Check for message on subscribed channels and write to xcom the message with key ``message`` An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}`` :param context: the context object :type context: dict :return: ``True`` if message (with type 'message') is available or ``False`` if not
def parse_plotProfile(self): """Find plotProfile output""" self.deeptools_plotProfile = dict() for f in self.find_log_files('deeptools/plotProfile', filehandles=False): parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f) for k, v in parsed_data.items(): if k in self.deeptools_plotProfile: log.warning("Replacing duplicate sample {}.".format(k)) self.deeptools_plotProfile[k] = v if len(parsed_data) > 0: self.add_data_source(f, section='plotProfile') if len(self.deeptools_plotProfile) > 0: config = { 'id': 'read_distribution_profile', 'title': 'deeptools: Read Distribution Profile after Annotation', 'ylab': 'Occurrence', 'xlab': None, 'smooth_points': 100, 'xPlotBands': [ {'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'}, {'from': converted_bin_labels[bin_labels.index('TSS')], 'to': converted_bin_labels[bin_labels.index('TES')], 'color': '#ffffe2'}, {'from': converted_bin_labels[0], 'to': converted_bin_labels[bin_labels.index('TSS')], 'color': '#e5fce0'}, ], 'xPlotLines': [ {'width': 1, 'value': converted_bin_labels[bin_labels.index('TES')], 'dashStyle': 'Dash', 'color': '#000000'}, {'width': 1, 'value': converted_bin_labels[bin_labels.index('TSS')], 'dashStyle': 'Dash', 'color': '#000000'}, ], } self.add_section ( name = 'Read Distribution Profile after Annotation', anchor = 'read_distribution_profile_plot', description="Accumulated view of the distribution of sequence reads related to the closest annotated gene. All annotated genes have been normalized to the same size. Green: {} upstream of gene to {}; Yellow: {} to {}; Pink: {} to {} downstream of gene".format(list(filter(None,bin_labels))[0], list(filter(None,bin_labels))[1], list(filter(None,bin_labels))[1], list(filter(None,bin_labels))[2], list(filter(None,bin_labels))[2], list(filter(None,bin_labels))[3]), plot=linegraph.plot(self.deeptools_plotProfile, config) ) return len(self.deeptools_bamPEFragmentSizeDistribution)
Find plotProfile output
def Policy(self, data=None, subset=None):
    """{dynamic_docstring}"""
    return self.factory.get_object(jssobjects.Policy, data, subset)
{dynamic_docstring}
def make_sentences(self, stream_item): 'assemble Sentence and Token objects' self.make_label_index(stream_item) sentences = [] token_num = 0 new_mention_id = 0 for sent_start, sent_end, sent_str in self._sentences( stream_item.body.clean_visible): assert isinstance(sent_str, unicode) sent = Sentence() sentence_pos = 0 for start, end in self.word_tokenizer.span_tokenize(sent_str): token_str = sent_str[start:end].encode('utf8') tok = Token( token_num=token_num, token=token_str, sentence_pos=sentence_pos, ) tok.offsets[OffsetType.CHARS] = Offset( type=OffsetType.CHARS, first=sent_start + start, length=end - start, ) # whitespace tokenizer will never get a token # boundary in the middle of an 'author' label try: label = self.label_index.find_le(sent_start + start) except ValueError: label = None if label: off = label.offsets[OffsetType.CHARS] if off.first + off.length > sent_start + start: streamcorpus.add_annotation(tok, label) logger.debug('adding label to tok: %r has %r', tok.token, label.target.target_id) if label in self.label_to_mention_id: mention_id = self.label_to_mention_id[label] else: mention_id = new_mention_id new_mention_id += 1 self.label_to_mention_id[label] = mention_id tok.mention_id = mention_id token_num += 1 sentence_pos += 1 sent.tokens.append(tok) sentences.append(sent) return sentences
assemble Sentence and Token objects
def uppercase(self, value):
    """Validate and set the uppercase flag."""
    if not isinstance(value, bool):
        raise TypeError('uppercase attribute must be a logical type.')
    self._uppercase = value
Validate and set the uppercase flag.
def _parse_current_network_settings():
    '''
    Parse /etc/default/networking and return current configuration
    '''
    opts = salt.utils.odict.OrderedDict()
    opts['networking'] = ''

    if os.path.isfile(_DEB_NETWORKING_FILE):
        with salt.utils.files.fopen(_DEB_NETWORKING_FILE) as contents:
            for line in contents:
                # Assign the decoded line back (the original call discarded
                # the return value of to_unicode).
                line = salt.utils.stringutils.to_unicode(line)
                if line.startswith('#'):
                    continue
                elif line.startswith('CONFIGURE_INTERFACES'):
                    opts['networking'] = line.split('=', 1)[1].strip()

    hostname = _parse_hostname()
    domainname = _parse_domainname()
    searchdomain = _parse_searchdomain()

    opts['hostname'] = hostname
    opts['domainname'] = domainname
    opts['searchdomain'] = searchdomain

    return opts
Parse /etc/default/networking and return current configuration
def get_semester_title(self, node: BaseNode):
    """ get the semester of a node """
    log.debug("Getting Semester Title for %s" % node.course.id)
    return self._get_semester_from_id(node.course.semester)
get the semester of a node
def read(self):
    "Read and interpret data from the daemon."
    status = gpscommon.read(self)
    if status <= 0:
        return status
    if self.response.startswith("{") and self.response.endswith("}\r\n"):
        self.unpack(self.response)
        self.__oldstyle_shim()
        self.newstyle = True
        self.valid |= PACKET_SET
    elif self.response.startswith("GPSD"):
        self.__oldstyle_unpack(self.response)
        self.valid |= PACKET_SET
    return 0
Read and interpret data from the daemon.
def get_concept(self, conceptId, lang='en'):
    """
    Fetch the concept from the Knowledge base

    Args:
        id (str): The concept id to be fetched, it can be Wikipedia page id or Wikidata id.

    Returns:
        dict, int: A dict containing the concept information; an integer representing the response code.
    """
    url = urljoin(self.concept_service + '/', conceptId)
    res, status_code = self.get(url, params={'lang': lang})
    if status_code != 200:
        logger.debug('Fetch concept failed.')
    return self.decode(res), status_code
Fetch the concept from the Knowledge base Args: id (str): The concept id to be fetched, it can be Wikipedia page id or Wikidata id. Returns: dict, int: A dict containing the concept information; an integer representing the response code.
def _double_centered_imp(a, out=None):
    """
    Real implementation of :func:`double_centered`.

    This function is used to make parameter ``out`` keyword-only in
    Python 2.
    """
    out = _float_copy_to_out(out, a)

    dim = np.size(a, 0)

    mu = np.sum(a) / (dim * dim)
    sum_cols = np.sum(a, 0, keepdims=True)
    sum_rows = np.sum(a, 1, keepdims=True)
    mu_cols = sum_cols / dim
    mu_rows = sum_rows / dim

    # Do one operation at a time, to improve broadcasting memory usage.
    out -= mu_rows
    out -= mu_cols
    out += mu

    return out
Real implementation of :func:`double_centered`. This function is used to make parameter ``out`` keyword-only in Python 2.
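A quick numerical check of the double-centering property (every row mean and column mean of the result is zero); it assumes the helper `_float_copy_to_out` from the same module is available.

import numpy as np

a = np.random.rand(5, 5)
b = _double_centered_imp(a)
np.allclose(b.mean(axis=0), 0)   # True: column means are (numerically) zero
np.allclose(b.mean(axis=1), 0)   # True: row means are (numerically) zero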
def ReadAllFlowObjects(
    self,
    client_id=None,
    min_create_time=None,
    max_create_time=None,
    include_child_flows=True,
):
    """Returns all flow objects."""
    res = []
    for flow in itervalues(self.flows):
        if ((client_id is None or flow.client_id == client_id) and
                (min_create_time is None or flow.create_time >= min_create_time) and
                (max_create_time is None or flow.create_time <= max_create_time) and
                (include_child_flows or not flow.parent_flow_id)):
            res.append(flow.Copy())
    return res
Returns all flow objects.
def languages(self, key, value):
    """Populate the ``languages`` key."""
    languages = self.get('languages', [])

    values = force_list(value.get('a'))
    for value in values:
        for language in RE_LANGUAGE.split(value):
            try:
                name = language.strip().capitalize()
                languages.append(pycountry.languages.get(name=name).alpha_2)
            except KeyError:
                pass

    return languages
Populate the ``languages`` key.
def res_from_en(pst,enfile): """load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame """ converters = {"name": str_con, "group": str_con} try: #substitute ensemble for res, 'base' if there, otherwise mean obs=pst.observation_data if isinstance(enfile,str): df=pd.read_csv(enfile,converters=converters) df.columns=df.columns.str.lower() df = df.set_index('real_name').T.rename_axis('name').rename_axis(None, 1) else: df = enfile.T if 'base' in df.columns: df['modelled']=df['base'] df['std']=df.std(axis=1) else: df['modelled']=df.mean(axis=1) df['std']=df.std(axis=1) #probably a more pandastic way to do this res_df=df[['modelled','std']].copy() res_df['group']=obs.loc[:,'obgnme'].copy() res_df['measured']=obs['obsval'].copy() res_df['weight']=obs['weight'].copy() res_df['residual']=res_df['measured']-res_df['modelled'] except Exception as e: raise Exception("Pst.res_from_en:{0}".format(str(e))) return res_df
load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame
def change_execution_time(self, job, date_time):
    """
    Change a job's execution time.
    """
    with self.connection.pipeline() as pipe:
        while 1:
            try:
                pipe.watch(self.scheduled_jobs_key)
                if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
                    raise ValueError('Job not in scheduled jobs queue')
                pipe.zadd(self.scheduled_jobs_key, {job.id: to_unix(date_time)})
                break
            except WatchError:
                # If job is still in the queue, retry otherwise job is already executed
                # so we raise an error
                if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
                    raise ValueError('Job not in scheduled jobs queue')
                continue
Change a job's execution time.
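A hedged usage sketch; `scheduler` stands for the scheduler instance that defines the method above and `job` for a previously scheduled job.

from datetime import datetime, timedelta

# Push the job back by one hour from now (UTC).
scheduler.change_execution_time(job, datetime.utcnow() + timedelta(hours=1))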
def media_new(self, mrl, *options):
    """Create a new Media instance.

    If mrl contains a colon (:) preceded by more than 1 letter, it
    will be treated as a URL. Else, it will be considered as a
    local path. If you need more control, directly use
    media_new_location/media_new_path methods.

    Options can be specified as supplementary string parameters,
    but note that many options cannot be set at the media level,
    and rather at the Instance level. For instance, the marquee
    filter must be specified when creating the vlc.Instance or
    vlc.MediaPlayer.

    Alternatively, options can be added to the media using the
    Media.add_options method (with the same limitation).

    @param options: optional media option=value strings
    """
    if ':' in mrl and mrl.index(':') > 1:
        # Assume it is a URL
        m = libvlc_media_new_location(self, str_to_bytes(mrl))
    else:
        # Else it should be a local path.
        m = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl)))
    for o in options:
        libvlc_media_add_option(m, str_to_bytes(o))
    m._instance = self
    return m
Create a new Media instance. If mrl contains a colon (:) preceded by more than 1 letter, it will be treated as a URL. Else, it will be considered as a local path. If you need more control, directly use media_new_location/media_new_path methods. Options can be specified as supplementary string parameters, but note that many options cannot be set at the media level, and rather at the Instance level. For instance, the marquee filter must be specified when creating the vlc.Instance or vlc.MediaPlayer. Alternatively, options can be added to the media using the Media.add_options method (with the same limitation). @param options: optional media option=value strings
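A short usage sketch with the python-vlc bindings; the URL, file path and the `start-time` media option are illustrative only.

import vlc

instance = vlc.Instance()
m1 = instance.media_new('https://example.com/stream.mp3')   # colon after more than 1 letter: treated as a URL
m2 = instance.media_new('/tmp/clip.mp4', 'start-time=30')   # local path plus a media-level option
player = instance.media_player_new()
player.set_media(m2)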
def url(self):
    """URL to the affiliation's profile page."""
    url = self.xml.find('coredata/link[@rel="scopus-affiliation"]')
    if url is not None:
        url = url.get('href')
    return url
URL to the affiliation's profile page.
def add_jira_status(test_key, test_status, test_comment):
    """Save test status and comments to update Jira later

    :param test_key: test case key in Jira
    :param test_status: test case status
    :param test_comment: test case comments
    """
    global attachments
    if test_key and enabled:
        if test_key in jira_tests_status:
            # Merge data with previous test status
            previous_status = jira_tests_status[test_key]
            test_status = 'Pass' if previous_status[1] == 'Pass' and test_status == 'Pass' else 'Fail'
            if previous_status[2] and test_comment:
                test_comment = '{}\n{}'.format(previous_status[2], test_comment)
            elif previous_status[2] and not test_comment:
                test_comment = previous_status[2]
            attachments += previous_status[3]
        # Add or update test status
        jira_tests_status[test_key] = (test_key, test_status, test_comment, attachments)
Save test status and comments to update Jira later :param test_key: test case key in Jira :param test_status: test case status :param test_comment: test case comments
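An illustrative call sequence for the function above (the issue key is made up); it shows how a later failure overrides an earlier pass for the same test key.

add_jira_status('PROJ-123', 'Pass', None)
add_jira_status('PROJ-123', 'Fail', 'Second scenario failed')
# jira_tests_status['PROJ-123'] now records status 'Fail' (the later failure wins).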
def get_inverses(self, keys):
    """
    Returns
    -------
    Tuple of inverse indices
    """
    return tuple([as_index(k, axis=0).inverse for k in keys])
Returns ------- Tuple of inverse indices
def get_jids():
    '''
    Return all job data from all returners
    '''
    ret = {}
    for returner_ in __opts__[CONFIG_KEY]:
        ret.update(_mminion().returners['{0}.get_jids'.format(returner_)]())
    return ret
Return all job data from all returners
def delete_sp_template_for_vlan(self, vlan_id): """Deletes SP Template for a vlan_id if it exists.""" with self.session.begin(subtransactions=True): try: self.session.query( ucsm_model.ServiceProfileTemplate).filter_by( vlan_id=vlan_id).delete() except orm.exc.NoResultFound: return
Deletes SP Template for a vlan_id if it exists.
def visit_raise(self, node): """Visit a raise statement and check for raising strings or old-raise-syntax. """ # Ignore empty raise. if node.exc is None: return expr = node.exc if self._check_raise_value(node, expr): return try: value = next(astroid.unpack_infer(expr)) except astroid.InferenceError: return self._check_raise_value(node, value)
Visit a raise statement and check for raising strings or old-raise-syntax.
def get_details(self):
        """
        :rtype: list[VmDetailsProperty]
        """
        data = []
        if self.deployment == 'vCenter Clone VM From VM':
            data.append(VmDetailsProperty(key='Cloned VM Name', value=self.dep_attributes.get('vCenter VM', '')))
        if self.deployment == 'VCenter Deploy VM From Linked Clone':
            template = self.dep_attributes.get('vCenter VM', '')
            snapshot = self.dep_attributes.get('vCenter VM Snapshot', '')
            data.append(VmDetailsProperty(key='Cloned VM Name', value='{0} (snapshot: {1})'.format(template, snapshot)))
        if self.deployment == 'vCenter VM From Image':
            data.append(VmDetailsProperty(key='Base Image Name', value=self.dep_attributes.get('vCenter Image', '').split('/')[-1]))
        if self.deployment == 'vCenter VM From Template':
            data.append(VmDetailsProperty(key='Template Name', value=self.dep_attributes.get('vCenter Template', '')))
        return data
:rtype: list[VmDetailsProperty]
def save_model(self, request, obj, form, change): """ Set the ID of the parent page if passed in via querystring, and make sure the new slug propagates to all descendant pages. """ if change and obj._old_slug != obj.slug: # _old_slug was set in PageAdminForm.clean_slug(). new_slug = obj.slug or obj.generate_unique_slug() obj.slug = obj._old_slug obj.set_slug(new_slug) # Force parent to be saved to trigger handling of ordering and slugs. parent = request.GET.get("parent") if parent is not None and not change: obj.parent_id = parent obj.save() super(PageAdmin, self).save_model(request, obj, form, change)
Set the ID of the parent page if passed in via querystring, and make sure the new slug propagates to all descendant pages.
def rsdl(self): """Compute fixed point residual.""" return np.linalg.norm((self.X - self.Yprv).ravel())
Compute fixed point residual.
def to_bqstorage(self): """Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported. """ if bigquery_storage_v1beta1 is None: raise ValueError(_NO_BQSTORAGE_ERROR) table_ref = bigquery_storage_v1beta1.types.TableReference() table_ref.project_id = self._project table_ref.dataset_id = self._dataset_id table_id = self._table_id if "@" in table_id: table_id = table_id.split("@")[0] if "$" in table_id: table_id = table_id.split("$")[0] table_ref.table_id = table_id return table_ref
Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported.
def _make_single_subvolume(self, only_one=True, **args): """Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it. :param bool only_one: if this volume system already has at least one volume, it is returned instead. """ if only_one and self.volumes: return self.volumes[0] if self.parent.index is None: index = '0' else: index = '{0}.0'.format(self.parent.index) volume = self._make_subvolume(index=index, **args) return volume
Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it. :param bool only_one: if this volume system already has at least one volume, it is returned instead.
def get_message(self, timeout=0.5):
        """
        Attempts to retrieve the latest message received by the instance. If no message is
        available it blocks for given timeout or until a message is received, or else
        returns None (whichever is shorter). This method does not block after
        :meth:`can.BufferedReader.stop` has been called.

        :param float timeout: The number of seconds to wait for a new message.
        :rtype: can.Message or None
        :return: the message if there is one, or None if there is not.
        """
        try:
            return self.buffer.get(block=not self.is_stopped, timeout=timeout)
        except Empty:
            return None
Attempts to retrieve the latest message received by the instance. If no message is available it blocks for given timeout or until a message is received, or else returns None (whichever is shorter). This method does not block after :meth:`can.BufferedReader.stop` has been called.

:param float timeout: The number of seconds to wait for a new message.
:rtype: can.Message or None
:return: the message if there is one, or None if there is not.
def dice(edge=15, fn=32): """ dice """ edge = float(edge) # dice c = ops.Cube(edge, center=True) s = ops.Sphere(edge * 3 / 4, center=True) dice = c & s # points c = ops.Circle(edge / 12, _fn=fn) h = 0.7 point = c.linear_extrude(height=h) point1 = point.translate([0, 0, edge / 2 - h / 2]) point2_1 = point1.rotate(a=90, v=[1, 0, 0]).translate([edge / 6, 0, edge / 6]) point2_2 = point2_1.mirror([-edge / 6, 0, -edge / 6]) point2 = point2_1 + point2_2 point3 = point2.rotate(a=90, v=[0, 0, 1]) + point1.rotate(a=90, v=[0, 1, 0]) point4_12 = point2.rotate(a=-90, v=[0, 0, 1]) point4 = point4_12 + point4_12.mirror([0, 1, 0]) point5_123 = point3.rotate(a=90, v=[0, 0, 1]) point5 = point5_123 + point5_123.mirror([1, 0, 0]) point6_1 = point.translate([0, 0, -(edge / 2 + h / 2)]).translate([0, edge / 6, 0]) point6_2 = point6_1.translate([edge / 4, 0, 0]) point6_3 = point6_1.translate([-edge / 4, 0, 0]) point6_123 = point6_1 + point6_2 + point6_3 point6_456 = point6_123.mirror([0, 1, 0]) point6 = point6_123 + point6_456 dice_with_holes = dice - point1 - point2 - point3 - point4 - point5 - point6 dice_with_holes = dice_with_holes.mirror([0, 0, 1]) return(dice_with_holes)
dice
def gather(obj): """Retrieve objects that have been distributed, making them local again""" if hasattr(obj, '__distob_gather__'): return obj.__distob_gather__() elif (isinstance(obj, collections.Sequence) and not isinstance(obj, string_types)): return [gather(subobj) for subobj in obj] else: return obj
Retrieve objects that have been distributed, making them local again
def to_xml(self): """Convert to XML message.""" element = etree.Element(self._tag_name) struct_to_xml(element, [ {"author": self.handle}, {"target_guid": self.target_guid}, {"target_type": DiasporaRetraction.entity_type_to_remote(self.entity_type)}, ]) return element
Convert to XML message.
async def play_url(self, url, position=0): """Play media from an URL on the device.""" headers = {'User-Agent': 'MediaControl/1.0', 'Content-Type': 'application/x-apple-binary-plist'} body = {'Content-Location': url, 'Start-Position': position} address = self._url(self.port, 'play') _LOGGER.debug('AirPlay %s to %s', url, address) resp = None try: # pylint: disable=no-member resp = await self.session.post( address, headers=headers, data=plistlib.dumps(body, fmt=plistlib.FMT_BINARY), timeout=TIMEOUT) await self._wait_for_media_to_end() finally: if resp is not None: resp.close()
Play media from an URL on the device.
def lazy_reverse_binmap(f, xs): """ Same as lazy_binmap, except the parameters are flipped for the binary function """ return (f(y, x) for x, y in zip(xs, xs[1:]))
Same as lazy_binmap, except the parameters are flipped for the binary function
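A quick usage sketch of the generator above, assuming it is in scope; operator.sub makes the flipped argument order visible, yielding later-minus-earlier differences.

import operator

xs = [1, 4, 9, 16]
print(list(lazy_reverse_binmap(operator.sub, xs)))  # [3, 5, 7]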
def exists(self, regex): """ See what :meth:`skip_until` would return without advancing the pointer. >>> s = Scanner("test string") >>> s.exists(' ') 5 >>> s.pos 0 Returns the number of characters matched if it does exist, or ``None`` otherwise. """ return self.search_full(regex, return_string=False, advance_pointer=False)
See what :meth:`skip_until` would return without advancing the pointer. >>> s = Scanner("test string") >>> s.exists(' ') 5 >>> s.pos 0 Returns the number of characters matched if it does exist, or ``None`` otherwise.
def _dialect(self, filepath):
        """returns detected dialect of filepath and sets self.has_header
        if not passed in __init__ kwargs

        Arguments:
            filepath (str): filepath of target csv file
        """
        with open(filepath, self.read_mode) as csvfile:
            sample = csvfile.read(1024)
            dialect = csv.Sniffer().sniff(sample)
            if self.has_header is None:  # detect header if header not specified
                self.has_header = csv.Sniffer().has_header(sample)
            csvfile.seek(0)
        return dialect
returns detected dialect of filepath and sets self.has_header if not passed in __init__ kwargs Arguments: filepath (str): filepath of target csv file
def _defineVariables(self):
        """
        Helper function to define pertinent variables from catalog data.

        ADW (20170627): This has largely been replaced by properties.
        """
        logger.info('Catalog contains %i objects'%(len(self.data)))

        mc_source_id_field = self.config['catalog']['mc_source_id_field']
        if mc_source_id_field is not None:
            if mc_source_id_field not in self.data.dtype.names:
                array = np.zeros(len(self.data),dtype='>i8') # FITS byte-order convention
                self.data = mlab.rec_append_fields(self.data,
                                                   names=mc_source_id_field,
                                                   arrs=array)
            logger.info('Found %i simulated objects'%(np.sum(self.mc_source_id>0)))
Helper function to define pertinent variables from catalog data.

ADW (20170627): This has largely been replaced by properties.
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux): '''Add a vod, with one input. @param p_instance: the instance. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error. ''' f = _Cfunctions.get('libvlc_vlm_add_vod', None) or \ _Cfunction('libvlc_vlm_add_vod', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_char_p) return f(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux)
Add a vod, with one input. @param p_instance: the instance. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error.
def close(self): """Close the canvas Notes ----- This will usually destroy the GL context. For Qt, the context (and widget) will be destroyed only if the widget is top-level. To avoid having the widget destroyed (more like standard Qt behavior), consider making the widget a sub-widget. """ if self._backend is not None and not self._closed: self._closed = True self.events.close() self._backend._vispy_close() forget_canvas(self)
Close the canvas Notes ----- This will usually destroy the GL context. For Qt, the context (and widget) will be destroyed only if the widget is top-level. To avoid having the widget destroyed (more like standard Qt behavior), consider making the widget a sub-widget.
def from_any(cls, obj, bucket): """ Ensure the current object is an index. Always returns a new object :param obj: string or IndexInfo object :param bucket: The bucket name :return: A new IndexInfo object """ if isinstance(obj, cls): return cls(obj.raw) return cls({ 'namespace_id': 'default', 'keyspace_id': bucket, 'name': obj if obj else N1QL_PRIMARY_INDEX, 'using': 'gsi' })
Ensure the current object is an index. Always returns a new object :param obj: string or IndexInfo object :param bucket: The bucket name :return: A new IndexInfo object
def running(self): """ Returns true if job still in running state :return: """ r = self._client._redis flag = '{}:flag'.format(self._queue) if bool(r.exists(flag)): return r.ttl(flag) is None return False
Returns true if job still in running state :return:
def read(self, line, f, data): """See :meth:`PunchParser.read`""" line = f.readline() assert(line == " $HESS\n") while line != " $END\n": line = f.readline()
See :meth:`PunchParser.read`
def _arm_thumb_filter_jump_successors(self, addr, size, successors, get_ins_addr, get_exit_stmt_idx): """ Filter successors for THUMB mode basic blocks, and remove those successors that won't be taken normally. :param int addr: Address of the basic block / SimIRSB. :param int size: Size of the basic block. :param list successors: A list of successors. :param func get_ins_addr: A callable that returns the source instruction address for a successor. :param func get_exit_stmt_idx: A callable that returns the source statement ID for a successor. :return: A new list of successors after filtering. :rtype: list """ if not successors: return [ ] it_counter = 0 conc_temps = {} can_produce_exits = set() bb = self._lift(addr, size=size, thumb=True, opt_level=0) for stmt in bb.vex.statements: if stmt.tag == 'Ist_IMark': if it_counter > 0: it_counter -= 1 can_produce_exits.add(stmt.addr + stmt.delta) elif stmt.tag == 'Ist_WrTmp': val = stmt.data if val.tag == 'Iex_Const': conc_temps[stmt.tmp] = val.con.value elif stmt.tag == 'Ist_Put': if stmt.offset == self.project.arch.registers['itstate'][0]: val = stmt.data if val.tag == 'Iex_RdTmp': if val.tmp in conc_temps: # We found an IT instruction!! # Determine how many instructions are conditional it_counter = 0 itstate = conc_temps[val.tmp] while itstate != 0: it_counter += 1 itstate >>= 8 if it_counter != 0: l.debug('Basic block ends before calculated IT block (%#x)', addr) THUMB_BRANCH_INSTRUCTIONS = ('beq', 'bne', 'bcs', 'bhs', 'bcc', 'blo', 'bmi', 'bpl', 'bvs', 'bvc', 'bhi', 'bls', 'bge', 'blt', 'bgt', 'ble', 'cbz', 'cbnz') for cs_insn in bb.capstone.insns: if cs_insn.mnemonic.split('.')[0] in THUMB_BRANCH_INSTRUCTIONS: can_produce_exits.add(cs_insn.address) successors_filtered = [suc for suc in successors if get_ins_addr(suc) in can_produce_exits or get_exit_stmt_idx(suc) == DEFAULT_STATEMENT] return successors_filtered
Filter successors for THUMB mode basic blocks, and remove those successors that won't be taken normally. :param int addr: Address of the basic block / SimIRSB. :param int size: Size of the basic block. :param list successors: A list of successors. :param func get_ins_addr: A callable that returns the source instruction address for a successor. :param func get_exit_stmt_idx: A callable that returns the source statement ID for a successor. :return: A new list of successors after filtering. :rtype: list
def father(self): """Parent of this individual""" if self._father == []: self._father = self.sub_tag("FAMC/HUSB") return self._father
Parent of this individual
def match(pattern): """ Validates that a field value matches the regex given to this validator. """ regex = re.compile(pattern) def validate(value): if not regex.match(value): return e("{} does not match the pattern {}", value, pattern) return validate
Validates that a field value matches the regex given to this validator.
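A minimal usage sketch of the validator factory above, assuming both `match` and the error helper `e` it calls are in scope (both come from the surrounding module); the slug pattern and sample values are illustrative.

validate_slug = match(r'^[a-z0-9-]+$')

print(validate_slug('my-page-1'))   # None, i.e. the value passes
print(validate_slug('Bad Slug!'))   # whatever e(...) builds, e.g. an error message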
def generic_http_header_parser_for(header_name): """ A parser factory to extract the request id from an HTTP header :return: A parser that can be used to extract the request id from the current request context :rtype: ()->str|None """ def parser(): request_id = request.headers.get(header_name, '').strip() if not request_id: # If the request id is empty return None return None return request_id return parser
A parser factory to extract the request id from an HTTP header :return: A parser that can be used to extract the request id from the current request context :rtype: ()->str|None
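A hypothetical Flask wiring for the parser factory above; the header name, route and app are illustrative, and only `flask.request` (used inside the returned parser) is actually required.

from flask import Flask

app = Flask(__name__)
parse_request_id = generic_http_header_parser_for('X-Request-ID')

@app.route('/ping')
def ping():
    request_id = parse_request_id()        # None if the header is missing or blank
    return {'request_id': request_id or 'unknown'}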
def copy_and_verify(path, source_path, sha256):
    """
    Copy a file to a given path from a given path, if it does not exist.
    After copying it, verify its integrity by checking the SHA-256 hash.

    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_path: str
        The path from which to copy the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully downloaded otherwise `None`
    """
    if os.path.exists(path):
        # Already exists?
        # Nothing to do, except print the SHA-256 if necessary
        if sha256 is None:
            print('The SHA-256 of {} is "{}"'.format(
                path, compute_sha256(path)))
        return path

    if not os.path.exists(source_path):
        return None

    # Compute the path of the unverified file
    unverified_path = path + '.unverified'
    # Copy it
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    shutil.copy(source_path, unverified_path)

    if os.path.exists(unverified_path):
        # Got something...
        if verify_file(unverified_path, sha256):
            # Success: rename the unverified file to the destination
            # filename
            os.rename(unverified_path, path)
            return path
        else:
            # Report failure
            print('SHA verification of file {} failed'.format(source_path))
            # Delete
            os.remove(unverified_path)
    return None
Copy a file to a given path from a given path, if it does not exist.
After copying it, verify its integrity by checking the SHA-256 hash.

Parameters
----------
path: str
    The (destination) path of the file on the local filesystem
source_path: str
    The path from which to copy the file
sha256: str
    The expected SHA-256 hex digest of the file, or `None` to print the
    digest of the file to the console

Returns
-------
str or None
    The path of the file if successfully downloaded otherwise `None`
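An illustrative call with made-up paths and a placeholder digest (the SHA-256 of empty content); `verify_file` and `compute_sha256` are assumed to live in the same module as `copy_and_verify`.

expected = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
path = copy_and_verify('data/model.bin', '/mnt/share/model.bin', expected)
if path is None:
    print('copy failed or the SHA-256 did not match')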
def plot(self, vertices, show=False): """ Plot the text using matplotlib. Parameters -------------- vertices : (n, 2) float Vertices in space show : bool If True, call plt.show() """ if vertices.shape[1] != 2: raise ValueError('only for 2D points!') import matplotlib.pyplot as plt # get rotation angle in degrees angle = np.degrees(self.angle(vertices)) plt.text(*vertices[self.origin], s=self.text, rotation=angle, ha=self.align[0], va=self.align[1], size=18) if show: plt.show()
Plot the text using matplotlib. Parameters -------------- vertices : (n, 2) float Vertices in space show : bool If True, call plt.show()
def run(self, *args, **kwargs): """ Queue a first item to execute, then wait for the queue to be empty before returning. This should be the default way of starting any scraper. """ if self._source is not None: return self._source.run(*args, **kwargs) else: self.queue(*args, **kwargs) return self.wait()
Queue a first item to execute, then wait for the queue to be empty before returning. This should be the default way of starting any scraper.
def isconnected(mask): """ Checks that all nodes are reachable from the first node - i.e. that the graph is fully connected. """ nodes_to_check = list((np.where(mask[0, :])[0])[1:]) seen = [True] + [False] * (len(mask) - 1) while nodes_to_check and not all(seen): node = nodes_to_check.pop() reachable = np.where(mask[node, :])[0] for i in reachable: if not seen[i]: nodes_to_check.append(i) seen[i] = True return all(seen)
Checks that all nodes are reachable from the first node - i.e. that the graph is fully connected.
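A self-contained check, assuming the function above is in scope. The traversal is seeded from `mask[0, :]` with the first hit dropped, so the mask is expected to include the diagonal (every node marked reachable from itself), as in the matrices below.

import numpy as np

connected = np.array([[1, 1, 0, 0],
                      [1, 1, 1, 0],
                      [0, 1, 1, 1],
                      [0, 0, 1, 1]], dtype=bool)
print(isconnected(connected))       # True

split = connected.copy()
split[1, 2] = split[2, 1] = False   # cut the graph into {0, 1} and {2, 3}
print(isconnected(split))           # False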
def _shared_features(adense, bdense):
    """
    Number of features in ``adense`` that are also in ``bdense``.
    """
    a_indices = set(nonzero(adense))
    b_indices = set(nonzero(bdense))
    shared = a_indices & b_indices
    return len(shared)
Number of features in ``adense`` that are also in ``bdense``.
def _log_prob_with_logsf_and_logcdf(self, y): """Compute log_prob(y) using log survival_function and cdf together.""" # There are two options that would be equal if we had infinite precision: # Log[ sf(y - 1) - sf(y) ] # = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ] # Log[ cdf(y) - cdf(y - 1) ] # = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ] logsf_y = self.log_survival_function(y) logsf_y_minus_1 = self.log_survival_function(y - 1) logcdf_y = self.log_cdf(y) logcdf_y_minus_1 = self.log_cdf(y - 1) # Important: Here we use select in a way such that no input is inf, this # prevents the troublesome case where the output of select can be finite, # but the output of grad(select) will be NaN. # In either case, we are doing Log[ exp{big} - exp{small} ] # We want to use the sf items precisely when we are on the right side of the # median, which occurs when logsf_y < logcdf_y. big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y) small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1) return _logsum_expbig_minus_expsmall(big, small)
Compute log_prob(y) using log survival_function and cdf together.
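The `_logsum_expbig_minus_expsmall` helper is not shown in this entry; a standard numerically stable formulation of Log[exp(big) - exp(small)] that it presumably resembles is sketched below in NumPy, with values chosen so the naive form underflows.

import numpy as np

def logsub_exp(big, small):
    # log(exp(big) - exp(small)) = big + log(1 - exp(small - big))
    return big + np.log1p(-np.exp(small - big))

big, small = -1000.0, -1001.0
print(logsub_exp(big, small))                # about -1000.4587, finite
print(np.log(np.exp(big) - np.exp(small)))   # -inf: both exponentials underflow to 0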
def getAPIKey(self, keyID=None): """ Retrieve the NS1 API Key for the given keyID :param str keyID: optional keyID to retrieve, or current if not passed :return: API Key for the given keyID """ kcfg = self.getKeyConfig(keyID) if 'key' not in kcfg: raise ConfigException('invalid config: missing api key') return kcfg['key']
Retrieve the NS1 API Key for the given keyID :param str keyID: optional keyID to retrieve, or current if not passed :return: API Key for the given keyID
def rollback(self): """Implementation of NAPALM method rollback.""" commands = [] commands.append('configure replace flash:rollback-0') commands.append('write memory') self.device.run_commands(commands)
Implementation of NAPALM method rollback.
def set_user_password(environment, parameter, password): """ Sets a user's password in the keyring storage """ username = '%s:%s' % (environment, parameter) return password_set(username, password)
Sets a user's password in the keyring storage
def flush_job(self, job_id, body=None, params=None): """ `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_ :arg job_id: The name of the job to flush :arg body: Flush parameters :arg advance_time: Advances time to the given value generating results and updating the model for the advanced interval :arg calc_interim: Calculates interim results for the most recent bucket or all buckets within the latency period :arg end: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results :arg skip_time: Skips time to the given value without generating results or updating the model for the skipped interval :arg start: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") return self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_flush"), params=params, body=body, )
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_ :arg job_id: The name of the job to flush :arg body: Flush parameters :arg advance_time: Advances time to the given value generating results and updating the model for the advanced interval :arg calc_interim: Calculates interim results for the most recent bucket or all buckets within the latency period :arg end: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results :arg skip_time: Skips time to the given value without generating results or updating the model for the skipped interval :arg start: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results
def create_channel(current): """ Create a public channel. Can be a broadcast channel or normal chat room. Chat room and broadcast distinction will be made at user subscription phase. .. code-block:: python # request: { 'view':'_zops_create_channel', 'name': string, 'description': string, } # response: { 'description': string, 'name': string, 'no_of_members': int, 'member_list': [ {'name': string, 'is_online': bool, 'avatar_url': string, }], 'last_messages': [MSG_DICT] 'status': 'Created', 'code': 201, 'key': key, # of just created channel } """ channel = Channel(name=current.input['name'], description=current.input['description'], owner=current.user, typ=15).save() with BlockSave(Subscriber): Subscriber.objects.get_or_create(user=channel.owner, channel=channel, can_manage=True, can_leave=False) current.input['key'] = channel.key show_channel(current) current.output.update({ 'status': 'Created', 'code': 201 })
Create a public channel. Can be a broadcast channel or normal chat room. Chat room and broadcast distinction will be made at user subscription phase. .. code-block:: python # request: { 'view':'_zops_create_channel', 'name': string, 'description': string, } # response: { 'description': string, 'name': string, 'no_of_members': int, 'member_list': [ {'name': string, 'is_online': bool, 'avatar_url': string, }], 'last_messages': [MSG_DICT] 'status': 'Created', 'code': 201, 'key': key, # of just created channel }
def get_conn(self): """ Returns an SFTP connection object """ if self.conn is None: cnopts = pysftp.CnOpts() if self.no_host_key_check: cnopts.hostkeys = None cnopts.compression = self.compress conn_params = { 'host': self.remote_host, 'port': self.port, 'username': self.username, 'cnopts': cnopts } if self.password and self.password.strip(): conn_params['password'] = self.password if self.key_file: conn_params['private_key'] = self.key_file if self.private_key_pass: conn_params['private_key_pass'] = self.private_key_pass self.conn = pysftp.Connection(**conn_params) return self.conn
Returns an SFTP connection object
def weighted_pixel_signals_from_images(pixels, signal_scale, regular_to_pix, galaxy_image):
    """Compute the (scaled) signal in each pixel, where the signal is the sum of its datas_-pixel fluxes. \
    These pixel-signals are used to compute the effective regularization weight of each pixel.

    The pixel signals are scaled in the following ways:

    1) Divided by the number of datas_-pixels in the pixel, to ensure all pixels have the same \
    'relative' signal (i.e. a pixel with 10 regular-pixels doesn't have x2 the signal of one with 5).

    2) Divided by the maximum pixel-signal, so that all signals vary between 0 and 1. This ensures that the \
    regularizations weights are defined identically for any datas_ units or signal-to-noise_map ratio.

    3) Raised to the power of the hyper-parameter *signal_scale*, so the method can control the relative \
    contribution of regularization in different regions of pixelization.

    Parameters
    -----------
    pixels : int
        The total number of pixels in the pixelization the regularization scheme is applied to.
    signal_scale : float
        A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \
        low signal regions.
    regular_to_pix : ndarray
        A 1D array mapping every pixel on the regular-grid to a pixel on the pixelization.
    galaxy_image : ndarray
        The image of the galaxy which is used to compute the weighted pixel signals.
    """
    pixel_signals = np.zeros((pixels,))
    pixel_sizes = np.zeros((pixels,))

    for regular_index in range(galaxy_image.shape[0]):
        pixel_signals[regular_to_pix[regular_index]] += galaxy_image[regular_index]
        pixel_sizes[regular_to_pix[regular_index]] += 1

    pixel_signals /= pixel_sizes
    pixel_signals /= np.max(pixel_signals)

    return pixel_signals ** signal_scale
Compute the (scaled) signal in each pixel, where the signal is the sum of its datas_-pixel fluxes. \
These pixel-signals are used to compute the effective regularization weight of each pixel.

The pixel signals are scaled in the following ways:

1) Divided by the number of datas_-pixels in the pixel, to ensure all pixels have the same \
'relative' signal (i.e. a pixel with 10 regular-pixels doesn't have x2 the signal of one with 5).

2) Divided by the maximum pixel-signal, so that all signals vary between 0 and 1. This ensures that the \
regularizations weights are defined identically for any datas_ units or signal-to-noise_map ratio.

3) Raised to the power of the hyper-parameter *signal_scale*, so the method can control the relative \
contribution of regularization in different regions of pixelization.

Parameters
-----------
pixels : int
    The total number of pixels in the pixelization the regularization scheme is applied to.
signal_scale : float
    A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \
    low signal regions.
regular_to_pix : ndarray
    A 1D array mapping every pixel on the regular-grid to a pixel on the pixelization.
galaxy_image : ndarray
    The image of the galaxy which is used to compute the weighted pixel signals.
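A tiny worked example, assuming the function above is in scope: four image pixels map onto two pixelization pixels, and the commented intermediate values follow the three scaling steps from the docstring.

import numpy as np

regular_to_pix = np.array([0, 0, 1, 1])
galaxy_image = np.array([2.0, 4.0, 1.0, 1.0])

signals = weighted_pixel_signals_from_images(
    pixels=2, signal_scale=2.0,
    regular_to_pix=regular_to_pix, galaxy_image=galaxy_image)
print(signals)
# sums per pixel [6, 2] -> means [3, 1] -> normalised [1, 1/3]
# -> raised to signal_scale=2: [1.0, 0.111...]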
def odinweb_node_formatter(path_node): # type: (PathParam) -> str """ Format a node to be consumable by the `UrlPath.parse`. """ args = [path_node.name] if path_node.type: args.append(path_node.type.name) if path_node.type_args: args.append(path_node.type_args) return "{{{}}}".format(':'.join(args))
Format a node to be consumable by the `UrlPath.parse`.
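A sketch of the expected output using namedtuples in place of the real `PathParam` and type objects from odinweb (which may differ); only the `.name`, `.type` and `.type_args` attributes read above are faked.

from collections import namedtuple

FakeType = namedtuple('FakeType', 'name')
FakeParam = namedtuple('FakeParam', 'name type type_args')

print(odinweb_node_formatter(FakeParam('id', FakeType('Integer'), None)))   # {id:Integer}
print(odinweb_node_formatter(FakeParam('slug', None, None)))                # {slug}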
def get_storyline(self, timezone_offset, first_date, start=0.0, end=0.0, track_points=False): ''' a method to retrieve storyline details for a period of time NOTE: start and end must be no more than 30 days, 1 second apart NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart :param timezone_offset: integer with timezone offset from user profile details :param first_date: string with ISO date from user profile details firstDate :param start: [optional] float with starting datetime for daily summaries :param end: [optional] float with ending datetime for daily summaries :param track_points: [optional] boolean to provide detailed tracking of user movement :return: dictionary of response details with storyline list inside json key { 'headers': { ... }, 'code': 200, 'error': '', 'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily' 'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ] } ''' title = '%s.get_storyline' % self.__class__.__name__ # validate scope if {'location', 'activity'} - set(self.service_scope): raise ValueError('%s requires service scope to contain "location" and "activity".' % title) # construct request fields url_string = '%s/user/storyline/daily' % self.endpoint parameters = self._process_dates(timezone_offset, first_date, start, end, title, track_points) if track_points: parameters['trackPoints'] = 'true' # send request storyline_details = self._get_request(url_string, params=parameters) return storyline_details
a method to retrieve storyline details for a period of time NOTE: start and end must be no more than 30 days, 1 second apart NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart :param timezone_offset: integer with timezone offset from user profile details :param first_date: string with ISO date from user profile details firstDate :param start: [optional] float with starting datetime for daily summaries :param end: [optional] float with ending datetime for daily summaries :param track_points: [optional] boolean to provide detailed tracking of user movement :return: dictionary of response details with storyline list inside json key { 'headers': { ... }, 'code': 200, 'error': '', 'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily' 'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ] }
def check_proxy_setting():
    """
    If the environmental variable 'HTTP_PROXY' is set, it will most likely be
    in one of these forms:

          proxyhost:8080
          http://proxyhost:8080

    urllib2 requires the proxy URL to start with 'http://'
    This routine does that, and returns the transport for xmlrpc.
    """
    try:
        http_proxy = os.environ['HTTP_PROXY']
    except KeyError:
        return

    if not http_proxy.startswith('http://'):
        match = re.match(r'(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
        #if not match:
        #    raise Exception('Proxy format not recognised: [%s]' % http_proxy)
        os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2), match.group(3))
    return
If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:

      proxyhost:8080
      http://proxyhost:8080

urllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc.
def convert_nonParametricSeismicSource(self, node):
        """
        Convert the given node into a non parametric source object.

        :param node: a node with tag nonParametricSeismicSource
        :returns: a :class:`openquake.hazardlib.source.NonParametricSeismicSource` instance
        """
        trt = node.attrib.get('tectonicRegion')
        rup_pmf_data = []
        rups_weights = None
        if 'rup_weights' in node.attrib:
            tmp = node.attrib.get('rup_weights')
            rups_weights = numpy.array([float(s) for s in tmp.split()])
        for i, rupnode in enumerate(node):
            probs = pmf.PMF(valid.pmf(rupnode['probs_occur']))
            rup = RuptureConverter.convert_node(self, rupnode)
            rup.tectonic_region_type = trt
            rup.weight = None if rups_weights is None else rups_weights[i]
            rup_pmf_data.append((rup, probs))
        nps = source.NonParametricSeismicSource(
            node['id'], node['name'], trt, rup_pmf_data)
        nps.splittable = 'rup_weights' not in node.attrib
        return nps
Convert the given node into a non parametric source object.

:param node: a node with tag nonParametricSeismicSource
:returns: a :class:`openquake.hazardlib.source.NonParametricSeismicSource` instance
def check_meta_tag(domain, prefix, code):
    """
    Validates a domain by checking the existence of a
    <meta name="{prefix}" content="{code}"> tag in the <head> of the home page
    of the domain using either HTTP or HTTPS protocols.

    Returns true if verification succeeded.
    """
    url = '://{}'.format(domain)
    for proto in ('http', 'https'):
        try:
            req = Request(proto + url, headers={'User-Agent': 'Mozilla/5.0; Domcheck/1.0'})
            res = urlopen(req, timeout=2)
            if res.code == 200:
                # Expect the </head> to be found in the first 100k of the page
                content = str(res.read(100000))
                res.close()
                return search_meta_tag(content, prefix, code)
            else:
                res.close()
        except:
            logger.debug('', exc_info=True)

    return False
Validates a domain by checking the existence of a <meta name="{prefix}" content="{code}"> tag in the <head> of the home page of the domain using either HTTP or HTTPS protocols.

Returns true if verification succeeded.
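An illustrative call; the domain, prefix and verification code are made up. The function fetches the homepage over http and then https and searches roughly the first 100 kB for a matching <meta> tag via `search_meta_tag` from the same module.

if check_meta_tag('example.org', 'my-verifier', 'abc123'):
    print('domain verified')
else:
    print('verification failed')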
def _create_PmtInf_node(self): """ Method to create the blank payment information nodes as a dict. """ ED = dict() # ED is element dict ED['PmtInfNode'] = ET.Element("PmtInf") ED['PmtInfIdNode'] = ET.Element("PmtInfId") ED['PmtMtdNode'] = ET.Element("PmtMtd") ED['BtchBookgNode'] = ET.Element("BtchBookg") ED['NbOfTxsNode'] = ET.Element("NbOfTxs") ED['CtrlSumNode'] = ET.Element("CtrlSum") ED['PmtTpInfNode'] = ET.Element("PmtTpInf") ED['SvcLvlNode'] = ET.Element("SvcLvl") ED['Cd_SvcLvl_Node'] = ET.Element("Cd") ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt") ED['DbtrNode'] = ET.Element("Dbtr") ED['Nm_Dbtr_Node'] = ET.Element("Nm") ED['DbtrAcctNode'] = ET.Element("DbtrAcct") ED['Id_DbtrAcct_Node'] = ET.Element("Id") ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN") ED['DbtrAgtNode'] = ET.Element("DbtrAgt") ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId") if 'BIC' in self._config: ED['BIC_DbtrAgt_Node'] = ET.Element("BIC") ED['ChrgBrNode'] = ET.Element("ChrgBr") return ED
Method to create the blank payment information nodes as a dict.
def validate(self, instance, value): """Check shape and dtype of vector validate also coerces the vector from valid strings (these include ZERO, X, Y, -X, -Y, EAST, WEST, NORTH, and SOUTH) and scales it to the given length. """ if isinstance(value, string_types): if ( value.upper() not in VECTOR_DIRECTIONS or value.upper() in ('Z', '-Z', 'UP', 'DOWN') ): self.error(instance, value) value = VECTOR_DIRECTIONS[value.upper()][:2] return super(Vector2, self).validate(instance, value)
Check shape and dtype of vector validate also coerces the vector from valid strings (these include ZERO, X, Y, -X, -Y, EAST, WEST, NORTH, and SOUTH) and scales it to the given length.
def _rdsignal(fp, file_size, header_size, n_sig, bit_width, is_signed, cut_end): """ Read the signal Parameters ---------- cut_end : bool, optional If True, enables reading the end of files which appear to terminate with the incorrect number of samples (ie. sample not present for all channels), by checking and skipping the reading the end of such files. Checking this option makes reading slower. """ # Cannot initially figure out signal length because there # are escape sequences. fp.seek(header_size) signal_size = file_size - header_size byte_width = int(bit_width / 8) # numpy dtype dtype = str(byte_width) if is_signed: dtype = 'i' + dtype else: dtype = 'u' + dtype # big endian dtype = '>' + dtype # The maximum possible samples given the file size # All channels must be present max_samples = int(signal_size / byte_width) max_samples = max_samples - max_samples % n_sig # Output information signal = np.empty(max_samples, dtype=dtype) markers = [] triggers = [] # Number of (total) samples read sample_num = 0 # Read one sample for all channels at a time if cut_end: stop_byte = file_size - n_sig * byte_width + 1 while fp.tell() < stop_byte: chunk = fp.read(2) sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num) else: while True: chunk = fp.read(2) if not chunk: break sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num) # No more bytes to read. Reshape output arguments. signal = signal[:sample_num] signal = signal.reshape((-1, n_sig)) markers = np.array(markers, dtype='int') triggers = np.array(triggers, dtype='int') return signal, markers, triggers
Read the signal Parameters ---------- cut_end : bool, optional If True, enables reading the end of files which appear to terminate with the incorrect number of samples (ie. sample not present for all channels), by checking and skipping the reading the end of such files. Checking this option makes reading slower.
def lock(self, lease_time=-1): """ Acquires the lock. If a lease time is specified, lock will be released after this lease time. If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies dormant until the lock has been acquired. :param lease_time: (long), time to wait before releasing the lock (optional). """ return self._encode_invoke(lock_lock_codec, invocation_timeout=MAX_SIZE, lease_time=to_millis(lease_time), thread_id=thread_id(), reference_id=self.reference_id_generator.get_and_increment())
Acquires the lock. If a lease time is specified, lock will be released after this lease time. If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies dormant until the lock has been acquired. :param lease_time: (long), time to wait before releasing the lock (optional).
def distance(p0, p1): r"""Return the distance between two points. Parameters ---------- p0: (X,Y) ndarray Starting coordinate p1: (X,Y) ndarray Ending coordinate Returns ------- d: float distance See Also -------- dist_2 """ return math.sqrt(dist_2(p0[0], p0[1], p1[0], p1[1]))
r"""Return the distance between two points. Parameters ---------- p0: (X,Y) ndarray Starting coordinate p1: (X,Y) ndarray Ending coordinate Returns ------- d: float distance See Also -------- dist_2
def plot_spikes(spikes, view=False, filename=None, title=None): """ Plots the trains for a single spiking neuron. """ t_values = [t for t, I, v, u, f in spikes] v_values = [v for t, I, v, u, f in spikes] u_values = [u for t, I, v, u, f in spikes] I_values = [I for t, I, v, u, f in spikes] f_values = [f for t, I, v, u, f in spikes] fig = plt.figure() plt.subplot(4, 1, 1) plt.ylabel("Potential (mv)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, v_values, "g-") if title is None: plt.title("Izhikevich's spiking neuron model") else: plt.title("Izhikevich's spiking neuron model ({0!s})".format(title)) plt.subplot(4, 1, 2) plt.ylabel("Fired") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, f_values, "r-") plt.subplot(4, 1, 3) plt.ylabel("Recovery (u)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, u_values, "r-") plt.subplot(4, 1, 4) plt.ylabel("Current (I)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, I_values, "r-o") if filename is not None: plt.savefig(filename) if view: plt.show() plt.close() fig = None return fig
Plots the trains for a single spiking neuron.
def _fix_review_dates(self, item): """Convert dates so ES detect them""" for date_field in ['timestamp', 'createdOn', 'lastUpdated']: if date_field in item.keys(): date_ts = item[date_field] item[date_field] = unixtime_to_datetime(date_ts).isoformat() if 'patchSets' in item.keys(): for patch in item['patchSets']: pdate_ts = patch['createdOn'] patch['createdOn'] = unixtime_to_datetime(pdate_ts).isoformat() if 'approvals' in patch: for approval in patch['approvals']: adate_ts = approval['grantedOn'] approval['grantedOn'] = unixtime_to_datetime(adate_ts).isoformat() if 'comments' in item.keys(): for comment in item['comments']: cdate_ts = comment['timestamp'] comment['timestamp'] = unixtime_to_datetime(cdate_ts).isoformat()
Convert dates so ES detect them
def add_module_definition(self, module_definition): """ Add a ModuleDefinition to the document """ if module_definition.identity not in self._module_definitions.keys(): self._module_definitions[module_definition.identity] = module_definition else: raise ValueError("{} has already been defined".format(module_definition.identity))
Add a ModuleDefinition to the document
def lookup_field_orderable(self, field): """ Returns whether the passed in field is sortable or not, by default all 'raw' fields, that is fields that are part of the model are sortable. """ try: self.model._meta.get_field_by_name(field) return True except Exception: # that field doesn't exist, so not sortable return False
Returns whether the passed in field is sortable or not, by default all 'raw' fields, that is fields that are part of the model are sortable.