Columns: code (string, 75 to 104k characters) and docstring (string, 1 to 46.9k characters).
def filter_filenames(filenames):
    """
    Skip files with extensions in `FILE_EXCLUDE_EXTENTIONS` and filenames
    that contain `FILE_SKIP_PATTENRS`.
    """
    filenames_cleaned = []
    for filename in filenames:
        keep = True
        for pattern in FILE_EXCLUDE_EXTENTIONS:
            if filename.endswith(pattern):
                keep = False
        for pattern in FILE_SKIP_PATTENRS:
            # This will reject exercises...
            if pattern in filename:
                keep = False
        if keep:
            filenames_cleaned.append(filename)
    return filenames_cleaned
Skip files with extensions in `FILE_EXCLUDE_EXTENTIONS` and filenames that contain `FILE_SKIP_PATTENRS`.
def convert_constants(jmag, hmag, kmag, cjhk, cjh, cjk, chk, cj, ch, ck):
    '''This converts between JHK and BVRI/SDSS mags. Not meant to be used
    directly. See the functions below for a more sensible interface.

    This function does the grunt work of converting from JHK to either BVRI
    or SDSS ugriz, while taking care of missing values for any of jmag, hmag,
    or kmag.

    Parameters
    ----------

    jmag,hmag,kmag : float
        2MASS J, H, Ks mags to use to convert.

    cjhk,cjh,cjk,chk,cj,ch,ck : lists
        Constants to use when converting.

    Returns
    -------

    float
        The converted magnitude in SDSS or BVRI system.

    '''
    if jmag is not None:
        if hmag is not None:
            if kmag is not None:
                return cjhk[0] + cjhk[1]*jmag + cjhk[2]*hmag + cjhk[3]*kmag
            else:
                return cjh[0] + cjh[1]*jmag + cjh[2]*hmag
        else:
            if kmag is not None:
                return cjk[0] + cjk[1]*jmag + cjk[2]*kmag
            else:
                return cj[0] + cj[1]*jmag
    else:
        if hmag is not None:
            if kmag is not None:
                return chk[0] + chk[1]*hmag + chk[2]*kmag
            else:
                return ch[0] + ch[1]*hmag
        else:
            if kmag is not None:
                return ck[0] + ck[1]*kmag
            else:
                return np.nan
This converts between JHK and BVRI/SDSS mags. Not meant to be used directly. See the functions below for a more sensible interface. This function does the grunt work of converting from JHK to either BVRI or SDSS ugriz, while taking care of missing values for any of jmag, hmag, or kmag. Parameters ---------- jmag,hmag,kmag : float 2MASS J, H, Ks mags to use to convert. cjhk,cjh,cjk,chk,cj,ch,ck : lists Constants to use when converting. Returns ------- float The converted magnitude in SDSS or BVRI system.
def from_dc_code(cls, dc_code):
    """Retrieve the datacenter id associated with a dc_code"""
    result = cls.list()
    dc_codes = {}
    for dc in result:
        if dc.get('dc_code'):
            dc_codes[dc['dc_code']] = dc['id']

    return dc_codes.get(dc_code)
Retrieve the datacenter id associated with a dc_code
def access_var(self, id_, lineno, scope=None, default_type=None):
    """
    Since ZX BASIC allows access to undeclared variables, we must allow
    them, and *implicitly* declare them if they are not declared already.
    This function just checks if the id_ exists and returns its entry if so.
    Otherwise, it creates an implicitly declared variable entry and returns it.

    If the --strict command line flag is enabled (or #pragma option explicit
    is in use) this check ensures the id_ is already declared.

    Returns None on error.
    """
    result = self.access_id(id_, lineno, scope, default_type)
    if result is None:
        return None

    if not self.check_class(id_, CLASS.var, lineno, scope):
        return None

    assert isinstance(result, symbols.VAR)
    result.class_ = CLASS.var
    return result
Since ZX BASIC allows access to undeclared variables, we must allow them, and *implicitly* declare them if they are not declared already. This function just checks if the id_ exists and returns its entry if so. Otherwise, it creates an implicitly declared variable entry and returns it. If the --strict command line flag is enabled (or #pragma option explicit is in use) this check ensures the id_ is already declared. Returns None on error.
def complete_run(self, text, line, b, e):
    ''' Autocomplete file names with .forth ending. '''
    # Don't break on path separators.
    text = line.split()[-1]
    # Try to find files with a forth file ending, .fs.
    forth_files = glob.glob(text + '*.fs')
    # Failing that, just try and complete something.
    if len(forth_files) == 0:
        return [f.split(os.path.sep)[-1] for f in glob.glob(text + '*')]
    forth_files = [f.split(os.path.sep)[-1] for f in forth_files]
    return forth_files
Autocomplete file names with .forth ending.
def get_assign_groups(line, ops=ops):
    """
    Split a line into groups by assignment (including augmented assignment)
    """
    group = []
    for item in line:
        group.append(item)
        if item in ops:
            yield group
            group = []
    yield group
Split a line into groups by assignment (including augmented assignment)
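A small usage sketch of the generator above (the `ops` value passed here is a stand-in for the module-level set of assignment tokens it defaults to): the tokens up to and including each assignment operator come out as one group, and the trailing tokens form the final group.

    # Hypothetical token list; assumes '=' is in `ops`.
    tokens = ['x', '=', '1', '+', '2']
    groups = list(get_assign_groups(tokens, ops={'='}))
    # groups == [['x', '='], ['1', '+', '2']]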
def decode_exactly(code, bits_per_char=6):
    """Decode a geohash on a hilbert curve as a lng/lat position with error-margins

    Decodes the geohash `code` as a lng/lat position with error-margins. It
    assumes that the length of `code` corresponds to the precision! And that
    each character in `code` encodes `bits_per_char` bits. Do not mix
    geohashes with different `bits_per_char`!

    Parameters:
        code: str           The geohash to decode.
        bits_per_char: int  The number of bits per coding character

    Returns:
        Tuple[float, float, float, float]:  (lng, lat, lng-error, lat-error) coordinate for the geohash.
    """
    assert bits_per_char in (2, 4, 6)

    if len(code) == 0:
        return 0., 0., _LNG_INTERVAL[1], _LAT_INTERVAL[1]

    bits = len(code) * bits_per_char
    level = bits >> 1
    dim = 1 << level

    code_int = decode_int(code, bits_per_char)
    if CYTHON_AVAILABLE and bits <= MAX_BITS:
        x, y = hash2xy_cython(code_int, dim)
    else:
        x, y = _hash2xy(code_int, dim)

    lng, lat = _int2coord(x, y, dim)
    lng_err, lat_err = _lvl_error(level)  # level of hilbert curve is bits / 2

    return lng + lng_err, lat + lat_err, lng_err, lat_err
Decode a geohash on a hilbert curve as a lng/lat position with error-margins Decodes the geohash `code` as a lng/lat position with error-margins. It assumes that the length of `code` corresponds to the precision! And that each character in `code` encodes `bits_per_char` bits. Do not mix geohashes with different `bits_per_char`! Parameters: code: str The geohash to decode. bits_per_char: int The number of bits per coding character Returns: Tuple[float, float, float, float]: (lng, lat, lng-error, lat-error) coordinate for the geohash.
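The code adds the error terms to the decoded corner coordinate before returning, which suggests the returned position can be read as the cell center with the errors as half-widths; a caller can then recover the bounding box directly. A minimal usage sketch ('z7fe2' is a made-up placeholder geohash, not a verified value):

    # Hypothetical usage; the geohash string is only a placeholder.
    lng, lat, lng_err, lat_err = decode_exactly('z7fe2', bits_per_char=6)
    bbox = (lng - lng_err, lat - lat_err, lng + lng_err, lat + lat_err)
    # (west, south, east, north) bounds of the decoded cell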
def renew(cls, fqdn, duration, background):
    """Renew a domain."""
    fqdn = fqdn.lower()
    if not background and not cls.intty():
        background = True

    domain_info = cls.info(fqdn)

    current_year = domain_info['date_registry_end'].year
    domain_params = {
        'duration': duration,
        'current_year': current_year,
    }

    result = cls.call('domain.renew', fqdn, domain_params)
    if background:
        return result

    # interactive mode, run a progress bar
    cls.echo('Renewing your domain.')
    cls.display_progress(result)
    cls.echo('Your domain %s has been renewed.' % fqdn)
Renew a domain.
def get_plotable3d(self):
    """
    :returns: matplotlib Poly3DCollection
    :rtype: list of mpl_toolkits.mplot3d
    """
    polyhedra = sum([polyhedron.get_plotable3d()
                     for polyhedron in self.polyhedra], [])
    return polyhedra + self.surface.get_plotable3d()
:returns: matplotlib Poly3DCollection :rtype: list of mpl_toolkits.mplot3d
def _sync_from_disk(self):
    """Read any changes made on disk to this Refpkg.

    This is necessary if other programs are making changes to the Refpkg
    on disk and your program must be synchronized to them.
    """
    try:
        fobj = self.open_manifest('r')
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise ValueError(
                "couldn't find manifest file in %s" % (self.path,))
        elif e.errno == errno.ENOTDIR:
            raise ValueError("%s is not a directory" % (self.path,))
        else:
            raise
    with fobj:
        self.contents = json.load(fobj)
    self._set_defaults()
    self._check_refpkg()
Read any changes made on disk to this Refpkg. This is necessary if other programs are making changes to the Refpkg on disk and your program must be synchronized to them.
def ready_argument_list(self, arguments):
    """ready argument list to be passed to the kernel, allocates gpu mem

    :param arguments: List of arguments to be passed to the kernel.
        The order should match the argument list on the OpenCL kernel.
        Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
    :type arguments: list(numpy objects)

    :returns: A list of arguments that can be passed to an OpenCL kernel.
    :rtype: list( pyopencl.Buffer, numpy.int32, ... )
    """
    gpu_args = []
    for arg in arguments:
        # if arg is a numpy array copy it to the device
        if isinstance(arg, numpy.ndarray):
            gpu_args.append(cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf=arg))
        else:
            # if not an array, just pass argument along
            gpu_args.append(arg)
    return gpu_args
ready argument list to be passed to the kernel, allocates gpu mem :param arguments: List of arguments to be passed to the kernel. The order should match the argument list on the OpenCL kernel. Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on. :type arguments: list(numpy objects) :returns: A list of arguments that can be passed to an OpenCL kernel. :rtype: list( pyopencl.Buffer, numpy.int32, ... )
def start(self, input_data, output_data, transform_resources, **kwargs): """Start the Local Transform Job Args: input_data (dict): Describes the dataset to be transformed and the location where it is stored. output_data (dict): Identifies the location where to save the results from the transform job transform_resources (dict): compute instances for the transform job. Currently only supports local or local_gpu **kwargs: additional arguments coming from the boto request object """ self.transform_resources = transform_resources self.input_data = input_data self.output_data = output_data image = self.primary_container['Image'] instance_type = transform_resources['InstanceType'] instance_count = 1 environment = self._get_container_environment(**kwargs) # Start the container, pass the environment and wait for it to start up self.container = _SageMakerContainer(instance_type, instance_count, image, self.local_session) self.container.serve(self.primary_container['ModelDataUrl'], environment) serving_port = get_config_value('local.serving_port', self.local_session.config) or 8080 _wait_for_serving_container(serving_port) # Get capabilities from Container if needed endpoint_url = 'http://localhost:%s/execution-parameters' % serving_port response, code = _perform_request(endpoint_url) if code == 200: execution_parameters = json.loads(response.read()) # MaxConcurrentTransforms is ignored because we currently only support 1 for setting in ('BatchStrategy', 'MaxPayloadInMB'): if setting not in kwargs and setting in execution_parameters: kwargs[setting] = execution_parameters[setting] # Apply Defaults if none was provided kwargs.update(self._get_required_defaults(**kwargs)) self.start_time = datetime.datetime.now() self.batch_strategy = kwargs['BatchStrategy'] if 'Environment' in kwargs: self.environment = kwargs['Environment'] # run the batch inference requests self._perform_batch_inference(input_data, output_data, **kwargs) self.end_time = datetime.datetime.now() self.state = self._COMPLETED
Start the Local Transform Job Args: input_data (dict): Describes the dataset to be transformed and the location where it is stored. output_data (dict): Identifies the location where to save the results from the transform job transform_resources (dict): compute instances for the transform job. Currently only supports local or local_gpu **kwargs: additional arguments coming from the boto request object
def id(self): """Return a unique id for the detected board, if any.""" # There are some times we want to trick the platform detection # say if a raspberry pi doesn't have the right ID, or for testing try: return os.environ['BLINKA_FORCEBOARD'] except KeyError: # no forced board, continue with testing! pass chip_id = self.detector.chip.id board_id = None if chip_id == ap_chip.BCM2XXX: board_id = self._pi_id() elif chip_id == ap_chip.AM33XX: board_id = self._beaglebone_id() elif chip_id == ap_chip.GENERIC_X86: board_id = GENERIC_LINUX_PC elif chip_id == ap_chip.SUN8I: board_id = self._armbian_id() elif chip_id == ap_chip.SAMA5: board_id = self._sama5_id() elif chip_id == ap_chip.ESP8266: board_id = FEATHER_HUZZAH elif chip_id == ap_chip.SAMD21: board_id = FEATHER_M0_EXPRESS elif chip_id == ap_chip.STM32: board_id = PYBOARD elif chip_id == ap_chip.S805: board_id = ODROID_C1 elif chip_id == ap_chip.S905: board_id = ODROID_C2 elif chip_id == ap_chip.FT232H: board_id = FTDI_FT232H elif chip_id in (ap_chip.T210, ap_chip.T186, ap_chip.T194): board_id = self._tegra_id() return board_id
Return a unique id for the detected board, if any.
def _job_to_text(self, job): """ Return a standard formatting of a Job serialization. """ next_run = self._format_date(job.get('next_run', None)) tasks = '' for task in job.get('tasks', []): tasks += self._task_to_text(task) tasks += '\n\n' return '\n'.join(['Job name: %s' % job.get('name', None), 'Cron schedule: %s' % job.get('cron_schedule', None), 'Next run: %s' % next_run, '', 'Parent ID: %s' % job.get('parent_id', None), 'Job ID: %s' % job.get('job_id', None), '', 'Tasks Detail', '', tasks])
Return a standard formatting of a Job serialization.
def load_mlf(filename, utf8_normalization=None): """Load an HTK Master Label File. :param filename: The filename of the MLF file. :param utf8_normalization: None """ with codecs.open(filename, 'r', 'string_escape') as f: data = f.read().decode('utf8') if utf8_normalization: data = unicodedata.normalize(utf8_normalization, data) mlfs = {} for mlf_object in HTK_MLF_RE.finditer(data): mlfs[mlf_object.group('file')] = [[Label(**mo.groupdict()) for mo in HTK_HYPOTHESIS_RE.finditer(recognition_data)] for recognition_data in re.split(r'\n///\n', mlf_object.group('hypotheses'))] return mlfs
Load an HTK Master Label File. :param filename: The filename of the MLF file. :param utf8_normalization: None
def base64(self, charset=None):
    '''Data encoded as base 64'''
    return b64encode(self.bytes()).decode(charset or self.charset)
Data encoded as base 64
def streamline(self):
    """Streamline the language represented by this parser to make queries run faster."""
    t = time.time()
    self.language.streamline()
    log.info('streamlined %s in %.02f seconds', self.__class__.__name__, time.time() - t)
Streamline the language represented by this parser to make queries run faster.
def _make_session(connection: Optional[str] = None) -> Session:
    """Make a session."""
    if connection is None:
        connection = get_global_connection()

    engine = create_engine(connection)
    create_all(engine)
    session_cls = sessionmaker(bind=engine)
    session = session_cls()
    return session
Make a session.
def _on_rpc_done(self, future): """Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``. """ _LOGGER.info("RPC termination has signaled manager shutdown.") future = _maybe_wrap_exception(future) thread = threading.Thread( name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future} ) thread.daemon = True thread.start()
Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``.
def remove(self, obj, commit=True):
    """
    Remove indexes for `obj` from the database.

    We delete all instances of `Q<app_name>.<model_name>.<pk>` which
    should be unique to this object.

    Optional arguments:
        `commit` -- ignored
    """
    database = self._database(writable=True)
    database.delete_document(TERM_PREFIXES[ID] + get_identifier(obj))
    database.close()
Remove indexes for `obj` from the database. We delete all instances of `Q<app_name>.<model_name>.<pk>` which should be unique to this object. Optional arguments: `commit` -- ignored
def get_relationship(self, relationship_id): """Gets the ``Relationship`` specified by its ``Id``. arg: relationship_id (osid.id.Id): the ``Id`` of the ``Relationship`` to retrieve return: (osid.relationship.Relationship) - the returned ``Relationship`` raise: NotFound - no ``Relationship`` found with the given ``Id`` raise: NullArgument - ``relationship_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resource # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('relationship', collection='Relationship', runtime=self._runtime) result = collection.find_one( dict({'_id': ObjectId(self._get_id(relationship_id, 'relationship').get_identifier())}, **self._view_filter())) return objects.Relationship(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
Gets the ``Relationship`` specified by its ``Id``. arg: relationship_id (osid.id.Id): the ``Id`` of the ``Relationship`` to retrieve return: (osid.relationship.Relationship) - the returned ``Relationship`` raise: NotFound - no ``Relationship`` found with the given ``Id`` raise: NullArgument - ``relationship_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def add_column(self, position, source_header, datatype, **kwargs): """ Add a column to the source table. :param position: Integer position of the column started from 1. :param source_header: Name of the column, as it exists in the source file :param datatype: Python datatype ( str, int, float, None ) for the column :param kwargs: Other source record args. :return: """ from ..identity import GeneralNumber2 c = self.column(source_header) c_by_pos = self.column(position) datatype = 'str' if datatype == 'unicode' else datatype assert not c or not c_by_pos or c.vid == c_by_pos.vid # Convert almost anything to True / False if 'has_codes' in kwargs: FALSE_VALUES = ['False', 'false', 'F', 'f', '', None, 0, '0'] kwargs['has_codes'] = False if kwargs['has_codes'] in FALSE_VALUES else True if c: # Changing the position can result in conflicts assert not c_by_pos or c_by_pos.vid == c.vid c.update( position=position, datatype=datatype.__name__ if isinstance(datatype, type) else datatype, **kwargs) elif c_by_pos: # FIXME This feels wrong; there probably should not be any changes to the both # of the table, since then it won't represent the previouls source. Maybe all of the sources # should get their own tables initially, then affterward the duplicates can be removed. assert not c or c_by_pos.vid == c.vid c_by_pos.update( source_header=source_header, datatype=datatype.__name__ if isinstance(datatype, type) else datatype, **kwargs) else: assert not c and not c_by_pos # Hacking an id number, since I don't want to create a new Identity ObjectNUmber type c = SourceColumn( vid=str(GeneralNumber2('C', self.d_vid, self.sequence_id, int(position))), position=position, st_vid=self.vid, d_vid=self.d_vid, datatype=datatype.__name__ if isinstance(datatype, type) else datatype, source_header=source_header, **kwargs) self.columns.append(c) return c
Add a column to the source table. :param position: Integer position of the column started from 1. :param source_header: Name of the column, as it exists in the source file :param datatype: Python datatype ( str, int, float, None ) for the column :param kwargs: Other source record args. :return:
def freeze_graph_tpu(model_path): """Custom freeze_graph implementation for Cloud TPU.""" assert model_path assert FLAGS.tpu_name if FLAGS.tpu_name.startswith('grpc://'): tpu_grpc_url = FLAGS.tpu_name else: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=None, project=None) tpu_grpc_url = tpu_cluster_resolver.get_master() sess = tf.Session(tpu_grpc_url) output_names = [] with sess.graph.as_default(): # Replicate the inference function for each TPU core. replicated_features = [] for i in range(FLAGS.num_tpu_cores): features = tf.placeholder( tf.float32, [None, go.N, go.N, features_lib.NEW_FEATURES_PLANES], name='pos_tensor_%d' % i) replicated_features.append((features,)) outputs = tf.contrib.tpu.replicate( tpu_model_inference_fn, replicated_features) # The replicate op assigns names like output_0_shard_0 to the output # names. Give them human readable names. for i, (policy_output, value_output, _) in enumerate(outputs): policy_name = 'policy_output_%d' % i value_name = 'value_output_%d' % i output_names.extend([policy_name, value_name]) tf.identity(policy_output, policy_name) tf.identity(value_output, value_name) tf.train.Saver().restore(sess, model_path) # Freeze the graph. model_def = tf.graph_util.convert_variables_to_constants( sess, sess.graph.as_graph_def(), output_names) with tf.gfile.GFile(model_path + '.pb', 'wb') as f: f.write(model_def.SerializeToString())
Custom freeze_graph implementation for Cloud TPU.
def calculate_size(transaction_id, thread_id):
    """ Calculates the request payload size"""
    data_size = 0
    data_size += calculate_size_str(transaction_id)
    data_size += LONG_SIZE_IN_BYTES
    return data_size
Calculates the request payload size
def merge(self, other): """ Merges the two values """ other = IntervalCell.coerce(other) if self.is_equal(other): # pick among dependencies return self elif other.is_entailed_by(self): return self elif self.is_entailed_by(other): self.low, self.high = other.low, other.high elif self.is_contradictory(other): #import traceback #for line in traceback.format_stack(): print line.strip() raise Contradiction("Cannot merge [%0.2f, %0.2f] with [%0.2f, %0.2f]" \ % (self.low, self.high, other.low, other.high)) else: # information in both self.low = max(self.low, other.low) self.high = min(self.high, other.high) return self
Merges the two values
def operate(config):
    "Interface to do simple operations on the database."
    app = make_app(config=config)
    print "Operate Mode"
    with app.app_context():
        operate_menu()
Interface to do simple operations on the database.
def update_port_ip_address(self): """Find the ip address that assinged to a port via DHCP The port database will be updated with the ip address. """ leases = None req = dict(ip='0.0.0.0') instances = self.get_vms_for_this_req(**req) if instances is None: return for vm in instances: if not leases: # For the first time finding the leases file. leases = self._get_ip_leases() if not leases: # File does not exist. return for line in leases: if line.startswith('lease') and line.endswith('{\n'): ip_addr = line.split()[1] if 'hardware ethernet' in line: if vm.mac == line.replace(';', '').split()[2]: LOG.info('Find IP address %(ip)s for %(mac)s', {'ip': ip_addr, 'mac': vm.mac}) try: rule_info = dict(ip=ip_addr, mac=vm.mac, port=vm.port_id, status='up') self.neutron_event.update_ip_rule(str(vm.host), str(rule_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to update" "rules.") else: params = dict(columns=dict(ip=ip_addr)) self.update_vm_db(vm.port_id, **params) # Send update to the agent. vm_info = dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=ip_addr, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco')) try: self.neutron_event.send_vm_info(vm.host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to ' 'agent.')
Find the IP address that is assigned to a port via DHCP. The port database will be updated with the IP address.
def _create_threads(self):
    """
    This method creates job instances.
    """
    creator = JobCreator(
        self.config,
        self.observers.jobs,
        self.logger
    )

    self.jobs = creator.job_factory()
This method creates job instances.
def base_exception_handler(*args): """ Provides the base exception handler. :param \*args: Arguments. :type \*args: \* :return: Definition success. :rtype: bool """ header, frames, trcback = format_report(*extract_exception(*args)) LOGGER.error("!> {0}".format(Constants.logging_separators)) map(lambda x: LOGGER.error("!> {0}".format(x)), header) LOGGER.error("!> {0}".format(Constants.logging_separators)) map(lambda x: LOGGER.error("!> {0}".format(x)), frames) LOGGER.error("!> {0}".format(Constants.logging_separators)) sys.stderr.write("\n".join(trcback)) return True
Provides the base exception handler. :param \*args: Arguments. :type \*args: \* :return: Definition success. :rtype: bool
def generate_seasonal_averages(qout_file, seasonal_average_file, num_cpus=multiprocessing.cpu_count()): """ This function loops through a CF compliant rapid streamflow file to produce a netCDF file with a seasonal average for 365 days a year """ with RAPIDDataset(qout_file) as qout_nc_file: print("Generating seasonal average file ...") seasonal_avg_nc = Dataset(seasonal_average_file, 'w') seasonal_avg_nc.createDimension('rivid', qout_nc_file.size_river_id) seasonal_avg_nc.createDimension('day_of_year', 365) time_series_var = seasonal_avg_nc.createVariable('rivid', 'i4', ('rivid',)) time_series_var.long_name = ( 'unique identifier for each river reach') average_flow_var = \ seasonal_avg_nc.createVariable('average_flow', 'f8', ('rivid', 'day_of_year')) average_flow_var.long_name = 'seasonal average streamflow' average_flow_var.units = 'm3/s' std_dev_flow_var = \ seasonal_avg_nc.createVariable('std_dev_flow', 'f8', ('rivid', 'day_of_year')) std_dev_flow_var.long_name = 'seasonal std. dev. streamflow' std_dev_flow_var.units = 'm3/s' std_dev_flow_var = \ seasonal_avg_nc.createVariable('max_flow', 'f8', ('rivid', 'day_of_year')) std_dev_flow_var.long_name = 'seasonal max streamflow' std_dev_flow_var.units = 'm3/s' std_dev_flow_var = \ seasonal_avg_nc.createVariable('min_flow', 'f8', ('rivid', 'day_of_year')) std_dev_flow_var.long_name = 'seasonal min streamflow' std_dev_flow_var.units = 'm3/s' lat_var = seasonal_avg_nc.createVariable('lat', 'f8', ('rivid',), fill_value=-9999.0) lon_var = seasonal_avg_nc.createVariable('lon', 'f8', ('rivid',), fill_value=-9999.0) add_latlon_metadata(lat_var, lon_var) seasonal_avg_nc.variables['lat'][:] = \ qout_nc_file.qout_nc.variables['lat'][:] seasonal_avg_nc.variables['lon'][:] = \ qout_nc_file.qout_nc.variables['lon'][:] river_id_list = qout_nc_file.get_river_id_array() seasonal_avg_nc.variables['rivid'][:] = river_id_list seasonal_avg_nc.close() # generate multiprocessing jobs mp_lock = multiprocessing.Manager().Lock() # pylint: disable=no-member job_combinations = [] for day_of_year in range(1, 366): job_combinations.append((qout_file, seasonal_average_file, day_of_year, mp_lock )) pool = multiprocessing.Pool(num_cpus) pool.map(generate_single_seasonal_average, job_combinations) pool.close() pool.join()
This function loops through a CF compliant rapid streamflow file to produce a netCDF file with a seasonal average for 365 days a year
def _get_caller_globals_and_locals():
    """
    Returns the globals and locals of the calling frame.

    Is there an alternative to frame hacking here?
    """
    caller_frame = inspect.stack()[2]
    myglobals = caller_frame[0].f_globals
    mylocals = caller_frame[0].f_locals
    return myglobals, mylocals
Returns the globals and locals of the calling frame. Is there an alternative to frame hacking here?
def _get_session(self):
    """Start session with email server."""
    if self.port in (465, "465"):
        session = self._get_ssl()
    elif self.port in (587, "587"):
        session = self._get_tls()
    try:
        session.login(self.from_, self._auth)
    except SMTPResponseException as e:
        raise MessageSendError(e.smtp_error.decode("unicode_escape"))

    return session
Start session with email server.
def inline(self) -> str:
    """
    Return inline string format of the Membership instance

    :return:
    """
    return "{0}:{1}:{2}:{3}:{4}".format(self.issuer,
                                        self.signatures[0],
                                        self.membership_ts,
                                        self.identity_ts,
                                        self.uid)
Return inline string format of the Membership instance :return:
def fit(self, train_set, test_set): """Fit the model to the given data. :param train_set: training data :param test_set: test data """ with tf.Graph().as_default(), tf.Session() as self.tf_session: self.build_model() tf.global_variables_initializer().run() third = self.num_epochs // 3 for i in range(self.num_epochs): lr_decay = self.lr_decay ** max(i - third, 0.0) self.tf_session.run( tf.assign(self.lr_var, tf.multiply(self.learning_rate, lr_decay))) train_perplexity = self._run_train_step(train_set, 'train') print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) test_perplexity = self._run_train_step(test_set, 'test') print("Test Perplexity: %.3f" % test_perplexity)
Fit the model to the given data. :param train_set: training data :param test_set: test data
def add_slide(self, slide_layout):
    """
    Return an (rId, slide) pair of a newly created blank slide that
    inherits appearance from *slide_layout*.
    """
    partname = self._next_slide_partname
    slide_layout_part = slide_layout.part
    slide_part = SlidePart.new(partname, self.package, slide_layout_part)
    rId = self.relate_to(slide_part, RT.SLIDE)
    return rId, slide_part.slide
Return an (rId, slide) pair of a newly created blank slide that inherits appearance from *slide_layout*.
def parse_mixed_delim_str(line):
    """Turns .obj face index string line into [verts, texcoords, normals] numeric tuples."""
    arrs = [[], [], []]
    for group in line.split(' '):
        for col, coord in enumerate(group.split('/')):
            if coord:
                arrs[col].append(int(coord))

    return [tuple(arr) for arr in arrs]
Turns .obj face index string line into [verts, texcoords, normals] numeric tuples.
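A worked example of the parser above on a hypothetical face line in the v/vt/vn form used by .obj files; note that indices come back grouped per attribute rather than per vertex.

    # 'f' keyword already stripped; each group is vertex/texcoord/normal.
    face = "1/2/3 4/5/6 7/8/9"
    verts, texcoords, normals = parse_mixed_delim_str(face)
    # verts     == (1, 4, 7)
    # texcoords == (2, 5, 8)
    # normals   == (3, 6, 9)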
def _get_filename(self):
    """Return a unique file name."""
    if self._fname is None:
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        fname = "%s-%s.log" % (timestamp, abs(id(self)))
        self._fname = os.path.join(self.file_path, fname)
    return self._fname
Return a unique file name.
def devices_l(self) -> Dict:
    '''List connected devices (-l for long output).'''
    output, _ = self._execute('devices', '-l')
    devices = output.split()[4::6]
    models = output.split()[7::6]
    return dict(zip(devices, models))
List connected devices (-l for long output).
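The slicing in the method above only works if the `adb devices -l` output begins with the four-token header "List of devices attached" and every device line contributes exactly six whitespace-separated tokens; under that assumption the serial is every sixth token starting at index 4 and the `model:` field every sixth token starting at index 7. A sketch with fabricated output:

    # Fabricated `adb devices -l` output; real output may carry a different
    # number of fields per line, which would break the fixed-stride slicing.
    output = ("List of devices attached\n"
              "emulator-5554 device product:sdk_x86 model:Pixel_3 device:generic transport_id:1\n")
    tokens = output.split()
    devices = tokens[4::6]  # ['emulator-5554']
    models = tokens[7::6]   # ['model:Pixel_3']
    # dict(zip(devices, models)) == {'emulator-5554': 'model:Pixel_3'}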
def _create_sequences(self): '''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.''' # Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences try: self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir) except PDBMissingMainchainAtomsException: self.pdb_to_rosetta_residue_map_error = True # Get all the Sequences if self.pdb_id not in do_not_use_the_sequence_aligner: self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences else: self.uniparc_sequences = self.sifts.get_uniparc_sequences() self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id) self.seqres_sequences = self.pdb.seqres_sequences self.atom_sequences = self.pdb.atom_sequences if self.pdb_to_rosetta_residue_map_error: self.rosetta_sequences = {} for c in self.atom_sequences.keys(): self.rosetta_sequences[c] = Sequence() else: self.rosetta_sequences = self.pdb.rosetta_sequences # Update the chain types for the UniParc sequences uniparc_pdb_chain_mapping = {} if self.pdb_id not in do_not_use_the_sequence_aligner: for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.iteritems(): if matches: # we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc. uniparc_chain_id = matches.keys()[0] assert(len(matches) == 1) uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, []) uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id) else: for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().iteritems(): for uniparc_chain_id in uniparc_chain_ids: uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, []) uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id) for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.iteritems(): sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids]) assert(len(sequence_type) == 1) sequence_type = sequence_type.pop() assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None) self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type) for p in pdb_chain_ids: self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id # Update the chain types for the FASTA sequences for chain_id, sequence in self.seqres_sequences.iteritems(): self.fasta_sequences[chain_id].set_type(sequence.sequence_type)
Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.
def _sort_tensor(tensor):
    """Use `top_k` to sort a `Tensor` along the last dimension."""
    sorted_, _ = tf.nn.top_k(tensor, k=tf.shape(input=tensor)[-1])
    sorted_.set_shape(tensor.shape)
    return sorted_
Use `top_k` to sort a `Tensor` along the last dimension.
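Because `tf.nn.top_k` returns values in descending order, the helper above sorts each slice along the last dimension from largest to smallest. A minimal sketch, assuming a TensorFlow 2.x eager environment:

    import tensorflow as tf  # assumed environment: TF 2.x, eager mode

    x = tf.constant([[3.0, 1.0, 2.0],
                     [0.5, 4.0, -1.0]])
    print(_sort_tensor(x))
    # [[ 3.   2.   1. ]
    #  [ 4.   0.5 -1. ]]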
def callback(self):
    '''Run the callback'''
    self._callback(*self._args, **self._kwargs)
    self._last_checked = time.time()
Run the callback
def pick_q_v1(self):
    """Assign the actual value of the inlet sequence to the upper joint
    of the subreach upstream."""
    inl = self.sequences.inlets.fastaccess
    new = self.sequences.states.fastaccess_new
    new.qjoints[0] = 0.
    for idx in range(inl.len_q):
        new.qjoints[0] += inl.q[idx][0]
Assign the actual value of the inlet sequence to the upper joint of the subreach upstream.
def html(self): """ Render this test case as HTML :return: """ failure = "" skipped = None stdout = tag.text(self.stdout) stderr = tag.text(self.stderr) if self.skipped: skipped = """ <hr size="1"/> <div class="skipped"><b>Skipped: {msg}</b><br/> <pre>{skip}</pre> </div> """.format(msg=tag.text(self.skipped_msg), skip=tag.text(self.skipped)) if self.failed(): failure = """ <hr size="1"/> <div class="failure"><b>Failed: {msg}</b><br/> <pre>{fail}</pre> </div> """.format(msg=tag.text(self.failure_msg), fail=tag.text(self.failure)) properties = [x.html() for x in self.properties] return """ <a name="{anchor}"> <div class="testcase"> <div class="details"> <span class="testname"><b>{testname}</b></span><br/> <span class="testclassname">{testclassname}</span><br/> <span class="duration">Time Taken: {duration}s</span> </div> {skipped} {failure} <hr size="1"/> {properties} <div class="stdout"><i>Stdout</i><br/> <pre>{stdout}</pre></div> <hr size="1"/> <div class="stderr"><i>Stderr</i><br/> <pre>{stderr}</pre></div> </div> </a> """.format(anchor=self.anchor(), testname=self.name, testclassname=self.testclass.name, duration=self.duration, failure=failure, skipped=skipped, properties="".join(properties), stdout=stdout, stderr=stderr)
Render this test case as HTML :return:
def print_async_event(self, suffix, event): ''' Print all of the events with the prefix 'tag' ''' if not isinstance(event, dict): return # if we are "quiet", don't print if self.opts.get('quiet', False): return # some suffixes we don't want to print if suffix in ('new',): return try: outputter = self.opts.get('output', event.get('outputter', None) or event.get('return').get('outputter')) except AttributeError: outputter = None # if this is a ret, we have our own set of rules if suffix == 'ret': # Check if outputter was passed in the return data. If this is the case, # then the return data will be a dict two keys: 'data' and 'outputter' if isinstance(event.get('return'), dict) \ and set(event['return']) == set(('data', 'outputter')): event_data = event['return']['data'] outputter = event['return']['outputter'] else: event_data = event['return'] else: event_data = {'suffix': suffix, 'event': event} salt.output.display_output(event_data, outputter, self.opts)
Print all of the events with the prefix 'tag'
def render_html(self):
    """Render an HTML report."""
    return self._template.safe_substitute(
        report_type=self._report_type,
        results=self.render_json()
    )
Render an HTML report.
def _finish_disconnection_action(self, action): """Finish a disconnection attempt There are two possible outcomes: - if we were successful at disconnecting, we transition to disconnected - if we failed at disconnecting, we transition back to idle Args: action (ConnectionAction): the action object describing what we are disconnecting from and what the result of the operation was """ success = action.data['success'] conn_key = action.data['id'] if self._get_connection_state(conn_key) != self.Disconnecting: self._logger.error("Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s", str(conn_key)) return # Cannot be None since we checked above to make sure it exists data = self._get_connection(conn_key) callback = data['callback'] conn_id = data['conn_id'] int_id = data['int_id'] if success is False: reason = action.data['reason'] if reason is None: reason = "No reason was given" data['state'] = self.Idle data['microstate'] = None data['callback'] = None callback(conn_id, self.id, False, reason) else: del self._connections[conn_id] del self._int_connections[int_id] callback(conn_id, self.id, True, None)
Finish a disconnection attempt There are two possible outcomes: - if we were successful at disconnecting, we transition to disconnected - if we failed at disconnecting, we transition back to idle Args: action (ConnectionAction): the action object describing what we are disconnecting from and what the result of the operation was
def unsign_data(self, data, url_safe=True): """ Retrieve the signed data. If it is expired, it will throw an exception :param data: token/signed data :param url_safe: bool. If true it will allow it to be passed in URL :return: mixed, the data in its original form """ if url_safe: return utils.unsign_url_safe(data, secret_key=self.secret_key, salt=self.user_salt) else: return utils.unsign_data(data, secret_key=self.secret_key, salt=self.user_salt)
Retrieve the signed data. If it is expired, it will throw an exception :param data: token/signed data :param url_safe: bool. If true it will allow it to be passed in URL :return: mixed, the data in its original form
def execute(self, args, kwargs):
    '''
    Dispatch a call. Call the first function whose type signature matches
    the arguments.
    '''
    return self.lookup_explicit(args, kwargs)(*args, **kwargs)
Dispatch a call. Call the first function whose type signature matches the arguments.
def session(self, session=None):
    '''Override :meth:`Manager.session` so that this :class:`RelatedManager`
    can retrieve the session from the :attr:`related_instance` if available.
    '''
    if self.related_instance:
        session = self.related_instance.session
    # we have a session: either a newly created one or the same session
    if session is None:
        raise QuerySetError('Related manager can be accessed only from '
                            'a loaded instance of its related model.')
    return session
Override :meth:`Manager.session` so that this :class:`RelatedManager` can retrieve the session from the :attr:`related_instance` if available.
def eval_py(self, _globals, _locals):
    """
    Evaluates a file containing a Python params dictionary.
    """
    try:
        params = eval(self.script, _globals, _locals)
    except NameError as e:
        raise Exception(
            'Failed to evaluate parameters: {}'.format(str(e))
        )
    except ResolutionError as e:
        raise Exception('GetOutput: {}'.format(str(e)))

    return params
Evaluates a file containing a Python params dictionary.
def _encode_params(**kw): ''' do url-encode parameters >>> _encode_params(a=1, b='R&D') 'a=1&b=R%26D' >>> _encode_params(a=u'\u4e2d\u6587', b=['A', 'B', 123]) 'a=%E4%B8%AD%E6%96%87&b=A&b=B&b=123' ''' args = [] for k, v in kw.iteritems(): if isinstance(v, basestring): qv = v.encode('utf-8') if isinstance(v, unicode) else v args.append('%s=%s' % (k, urllib.quote(qv))) elif isinstance(v, collections.Iterable): for i in v: qv = i.encode('utf-8') if isinstance(i, unicode) else str(i) args.append('%s=%s' % (k, urllib.quote(qv))) else: qv = str(v) args.append('%s=%s' % (k, urllib.quote(qv))) return '&'.join(args)
do url-encode parameters >>> _encode_params(a=1, b='R&D') 'a=1&b=R%26D' >>> _encode_params(a=u'\u4e2d\u6587', b=['A', 'B', 123]) 'a=%E4%B8%AD%E6%96%87&b=A&b=B&b=123'
def attach(gandi, disk, vm, position, read_only, background, force): """ Attach disk to vm. disk can be a disk name, or ID vm can be a vm name, or ID """ if not force: proceed = click.confirm("Are you sure you want to attach disk '%s'" " to vm '%s'?" % (disk, vm)) if not proceed: return disk_info = gandi.disk.info(disk) attached = disk_info.get('vms_id', False) if attached and not force: gandi.echo('This disk is still attached') proceed = click.confirm('Are you sure you want to detach %s?' % disk) if not proceed: return result = gandi.disk.attach(disk, vm, background, position, read_only) if background and result: gandi.pretty_echo(result) return result
Attach disk to vm. disk can be a disk name, or ID vm can be a vm name, or ID
def shorten_text(self, text):
    """Shortens text to fit into the :attr:`width`."""
    if len(text) > self.width:
        return text[:self.width - 3] + '...'
    return text
Shortens text to fit into the :attr:`width`.
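A worked example of the truncation rule above, assuming an instance whose `width` is 10: anything longer than the width keeps its first `width - 3` characters and gains a three-character ellipsis, so the result is exactly `width` characters long.

    # Assuming self.width == 10:
    # shorten_text("hello world") -> "hello w..."  (7 chars kept + "...", 10 total)
    # shorten_text("short")       -> "short"       (fits, returned unchanged)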
def GetIPAddresses(self):
    """Return a list of IP addresses."""
    results = []
    for address in self.addresses:
        human_readable_address = address.human_readable_address
        if human_readable_address is not None:
            results.append(human_readable_address)

    return results
Return a list of IP addresses.
def run(self, arguments, show_help=True): """ Program entry point. Please note that the first item in ``arguments`` is discarded, as it is assumed to be the script/invocation name; pass a "dumb" placeholder if you call this method with an argument different that ``sys.argv``. :param arguments: the list of arguments :type arguments: list :param show_help: if ``False``, do not show help on ``-h`` and ``--help`` :type show_help: bool :rtype: int """ # convert arguments into Unicode strings if self.use_sys: # check that sys.stdin.encoding and sys.stdout.encoding are set to utf-8 if not gf.FROZEN: if sys.stdin.encoding not in ["UTF-8", "UTF8"]: self.print_warning(u"The default input encoding is not UTF-8.") self.print_warning(u"You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.") if sys.stdout.encoding not in ["UTF-8", "UTF8"]: self.print_warning(u"The default output encoding is not UTF-8.") self.print_warning(u"You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.") # decode using sys.stdin.encoding args = [gf.safe_unicode_stdin(arg) for arg in arguments] else: # decode using utf-8 (but you should pass Unicode strings as parameters anyway) args = [gf.safe_unicode(arg) for arg in arguments] if show_help: if u"-h" in args: return self.print_help(short=True) if u"--help" in args: return self.print_help(short=False) if u"--help-rconf" in args: return self.print_rconf_parameters() if u"--version" in args: return self.print_name_version() # store formal arguments self.formal_arguments_raw = arguments self.formal_arguments = args # to obtain the actual arguments, # remove the first one and "special" switches args = args[1:] set_args = set(args) # set verbosity, if requested for flag in set([u"-v", u"--verbose"]) & set_args: self.verbose = True args.remove(flag) for flag in set([u"-vv", u"--very-verbose"]) & set_args: self.verbose = True self.very_verbose = True args.remove(flag) # set RuntimeConfiguration string, if specified for flag in [u"-r", u"--runtime-configuration"]: rconf_string = self.has_option_with_value(flag, actual_arguments=False) if rconf_string is not None: self.rconf = RuntimeConfiguration(rconf_string) args.remove("%s=%s" % (flag, rconf_string)) # set log file path, if requested log_path = None for flag in [u"-l", u"--log"]: log_path = self.has_option_with_value(flag, actual_arguments=False) if log_path is not None: args.remove("%s=%s" % (flag, log_path)) elif flag in set_args: handler, log_path = gf.tmp_file(suffix=u".log", root=self.rconf[RuntimeConfiguration.TMP_PATH]) args.remove(flag) if log_path is not None: self.log_file_path = log_path # if no actual arguments left, print help if (len(args) < 1) and (show_help): return self.print_help(short=True) # store actual arguments self.actual_arguments = args # create logger self.logger = Logger(tee=self.verbose, tee_show_datetime=self.very_verbose) self.log([u"Running aeneas %s", aeneas_version]) self.log([u"Formal arguments: %s", self.formal_arguments]) self.log([u"Actual arguments: %s", self.actual_arguments]) self.log([u"Runtime configuration: '%s'", self.rconf.config_string]) # perform command exit_code = self.perform_command() self.log([u"Execution completed with code %d", exit_code]) # output log if requested if self.log_file_path is not None: self.log([u"User requested saving log to file '%s'", self.log_file_path]) self.logger.write(self.log_file_path) if self.use_sys: self.print_info(u"Log written to file '%s'" % self.log_file_path) return self.exit(exit_code)
Program entry point. Please note that the first item in ``arguments`` is discarded, as it is assumed to be the script/invocation name; pass a "dumb" placeholder if you call this method with an argument different that ``sys.argv``. :param arguments: the list of arguments :type arguments: list :param show_help: if ``False``, do not show help on ``-h`` and ``--help`` :type show_help: bool :rtype: int
def _GetDirectory(self):
    """Retrieves a directory.

    Returns:
      VShadowDirectory: a directory, or None if not available.
    """
    if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
        return None

    return VShadowDirectory(self._file_system, self.path_spec)
Retrieves a directory. Returns: VShadowDirectory: a directory, or None if not available.
def _CheckKeyPath(self, registry_key, search_depth): """Checks the key path find specification. Args: registry_key (WinRegistryKey): Windows Registry key. search_depth (int): number of key path segments to compare. Returns: bool: True if the Windows Registry key matches the find specification, False if not. """ if self._key_path_segments is None: return False if search_depth < 0 or search_depth > self._number_of_key_path_segments: return False # Note that the root has no entry in the key path segments and # no name to match. if search_depth == 0: segment_name = '' else: segment_name = self._key_path_segments[search_depth - 1] if self._is_regex: if isinstance(segment_name, py2to3.STRING_TYPES): # Allow '\n' to be matched by '.' and make '\w', '\W', '\b', '\B', # '\d', '\D', '\s' and '\S' Unicode safe. flags = re.DOTALL | re.IGNORECASE | re.UNICODE try: segment_name = r'^{0:s}$'.format(segment_name) segment_name = re.compile(segment_name, flags=flags) except sre_constants.error: # TODO: set self._key_path_segments[search_depth - 1] to None ? return False self._key_path_segments[search_depth - 1] = segment_name else: segment_name = segment_name.lower() self._key_path_segments[search_depth - 1] = segment_name if search_depth > 0: if self._is_regex: # pylint: disable=no-member if not segment_name.match(registry_key.name): return False elif segment_name != registry_key.name.lower(): return False return True
Checks the key path find specification. Args: registry_key (WinRegistryKey): Windows Registry key. search_depth (int): number of key path segments to compare. Returns: bool: True if the Windows Registry key matches the find specification, False if not.
def _format_time(seconds):
    """
    Args:
        seconds (float): amount of time

    Format time string for eta and elapsed
    """
    # Always do minutes and seconds in mm:ss format
    minutes = seconds // 60
    hours = minutes // 60
    rtn = u'{0:02.0f}:{1:02.0f}'.format(minutes % 60, seconds % 60)

    # Add hours if there are any
    if hours:
        rtn = u'{0:d}h {1}'.format(int(hours % 24), rtn)

        # Add days if there are any
        days = int(hours // 24)
        if days:
            rtn = u'{0:d}d {1}'.format(days, rtn)

    return rtn
Args: seconds (float): amount of time Format time string for eta and elapsed
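A few worked values, assuming the function above is in scope (the outputs follow directly from the arithmetic in the code):

    print(_format_time(75))      # u'01:15'
    print(_format_time(3661))    # u'1h 01:01'
    print(_format_time(90061))   # u'1d 1h 01:01'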
def parse_size(image, size):
    """
    Parse a size string (e.g. "200", "200x100", "x200") into a
    (width, height) tuple.
    """
    bits = size.split("x")
    if image.size[0] == 0 or image.size[1] == 0:
        ratio = 1.0
    else:
        ratio = float(image.size[0]) / float(image.size[1])

    if len(bits) == 1 or not bits[1]:
        width = int(bits[0])
        height = int(1 / ratio * width)
    elif not bits[0]:
        height = int(bits[1])
        width = int(height * ratio)
    else:
        width, height = map(int, bits)
    return width, height
Parse a size string (e.g. "200", "200x100", "x200") into a (width, height) tuple.
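A worked example, assuming `image` is a PIL image of size 400x200 (aspect ratio 2.0); whichever dimension is missing gets filled in from that ratio.

    from PIL import Image  # assumed dependency for the `image` argument

    img = Image.new("RGB", (400, 200))   # ratio = 400 / 200 = 2.0
    print(parse_size(img, "200"))        # (200, 100) -- height = width / ratio
    print(parse_size(img, "x100"))       # (200, 100) -- width = height * ratio
    print(parse_size(img, "300x50"))     # (300, 50)  -- both given, ratio ignored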
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Storm Pipe Network File Read from File Method """ # Set file extension property self.fileExtension = extension # Dictionary of keywords/cards and parse function names KEYWORDS = {'CONNECT': spc.connectChunk, 'SJUNC': spc.sjuncChunk, 'SLINK': spc.slinkChunk} sjuncs = [] slinks = [] connections = [] # Parse file into chunks associated with keywords/cards with open(path, 'r') as f: chunks = pt.chunk(KEYWORDS, f) # Parse chunks associated with each key for key, chunkList in iteritems(chunks): # Parse each chunk in the chunk list for chunk in chunkList: # Call chunk specific parsers for each chunk result = KEYWORDS[key](key, chunk) # Cases if key == 'CONNECT': connections.append(result) elif key == 'SJUNC': sjuncs.append(result) elif key == 'SLINK': slinks.append(result) # Create GSSHAPY objects self._createConnection(connections) self._createSjunc(sjuncs) self._createSlink(slinks)
Storm Pipe Network File Read from File Method
def _consolidate_coordinateList( self, coordinateList): """*match the coordinate list against itself with the parameters of the NED search queries to minimise duplicated NED queries* **Key Arguments:** - ``coordinateList`` -- the original coordinateList. **Return:** - ``updatedCoordinateList`` -- the coordinate list with duplicated search areas removed **Usage:** .. todo:: - add usage info - create a sublime snippet for usage - update package tutorial if needed .. code-block:: python usage code .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``_consolidate_coordinateList`` method') raList = [] raList[:] = np.array([c[0] for c in coordinateList]) decList = [] decList[:] = np.array([c[1] for c in coordinateList]) nedStreamRadius = self.settings[ "ned stream search radius arcec"] / (60. * 60.) firstPassNedSearchRadius = self.settings[ "first pass ned search radius arcec"] / (60. * 60.) radius = nedStreamRadius - firstPassNedSearchRadius # LET'S BE CONSERVATIVE # radius = radius * 0.9 xmatcher = sets( log=self.log, ra=raList, dec=decList, radius=radius, # in degrees sourceList=coordinateList, convertToArray=False ) allMatches = xmatcher.match updatedCoordianteList = [] for aSet in allMatches: updatedCoordianteList.append(aSet[0]) self.log.debug('completed the ``_consolidate_coordinateList`` method') return updatedCoordianteList
*match the coordinate list against itself with the parameters of the NED search queries to minimise duplicated NED queries* **Key Arguments:** - ``coordinateList`` -- the original coordinateList. **Return:** - ``updatedCoordinateList`` -- the coordinate list with duplicated search areas removed **Usage:** .. todo:: - add usage info - create a sublime snippet for usage - update package tutorial if needed .. code-block:: python usage code .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring
def cashFlow(symbol, token='', version=''): '''Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result ''' _raiseIfNotStr(symbol) return _getJson('stock/' + symbol + '/cash-flow', token, version)
Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'): ''' Returns requested Server Density authentication value from pillar. CLI Example: .. code-block:: bash salt '*' serverdensity_device.get_sd_auth <val> ''' sd_pillar = __pillar__.get(sd_auth_pillar_name) log.debug('Server Density Pillar: %s', sd_pillar) if not sd_pillar: log.error('Could not load %s pillar', sd_auth_pillar_name) raise CommandExecutionError( '{0} pillar is required for authentication'.format(sd_auth_pillar_name) ) try: return sd_pillar[val] except KeyError: log.error('Could not find value %s in pillar', val) raise CommandExecutionError('{0} value was not found in pillar'.format(val))
Returns requested Server Density authentication value from pillar. CLI Example: .. code-block:: bash salt '*' serverdensity_device.get_sd_auth <val>
def p_array_literal_2(self, p):
    """array_literal : LBRACKET element_list RBRACKET
                     | LBRACKET element_list COMMA elision_opt RBRACKET
    """
    items = p[2]
    if len(p) == 6:
        items.extend(p[4])
    p[0] = ast.Array(items=items)
array_literal : LBRACKET element_list RBRACKET | LBRACKET element_list COMMA elision_opt RBRACKET
def sort_pkglist(pkgs): ''' Accepts a dict obtained from pkg.list_pkgs() and sorts in place the list of versions for any packages that have multiple versions installed, so that two package lists can be compared to one another. CLI Example: .. code-block:: bash salt '*' pkg_resource.sort_pkglist '["3.45", "2.13"]' ''' # It doesn't matter that ['4.9','4.10'] would be sorted to ['4.10','4.9'], # so long as the sorting is consistent. try: for key in pkgs: # Passing the pkglist to set() also removes duplicate version # numbers (if present). pkgs[key] = sorted(set(pkgs[key])) except AttributeError as exc: log.exception(exc)
Accepts a dict obtained from pkg.list_pkgs() and sorts in place the list of versions for any packages that have multiple versions installed, so that two package lists can be compared to one another. CLI Example: .. code-block:: bash salt '*' pkg_resource.sort_pkglist '["3.45", "2.13"]'
def get_block_from_time(self, timestring, error_margin=10):
    """ Estimate block number from given time

        :param str timestring: String representing time
        :param int error_margin: Estimate block number within this interval (in seconds)
    """
    known_block = self.get_current_block()['block_num']
    known_block_timestamp = self.block_timestamp(known_block)
    timestring_timestamp = parse_time(timestring).timestamp()
    delta = known_block_timestamp - timestring_timestamp
    block_delta = delta / 3
    guess_block = known_block - block_delta
    guess_block_timestamp = self.block_timestamp(guess_block)
    error = timestring_timestamp - guess_block_timestamp
    while abs(error) > error_margin:
        guess_block += error / 3
        guess_block_timestamp = self.block_timestamp(guess_block)
        error = timestring_timestamp - guess_block_timestamp
    return int(guess_block)
Estimate block number from given time :param str timestring: String representing time :param int error_margin: Estimate block number within this interval (in seconds)
def retry(self, delay=0, group=None, message=None):
    '''Retry this job in a little bit, in the same queue. This is meant
    for the times when you detect a transient failure yourself'''
    args = ['retry', self.jid, self.queue_name, self.worker_name, delay]
    if group is not None and message is not None:
        args.append(group)
        args.append(message)
    return self.client(*args)
Retry this job in a little bit, in the same queue. This is meant for the times when you detect a transient failure yourself
def to_python(self, value: Optional[str]) -> Optional[Any]: """ Called during deserialization and during form ``clean()`` calls. Must deal with an instance of the correct type; a string; or ``None`` (if the field allows ``null=True``). Should raise ``ValidationError`` if problems. """ # https://docs.djangoproject.com/en/1.8/howto/custom-model-fields/ # log.debug("to_python: {}, {}", value, type(value)) if isinstance(value, datetime.datetime): return value if value is None: return value if value == '': return None return iso_string_to_python_datetime(value)
Called during deserialization and during form ``clean()`` calls. Must deal with an instance of the correct type; a string; or ``None`` (if the field allows ``null=True``). Should raise ``ValidationError`` if problems.
def _compile_lock(self, query, value): """ Compile the lock into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :param value: The lock value :type value: bool or str :return: The compiled lock :rtype: str """ if isinstance(value, basestring): return value if value is True: return 'FOR UPDATE' elif value is False: return 'LOCK IN SHARE MODE'
Compile the lock into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :param value: The lock value :type value: bool or str :return: The compiled lock :rtype: str
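The mapping is easiest to see on concrete values. A standalone replica of the branch logic (Python 3 str in place of basestring), purely for illustration:

def compile_lock_sketch(value):
    # A raw lock clause passed as a string is emitted verbatim;
    # booleans select between the two MySQL-style lock modes.
    if isinstance(value, str):
        return value
    return 'FOR UPDATE' if value else 'LOCK IN SHARE MODE'

print(compile_lock_sketch(True))                 # FOR UPDATE
print(compile_lock_sketch(False))                # LOCK IN SHARE MODE
print(compile_lock_sketch('FOR UPDATE NOWAIT'))  # FOR UPDATE NOWAIT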
def check_file(filepath): ''' - Checks if the parent directories for this path exist. - Checks that the file exists. - Donates the file to the web server user. TODO: This is Debian / Ubuntu specific. ''' check_path(filepath) if not os.path.exists(filepath): print("WARNING: File does not exist. Creating it: %s" % filepath) open(filepath, 'a').close() try: print("Setting access rights for %s for www-data user" % (filepath)) uid = pwd.getpwnam("www-data").pw_uid gid = grp.getgrnam("www-data").gr_gid os.chown(filepath, uid, gid) os.chmod(filepath, 0o660) # rw-rw--- except Exception: print("WARNING: Could not adjust file system permissions for %s. Make sure your web server can write into it." % filepath)
- Checks if the parent directories for this path exist. - Checks that the file exists. - Donates the file to the web server user. TODO: This is Debian / Ubuntu specific.
def create(self, request): """ Log in django staff user """ # TODO: Decorate api with sensitive post parameters as Django admin do? # from django.utils.decorators import method_decorator # from django.views.decorators.debug import sensitive_post_parameters # sensitive_post_parameters_m = method_decorator(sensitive_post_parameters()) login_form = AuthenticationForm(request, data=request.data) if not login_form.is_valid(): raise serializers.ValidationError(login_form.errors) auth_login(request, login_form.get_user()) serializer = UserSerializer(request.user) return Response(serializer.data, status=status.HTTP_200_OK)
Log in django staff user
def vars_to_array(self): """ Convert `self.vars` to a numpy array Returns ------- numpy.array """ logger.warn('This function is deprecated. You can inspect `self.np_vars` directly as NumPy arrays ' 'without conversion.') if not self.vars: return None vars_matrix = matrix(self.vars, size=(self.vars[0].size[0], len(self.vars))).trans() self.vars_array = np.array(vars_matrix) return self.vars_array
Convert `self.vars` to a numpy array Returns ------- numpy.array
def same_origin(url1, url2): ''' Return True if the urls have the same origin, else False. Copied from Django: https://github.com/django/django/blob/master/django/utils/http.py#L255 ''' p1, p2 = urlparse(url1), urlparse(url2) try: o1 = (p1.scheme, p1.hostname, p1.port or PROTOCOL_TO_PORT[p1.scheme]) o2 = (p2.scheme, p2.hostname, p2.port or PROTOCOL_TO_PORT[p2.scheme]) return o1 == o2 except (ValueError, KeyError): return False
Return True if the urls have the same origin, else False. Copied from Django: https://github.com/django/django/blob/master/django/utils/http.py#L255
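A short usage sketch, assuming the module-level PROTOCOL_TO_PORT constant maps 'http' to 80 and 'https' to 443:

same_origin('https://example.com/a', 'https://example.com:443/b')  # True  (default port matches)
same_origin('http://example.com', 'https://example.com')           # False (scheme differs)
same_origin('http://example.com', 'http://example.com:8080')       # False (port differs)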
def p_partselect_pointer_minus(self, p): 'partselect : pointer LBRACKET expression MINUSCOLON expression RBRACKET' p[0] = Partselect(p[1], p[3], Minus( p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
partselect : pointer LBRACKET expression MINUSCOLON expression RBRACKET
def validate_auth_option(option, value): """Validate optional authentication parameters. """ lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: raise ConfigurationError('Unknown ' 'authentication option: %s' % (option,)) return lower, value
Validate optional authentication parameters.
def json2space(x, oldy=None, name=NodeType.Root.value): """Change search space from json format to hyperopt format """ y = list() if isinstance(x, dict): if NodeType.Type.value in x.keys(): _type = x[NodeType.Type.value] name = name + '-' + _type if _type == 'choice': if oldy != None: _index = oldy[NodeType.Index.value] y += json2space(x[NodeType.Value.value][_index], oldy[NodeType.Value.value], name=name+'[%d]' % _index) else: y += json2space(x[NodeType.Value.value], None, name=name) y.append(name) else: for key in x.keys(): y += json2space(x[key], (oldy[key] if oldy != None else None), name+"[%s]" % str(key)) elif isinstance(x, list): for i, x_i in enumerate(x): y += json2space(x_i, (oldy[i] if oldy != None else None), name+"[%d]" % i) else: pass return y
Change search space from json format to hyperopt format
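A hedged illustration of the call, assuming NodeType.Type and NodeType.Value resolve to the NNI-style '_type'/'_value' keys (an assumption, not confirmed here):

# Hypothetical NNI-style search space; the '_type'/'_value' key names are assumed.
search_space = {
    'lr':        {'_type': 'choice', '_value': [0.01, 0.1]},
    'optimizer': {'_type': 'choice', '_value': ['sgd', 'adam']},
}
names = json2space(search_space)
# One generated name per tunable node is expected, e.g.
# ['root[lr]-choice', 'root[optimizer]-choice']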
def metropolis_hastings_step(current_state: State, proposed_state: State, energy_change: FloatTensor, seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]: """Metropolis-Hastings step. This probabilistically chooses between `current_state` and `proposed_state` based on the `energy_change` so as to preserve detailed balance. Energy change is the negative of `log_accept_ratio`. Args: current_state: Current state. proposed_state: Proposed state. energy_change: E(proposed_state) - E(previous_state). seed: For reproducibility. Returns: new_state: The chosen state. is_accepted: Whether the proposed state was accepted. log_uniform: The random number that was used to select between the two states. """ flat_current = tf.nest.flatten(current_state) flat_proposed = nest.flatten_up_to(current_state, proposed_state) # Impute the None's in the current state. flat_current = [ p if c is None else c for p, c in zip(flat_proposed, flat_current) ] current_state = tf.nest.pack_sequence_as(current_state, flat_current) current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state) proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state) energy_change = tf.convert_to_tensor(value=energy_change) log_accept_ratio = -energy_change log_uniform = tf.math.log( tf.random.uniform( shape=tf.shape(input=log_accept_ratio), dtype=log_accept_ratio.dtype.base_dtype, seed=seed)) is_accepted = log_uniform < log_accept_ratio next_state = mcmc_util.choose( is_accepted, proposed_state, current_state, name='choose_next_state') return next_state, is_accepted, log_uniform
Metropolis-Hastings step. This probabilistically chooses between `current_state` and `proposed_state` based on the `energy_change` so as to preserve detailed balance. Energy change is the negative of `log_accept_ratio`. Args: current_state: Current state. proposed_state: Proposed state. energy_change: E(proposed_state) - E(previous_state). seed: For reproducibility. Returns: new_state: The chosen state. is_accepted: Whether the proposed state was accepted. log_uniform: The random number that was used to select between the two states.
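The accept/reject rule itself (setting aside the TensorFlow structure handling) can be sketched in plain NumPy; a minimal, illustrative version:

import numpy as np

def mh_accept_sketch(current, proposed, energy_change, rng=np.random.default_rng(0)):
    # Accept when log(U) < -energy_change, i.e. with probability min(1, exp(-energy_change)).
    log_accept_ratio = -np.asarray(energy_change, dtype=float)
    log_uniform = np.log(rng.uniform(size=log_accept_ratio.shape))
    is_accepted = log_uniform < log_accept_ratio
    next_state = np.where(is_accepted, proposed, current)
    return next_state, is_accepted, log_uniform

# A proposal that lowers the energy (negative energy_change) is always accepted.
print(mh_accept_sketch(np.zeros(3), np.ones(3), np.array([-1.0, 0.0, 5.0])))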
def create_path_env_var(new_entries, env=None, env_var='PATH', delimiter=':', prepend=False): """Join path entries, combining with an environment variable if specified.""" if env is None: env = {} prev_path = env.get(env_var, None) if prev_path is None: path_dirs = list() else: path_dirs = list(prev_path.split(delimiter)) new_entries_list = list(new_entries) if prepend: path_dirs = new_entries_list + path_dirs else: path_dirs += new_entries_list return delimiter.join(path_dirs)
Join path entries, combining with an environment variable if specified.
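Usage sketch with hypothetical entries:

create_path_env_var(['/opt/tools/bin'], env={'PATH': '/usr/bin:/bin'})
# -> '/usr/bin:/bin:/opt/tools/bin'
create_path_env_var(['/opt/tools/bin'], env={'PATH': '/usr/bin:/bin'}, prepend=True)
# -> '/opt/tools/bin:/usr/bin:/bin'
create_path_env_var(['a', 'b'], delimiter=';')
# -> 'a;b'  (no pre-existing value for the variable)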
def is_grouping_sane(cls, gtype): """Checks if a given gtype is sane""" if gtype == cls.SHUFFLE or gtype == cls.ALL or gtype == cls.LOWEST or gtype == cls.NONE: return True elif isinstance(gtype, cls.FIELDS): return gtype.gtype == topology_pb2.Grouping.Value("FIELDS") and \ gtype.fields is not None elif isinstance(gtype, cls.CUSTOM): return gtype.gtype == topology_pb2.Grouping.Value("CUSTOM") and \ gtype.python_serialized is not None else: #pylint: disable=fixme #TODO: DIRECT are not supported yet return False
Checks if a given gtype is sane
def attribute_md5(self):
    """
    The MD5 of all attributes is calculated by first generating a
    utf-8 string from each attribute and MD5-ing the concatenation
    of them all. Each attribute is encoded with some bytes that
    describe the length of each part and the type of attribute.

    Not yet implemented:
        List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k)
    """
    def utf8(str):
        if isinstance(str, six.string_types):
            return str.encode('utf-8')
        return str
    md5 = hashlib.md5()
    struct_format = "!I".encode('ascii')  # ensure it's a bytestring
    for name in sorted(self.message_attributes.keys()):
        attr = self.message_attributes[name]
        data_type = attr['data_type']

        encoded = utf8('')
        # Each part of each attribute is encoded right after its
        # own length is packed into a 4-byte integer
        # 'timestamp' -> b'\x00\x00\x00\t'
        encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name)
        # The datatype is additionally given a final byte
        # representing which type it is
        encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type)
        encoded += TRANSPORT_TYPE_ENCODINGS[data_type]

        if data_type == 'String' or data_type == 'Number':
            value = attr['string_value']
        elif data_type == 'Binary':
            print(data_type, attr['binary_value'], type(attr['binary_value']))
            value = base64.b64decode(attr['binary_value'])
        else:
            print("Moto hasn't implemented MD5 hashing for {} attributes".format(data_type))
            # The following should be enough of a clue to users that
            # they are not, in fact, looking at a correct MD5 while
            # also following the character and length constraints of
            # MD5 so as not to break client software
            return('deadbeefdeadbeefdeadbeefdeadbeef')

        encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value)

        md5.update(encoded)
    return md5.hexdigest()
The MD5 of all attributes is calculated by first generating a utf-8 string from each attribute and MD5-ing the concatenation of them all. Each attribute is encoded with some bytes that describe the length of each part and the type of attribute. Not yet implemented: List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k)
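The per-attribute encoding can be reproduced in isolation. A standalone sketch for a single String attribute, where the b'\x01' transport-type byte is an assumption about TRANSPORT_TYPE_ENCODINGS:

import hashlib
import struct

def encode_string_attribute_sketch(name, value, transport_byte=b'\x01'):
    # Each part is prefixed with its utf-8 length packed as a big-endian
    # 4-byte integer; the data type additionally gets one transport-type byte.
    out = struct.pack('!I', len(name.encode('utf-8'))) + name.encode('utf-8')
    out += struct.pack('!I', len('String')) + b'String'
    out += transport_byte
    out += struct.pack('!I', len(value.encode('utf-8'))) + value.encode('utf-8')
    return out

md5 = hashlib.md5()
md5.update(encode_string_attribute_sketch('timestamp', '1493147359900'))
print(md5.hexdigest())  # digest over the single encoded attribute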
def createRootJob(self, *args, **kwargs): """ Create a new job and set it as the root job in this job store :rtype: toil.jobGraph.JobGraph """ rootJob = self.create(*args, **kwargs) self.setRootJob(rootJob.jobStoreID) return rootJob
Create a new job and set it as the root job in this job store :rtype: toil.jobGraph.JobGraph
def __set_no_protein(self, hgvs_string): """Set a flag for no protein expected. ("p.0" or "p.0?") Args: hgvs_string (str): hgvs syntax with "p." removed """ no_protein_list = ['0', '0?'] # no protein symbols if hgvs_string in no_protein_list: self.is_no_protein = True self.is_non_silent = True else: self.is_no_protein = False
Set a flag for no protein expected. ("p.0" or "p.0?") Args: hgvs_string (str): hgvs syntax with "p." removed
def _SendRecv(): """Communicate with the Developer Shell server socket.""" port = int(os.getenv(DEVSHELL_ENV, 0)) if port == 0: raise NoDevshellServer() sock = socket.socket() sock.connect(('localhost', port)) data = CREDENTIAL_INFO_REQUEST_JSON msg = '{0}\n{1}'.format(len(data), data) sock.sendall(_helpers._to_bytes(msg, encoding='utf-8')) header = sock.recv(6).decode() if '\n' not in header: raise CommunicationError('saw no newline in the first 6 bytes') len_str, json_str = header.split('\n', 1) to_read = int(len_str) - len(json_str) if to_read > 0: json_str += sock.recv(to_read, socket.MSG_WAITALL).decode() return CredentialInfoResponse(json_str)
Communicate with the Developer Shell server socket.
def register(self, matchers, runnable): ''' Register an iterator(runnable) to scheduler and wait for events :param matchers: sequence of EventMatchers :param runnable: an iterator that accept send method :param daemon: if True, the runnable will be registered as a daemon. ''' if getattr(self, 'syscallfunc', None) is not None and getattr(self, 'syscallrunnable', None) is None: # Inject this register self.syscallrunnable = runnable else: for m in matchers: self.matchtree.insert(m, runnable) events = self.eventtree.findAndRemove(m) for e in events: self.queue.unblock(e) if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2: self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], True) self.registerIndex.setdefault(runnable, set()).update(matchers)
Register an iterator(runnable) to scheduler and wait for events :param matchers: sequence of EventMatchers :param runnable: an iterator that accept send method :param daemon: if True, the runnable will be registered as a daemon.
def length(self): """Gets length :return: How many items in linked list of linked list """ item = self.head counter = 0 while item is not None: counter += 1 item = item.next_node return counter
Gets length :return: How many items in linked list of linked list
def baseline_correct(G): """ This function zeroes the baseline from 2.5ppm upwards """ # define ppm ranges that are known to be at baseline, get indices baseidx =[] baseidx.extend(range(np.min(np.where(G.f_ppm<5.0)),np.max(np.where(G.f_ppm>4.0))+1)) baseidx.extend(range(np.min(np.where(G.f_ppm<3.5)),np.max(np.where(G.f_ppm>3.2))+1)) baseidx.extend(range(np.min(np.where(G.f_ppm<2.8)),np.max(np.where(G.f_ppm>2.5))+1)) G.diff = np.mean(G.diff_spectra,0) # find x and y values at those indices yArr=np.real(G.diff[baseidx]) baseppm = G.f_ppm[baseidx] # filter out anything above the new max adjbaseppm =[baseppm[i] for i in np.where(baseppm<=np.max(G.f_ppm))[0]] # spline f = interpolate.interp1d(adjbaseppm[::-1], yArr[::-1], kind='linear', bounds_error=True, fill_value=0) fitidxmax = np.where(G.f_ppm<np.max(adjbaseppm))[0] fitidxmin = np.where(G.f_ppm>np.min(adjbaseppm))[0] fitidx = list(set(fitidxmax) & set(fitidxmin)) basefit = f(G.f_ppm[fitidx])#[::-1] adjusted = G.diff[fitidx]-basefit#[::-1] G.diff_corrected = G.diff G.diff_corrected[fitidx] = adjusted # tag as corrected G.baseline_corrected = True
This function zeroes the baseline from 2.5ppm upwards
def download(course, tid=None, dl_all=False, force=False, upgradejava=False, update=False): """ Download the exercises from the server. """ def dl(id): download_exercise(Exercise.get(Exercise.tid == id), force=force, update_java=upgradejava, update=update) if dl_all: for exercise in list(course.exercises): dl(exercise.tid) elif tid is not None: dl(int(tid)) else: for exercise in list(course.exercises): if not exercise.is_completed: dl(exercise.tid) else: exercise.update_downloaded()
Download the exercises from the server.
def p_article(self, article): '''article : ARTICLEHEADER opttexts rules opttexts''' article[0] = Article(article[1][4], article[2], article[3], article[1][0], article[1][1], article[1][2], article[1][3], article[1][5])
article : ARTICLEHEADER opttexts rules opttexts
def sdiffstore(self, destkey, key, *keys): """Subtract multiple sets and store the resulting set in a key.""" return self.execute(b'SDIFFSTORE', destkey, key, *keys)
Subtract multiple sets and store the resulting set in a key.
def set_checkpoint(self, checkpoint_trigger, checkpoint_path, isOverWrite=True):
    """
    Configure checkpoint settings.

    :param checkpoint_trigger: the interval to write snapshots
    :param checkpoint_path: the path to write snapshots into
    :param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
    """
    if not os.path.exists(checkpoint_path):
        mkpath(checkpoint_path)
    callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
                  checkpoint_trigger, checkpoint_path, isOverWrite)
Configure checkpoint settings.

:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path. Default is True.
def get_path_and_name(full_name):
    """
    Split a full resource name into 'Path' and 'Name'

    :param full_name: <str> Full Resource Name - like 'Root/Folder/Folder2/Name'
    :return: tuple (Path, Name)
    """
    if full_name:
        parts = full_name.split("/")
        return ("/".join(parts[0:-1]), parts[-1]) if len(parts) > 1 else ("/", full_name)
    return None, None
Split a full resource name into 'Path' and 'Name'

:param full_name: <str> Full Resource Name - like 'Root/Folder/Folder2/Name'
:return: tuple (Path, Name)
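Usage sketch:

get_path_and_name('Root/Folder/Folder2/Name')  # -> ('Root/Folder/Folder2', 'Name')
get_path_and_name('Name')                      # -> ('/', 'Name')
get_path_and_name(None)                        # -> (None, None)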
def start(self): """ Start the GNS3 VM. """ # get a NAT interface number nat_interface_number = yield from self._look_for_interface("nat") if nat_interface_number < 0: raise GNS3VMError("The GNS3 VM: {} must have a NAT interface configured in order to start".format(self.vmname)) hostonly_interface_number = yield from self._look_for_interface("hostonly") if hostonly_interface_number < 0: raise GNS3VMError("The GNS3 VM: {} must have a host only interface configured in order to start".format(self.vmname)) vboxnet = yield from self._look_for_vboxnet(hostonly_interface_number) if vboxnet is None: raise GNS3VMError("VirtualBox host-only network could not be found for interface {} on GNS3 VM".format(hostonly_interface_number)) if not (yield from self._check_dhcp_server(vboxnet)): raise GNS3VMError("DHCP must be enabled on VirtualBox host-only network: {} for GNS3 VM".format(vboxnet)) vm_state = yield from self._get_state() log.info('"{}" state is {}'.format(self._vmname, vm_state)) if vm_state == "poweroff": yield from self.set_vcpus(self.vcpus) yield from self.set_ram(self.ram) if vm_state in ("poweroff", "saved"): # start the VM if it is not running args = [self._vmname] if self._headless: args.extend(["--type", "headless"]) yield from self._execute("startvm", args) elif vm_state == "paused": args = [self._vmname, "resume"] yield from self._execute("controlvm", args) ip_address = "127.0.0.1" try: # get a random port on localhost with socket.socket() as s: s.bind((ip_address, 0)) api_port = s.getsockname()[1] except OSError as e: raise GNS3VMError("Error while getting random port: {}".format(e)) if (yield from self._check_vbox_port_forwarding()): # delete the GNS3VM NAT port forwarding rule if it exists log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number)) yield from self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"]) # add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to port 3080 in the VM log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number)) yield from self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "GNS3VM,tcp,{},{},,3080".format(ip_address, api_port)]) self.ip_address = yield from self._get_ip(hostonly_interface_number, api_port) self.port = 3080 log.info("GNS3 VM has been started with IP {}".format(self.ip_address)) self.running = True
Start the GNS3 VM.
def toc(*args, **kwargs):
    """
    Port of the MatLAB function of same name

    Behaviour is controllable to some extent by the keyword args:
    'sysprint' returns its value interpolated with the elapsed time,
    'default' returns a standard "Elapsed time" message, and otherwise
    the elapsed time is returned as a float (seconds).
    """
    global Gtic_start
    f_elapsedTime = time.time() - Gtic_start
    for key, value in kwargs.items():
        if key == 'sysprint':
            return value % f_elapsedTime
        if key == 'default':
            return "Elapsed time = %f seconds." % f_elapsedTime
    return f_elapsedTime
Port of the MatLAB function of same name

Behaviour is controllable to some extent by the keyword args:
'sysprint' returns its value interpolated with the elapsed time,
'default' returns a standard "Elapsed time" message, and otherwise
the elapsed time is returned as a float (seconds).
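Usage sketch, assuming a companion tic() in the same module that sets the Gtic_start global (not shown here):

import time

tic()                                   # assumed helper that records Gtic_start
time.sleep(0.5)
print(toc())                            # ~0.5 (float seconds)
print(toc(default=True))                # 'Elapsed time = 0.5... seconds.'
print(toc(sysprint='Done after %f s'))  # caller-supplied format string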
def get_message_by_id(self, message_id): """ Fetch a message :param message_id: Message ID :type message_id: str :return: Message or False :rtype: Message """ result = self.wapi_functions.getMessageById(message_id) if result: result = factory_message(result, self) return result
Fetch a message :param message_id: Message ID :type message_id: str :return: Message or False :rtype: Message
def clean_strings(iterable): """ Take a list of strings and clear whitespace on each one. If a value in the list is not a string pass it through untouched. Args: iterable: mixed list Returns: mixed list """ retval = [] for val in iterable: try: retval.append(val.strip()) except(AttributeError): retval.append(val) return retval
Take a list of strings and clear whitespace on each one. If a value in the list is not a string pass it through untouched. Args: iterable: mixed list Returns: mixed list
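Usage sketch:

clean_strings(['  alpha ', 42, 'beta\n', None])
# -> ['alpha', 42, 'beta', None]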
def dot(vec1, vec2):
    """Returns the dot product of two Vectors"""
    if isinstance(vec1, Vector3) and isinstance(vec2, Vector3):
        return (vec1.x * vec2.x) + (vec1.y * vec2.y) + (vec1.z * vec2.z)
    elif isinstance(vec1, Vector4) and isinstance(vec2, Vector4):
        return (vec1.x * vec2.x) + (vec1.y * vec2.y) + (vec1.z * vec2.z) + (vec1.w * vec2.w)
    else:
        raise TypeError("vec1 and vec2 must be a Vector type")
Returns the dot product of two Vectors
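Usage sketch, assuming the module's Vector3 is constructed as Vector3(x, y, z):

v1 = Vector3(1.0, 2.0, 3.0)
v2 = Vector3(4.0, 5.0, 6.0)
dot(v1, v2)  # 1*4 + 2*5 + 3*6 = 32.0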
def iterate_from_vcf(infile, sample):
    '''iterate over a vcf-formatted file.

    *infile* can be any iterator over lines.

    The function yields named tuples of the type
    :class:`pysam.Pileup.PileupSubstitution` or
    :class:`pysam.Pileup.PileupIndel`.

    Positions without a snp will be skipped.

    This method is wasteful and written to support some
    legacy code that expects samtools pileup output. Better
    use the vcf parser directly.
    '''
    vcf = pysam.VCF()
    vcf.connect(infile)

    if sample not in vcf.getsamples():
        raise KeyError("sample %s not in vcf file" % sample)

    for row in vcf.fetch():
        result = vcf2pileup(row, sample)
        if result:
            yield result
iterate over a vcf-formatted file.

*infile* can be any iterator over lines.

The function yields named tuples of the type
:class:`pysam.Pileup.PileupSubstitution` or
:class:`pysam.Pileup.PileupIndel`.

Positions without a snp will be skipped.

This method is wasteful and written to support some
legacy code that expects samtools pileup output. Better
use the vcf parser directly.
def parse_fntdata(_data, _config, _extra_data_receiver=None): """ info face="Haettenschweiler" size=60 bold=0 italic=0 charset="" unicode=0 stretchH=100 smooth=1 aa=1 padding=0,0,0,0 spacing=2,2 common lineHeight=64 base=53 scaleW=256 scaleH=128 pages=1 packed=0 page id=0 file="attack_num.png" chars count=12 char id=52 x=2 y=2 width=33 height=51 xoffset=0 yoffset=5 xadvance=32 page=0 chnl=0 letter="4" char id=48 x=37 y=2 width=29 height=50 xoffset=1 yoffset=6 xadvance=29 page=0 chnl=0 letter="0" char id=53 x=68 y=2 width=29 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="5" char id=57 x=99 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="9" char id=54 x=129 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="6" char id=56 x=159 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="8" char id=51 x=189 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="3" char id=50 x=219 y=2 width=28 height=49 xoffset=1 yoffset=7 xadvance=28 page=0 chnl=0 letter="2" char id=55 x=2 y=55 width=30 height=48 xoffset=1 yoffset=8 xadvance=28 page=0 chnl=0 letter="7" char id=49 x=34 y=55 width=20 height=48 xoffset=1 yoffset=8 xadvance=20 page=0 chnl=0 letter="1" char id=45 x=56 y=55 width=18 height=12 xoffset=1 yoffset=36 xadvance=19 page=0 chnl=0 letter="-" char id=32 x=76 y=55 width=0 height=0 xoffset=11 yoffset=73 xadvance=16 page=0 chnl=0 letter="space" """ data = {} frame_data_list = [] parse_common_info = parse("common lineHeight={line_height:d} base={base:d} scaleW={scale_w:d} scaleH={scale_h:d} pages={pages:d} packed={packed:d}", _data[1]) parse_page_info = parse("page id={id:d} file=\"{file}\"", _data[2]) parse_char_count = parse("chars count={count:d}", _data[3]) raw_frames_data = {} for index in xrange(0, parse_char_count["count"]): parse_frame = parse("char id={id:d} x={x:d} y={y:d} width={width:d} height={height:d} xoffset={xoffset:d} yoffset={yoffset:d} xadvance={xadvance:d} page={page:d} chnl={chnl:d} letter=\"{letter}\"", _data[index + 4]) frame_data = {} frame_data["name"] = "{prefix}_{id}.png".format(prefix= _config["prefix"], id=parse_frame["id"], letter=parse_frame["letter"]) frame_data["source_size"] = (parse_frame["width"], parse_frame["height"]) frame_data["rotated"] = False frame_data["src_rect"] = (parse_frame["x"], parse_frame["y"], parse_frame["x"] + parse_frame["width"], parse_frame["y"] + parse_frame["height"]) frame_data["offset"] = (0, 0) if parse_frame["width"] <= 0 or parse_frame["height"] <= 0: continue frame_data_list.append(frame_data) parse_frame_named_data = parse_frame.named.copy() parse_frame_named_data["texture"] = frame_data["name"] raw_frames_data[parse_frame["id"]] = parse_frame_named_data data["texture"] = parse_page_info["file"] data["frames"] = frame_data_list if _extra_data_receiver != None: _extra_data_receiver["common"] = parse_common_info.named _extra_data_receiver["frames"] = raw_frames_data return data
info face="Haettenschweiler" size=60 bold=0 italic=0 charset="" unicode=0 stretchH=100 smooth=1 aa=1 padding=0,0,0,0 spacing=2,2 common lineHeight=64 base=53 scaleW=256 scaleH=128 pages=1 packed=0 page id=0 file="attack_num.png" chars count=12 char id=52 x=2 y=2 width=33 height=51 xoffset=0 yoffset=5 xadvance=32 page=0 chnl=0 letter="4" char id=48 x=37 y=2 width=29 height=50 xoffset=1 yoffset=6 xadvance=29 page=0 chnl=0 letter="0" char id=53 x=68 y=2 width=29 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="5" char id=57 x=99 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="9" char id=54 x=129 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="6" char id=56 x=159 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="8" char id=51 x=189 y=2 width=28 height=50 xoffset=1 yoffset=6 xadvance=28 page=0 chnl=0 letter="3" char id=50 x=219 y=2 width=28 height=49 xoffset=1 yoffset=7 xadvance=28 page=0 chnl=0 letter="2" char id=55 x=2 y=55 width=30 height=48 xoffset=1 yoffset=8 xadvance=28 page=0 chnl=0 letter="7" char id=49 x=34 y=55 width=20 height=48 xoffset=1 yoffset=8 xadvance=20 page=0 chnl=0 letter="1" char id=45 x=56 y=55 width=18 height=12 xoffset=1 yoffset=36 xadvance=19 page=0 chnl=0 letter="-" char id=32 x=76 y=55 width=0 height=0 xoffset=11 yoffset=73 xadvance=16 page=0 chnl=0 letter="space"
def merge(cls, components): """Merges components into a single component, applying their actions appropriately. This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c). :param list components: an iterable of instances of DictValueComponent. :return: An instance representing the result of merging the components. :rtype: `DictValueComponent` """ # Note that action of the merged component is EXTEND until the first REPLACE is encountered. # This guarantees associativity. action = cls.EXTEND val = {} for component in components: if component.action is cls.REPLACE: val = component.val action = cls.REPLACE elif component.action is cls.EXTEND: val.update(component.val) else: raise ParseError('Unknown action for dict value: {}'.format(component.action)) return cls(action, val)
Merges components into a single component, applying their actions appropriately. This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c). :param list components: an iterable of instances of DictValueComponent. :return: An instance representing the result of merging the components. :rtype: `DictValueComponent`
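Usage sketch, assuming DictValueComponent(action, val) is the constructor used above:

a = DictValueComponent(DictValueComponent.EXTEND,  {'x': 1})
b = DictValueComponent(DictValueComponent.EXTEND,  {'y': 2})
c = DictValueComponent(DictValueComponent.REPLACE, {'z': 3})

DictValueComponent.merge([a, b]).val     # {'x': 1, 'y': 2}   (action stays EXTEND)
DictValueComponent.merge([a, c, b]).val  # {'z': 3, 'y': 2}   (REPLACE wins, later EXTEND still applies)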
def run(analysis, path=None, name=None, info=None, **kwargs): """Run a single analysis. :param Analysis analysis: Analysis class to run. :param str path: Path of analysis. Can be `__file__`. :param str name: Name of the analysis. :param dict info: Optional entries are ``version``, ``title``, ``readme``, ... :param dict static: Map[url regex, root-folder] to serve static content. """ kwargs.update({ 'analysis': analysis, 'path': path, 'name': name, 'info': info, }) main(**kwargs)
Run a single analysis. :param Analysis analysis: Analysis class to run. :param str path: Path of analysis. Can be `__file__`. :param str name: Name of the analysis. :param dict info: Optional entries are ``version``, ``title``, ``readme``, ... :param dict static: Map[url regex, root-folder] to serve static content.