Columns: code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
def tqxn(mt, x, n, t): """ n/qx : Probability of death deferred n years for a life aged x. Probability that x survives n years, and then dies in the subsequent t years """ return tpx(mt, x, t) * qx(mt, x + n)
n/qx : Probability of death deferred n years for a life aged x. Probability that x survives n years, and then dies in the subsequent t years
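A minimal standalone sketch of the deferred-probability idea, assuming a constant annual death probability q at every age (this does not use the library's tpx/qx, whose implementations are not shown here):

def deferred_death_prob(q, n):
    # Survive n full years, then die in the following year,
    # under a toy model with the same annual death probability q at every age.
    return (1 - q) ** n * q

print(deferred_death_prob(0.01, 5))  # ~0.00951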
def create_routes_and_handler(transmute_func, context): """ Return a handler that is the API generated from the transmute_func, and a list of routes it should be mounted to. """ @wraps(transmute_func.raw_func) def handler(): exc, result = None, None try: args, kwargs = ParamExtractorFlask().extract_params( context, transmute_func, request.content_type ) result = transmute_func(*args, **kwargs) except Exception as e: exc = e """ attaching the traceback is done for you in Python 3, but in Python 2 the __traceback__ must be attached to the object manually. """ exc.__traceback__ = sys.exc_info()[2] """ transmute_func.process_result handles converting the response from the function into the response body, the status code that should be returned, and the response content-type. """ response = transmute_func.process_result( context, result, exc, request.content_type ) return Response( response["body"], status=response["code"], mimetype=response["content-type"], headers=response["headers"] ) return ( _convert_paths_to_flask(transmute_func.paths), handler )
Return a handler that is the API generated from the transmute_func, and a list of routes it should be mounted to.
def geometry_checker(geometry): """Perform a cleaning if the geometry is not valid. :param geometry: The geometry to check and clean. :type geometry: QgsGeometry :return: Tuple of bool and cleaned geometry. True if the geometry is already valid, False if the geometry was not valid. A cleaned geometry, or None if the geometry could not be repaired :rtype: (bool, QgsGeometry) """ if geometry is None: # The geometry can be None. return False, None if geometry.isGeosValid(): return True, geometry else: new_geom = geometry.makeValid() if new_geom.isGeosValid(): return False, new_geom else: # Make valid was not enough, the feature will be deleted. return False, None
Perform a cleaning if the geometry is not valid. :param geometry: The geometry to check and clean. :type geometry: QgsGeometry :return: Tuple of bool and cleaned geometry. True if the geometry is already valid, False if the geometry was not valid. A cleaned geometry, or None if the geometry could not be repaired :rtype: (bool, QgsGeometry)
def read(self, device=None, offset=0, bs=None, count=1): """ Using O_DIRECT, read from the specified block device to stdout (without any optional arguments, reads the first 4k from the device) """ volume = self.get_volume(device) block_size = bs or BLOCK_SIZE offset = int(offset) * block_size count = int(count) print("Offset: ", offset) total = 0 with directio.open(volume['path'], buffered=block_size) as file: file.seek(offset) for i in range(0, count): total += os.write(sys.stdout.fileno(), file.read(block_size)) os.write(sys.stdout.fileno(), "\nRead: %d Bytes\n" % total)
Using O_DIRECT, read from the specified block device to stdout (without any optional arguments, reads the first 4k from the device)
def listen(self): """Start listening.""" _LOGGER.info('Creating Multicast Socket') self._mcastsocket = self._create_mcast_socket() self._listening = True thread = Thread(target=self._listen_to_msg, args=()) self._threads.append(thread) thread.daemon = True thread.start()
Start listening.
def publish (self): ''' Function to publish cmdvel. ''' self.lock.acquire() tw = cmdvel2Twist(self.vel) self.lock.release() if (self.jdrc.getState() == "flying"): self.pub.publish(tw)
Function to publish cmdvel.
def saveThumbnail(self,fileName,filePath): """ URL to the thumbnail used for the item """ if self._thumbnail is None: self.__init() param_dict = {} if self._thumbnail is not None: imgUrl = self.root + "/info/" + self._thumbnail onlineFileName, file_ext = splitext(self._thumbnail) fileNameSafe = "".join(x for x in fileName if x.isalnum()) + file_ext result = self._get(url=imgUrl, param_dict=param_dict, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, out_folder=filePath, file_name=fileNameSafe) return result else: return None
URL to the thumbnail used for the item
def method(cache_name, key_prefix=None): """Caching decorator for object-level method caches. Cache key generation is delegated to the cache. Args: cache_name (str): The name of the (already-instantiated) cache on the decorated object which should be used to store results of this method. *key_prefix: A constant to use as part of the cache key in addition to the method arguments. """ def decorator(func): if (func.__name__ in ['cause_repertoire', 'effect_repertoire'] and not config.CACHE_REPERTOIRES): return func @wraps(func) def wrapper(obj, *args, **kwargs): cache = getattr(obj, cache_name) # Delegate key generation key = cache.key(*args, _prefix=key_prefix, **kwargs) # Get cached value, or compute value = cache.get(key) if value is None: # miss value = func(obj, *args, **kwargs) cache.set(key, value) return value return wrapper return decorator
Caching decorator for object-level method caches. Cache key generation is delegated to the cache. Args: cache_name (str): The name of the (already-instantiated) cache on the decorated object which should be used to store results of this method. *key_prefix: A constant to use as part of the cache key in addition to the method arguments.
def read_var_str(self, max_size=sys.maxsize): """ Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max_size (int): (Optional) maximum number of bytes to read. Returns: bytes: """ length = self.read_var_int(max_size) return self.unpack(str(length) + 's', length)
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max_size (int): (Optional) maximum number of bytes to read. Returns: bytes:
def pathname(self): """Slugified path for filenames Slugs to a filename using the following steps * Decode unicode to approximate ascii * Remove existing hyphens * Substitute hyphens for non-word characters * Break up the string as paths """ slug = self.name slug = unidecode.unidecode(slug) slug = slug.replace("-", "") slug = re.sub(r"[^\w\.]+", "-", slug).strip("-") return os.path.join(*slug.split("."))
Slugified path for filenames Slugs to a filename using the following steps * Decode unicode to approximate ascii * Remove existing hyphens * Substitute hyphens for non-word characters * Break up the string as paths
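A self-contained sketch of the same slugging steps, assuming the third-party unidecode package is installed (as in the original):

import os
import re
import unidecode  # third-party, assumed installed as in the original

def slug_path(name):
    slug = unidecode.unidecode(name)                   # approximate ASCII
    slug = slug.replace("-", "")                       # drop existing hyphens
    slug = re.sub(r"[^\w\.]+", "-", slug).strip("-")   # non-word chars -> hyphens
    return os.path.join(*slug.split("."))              # dots become path separators

print(slug_path(u"Ünïcode.Pkg Name"))  # e.g. Unicode/Pkg-Name on POSIX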
def _tokenize(self, source, name, filename=None, state=None): """Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. """ source = self.preprocess(source, name, filename) stream = self.lexer.tokenize(source, name, filename, state) for ext in self.iter_extensions(): stream = ext.filter_stream(stream) if not isinstance(stream, TokenStream): stream = TokenStream(stream, name, filename) return stream
Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
def VShadowPathSpecGetStoreIndex(path_spec): """Retrieves the store index from the path specification. Args: path_spec (PathSpec): path specification. Returns: int: store index or None if not available. """ store_index = getattr(path_spec, 'store_index', None) if store_index is None: location = getattr(path_spec, 'location', None) if location is None or not location.startswith('/vss'): return None store_index = None try: store_index = int(location[4:], 10) - 1 except (TypeError, ValueError): pass if store_index is None or store_index < 0: return None return store_index
Retrieves the store index from the path specification. Args: path_spec (PathSpec): path specification. Returns: int: store index or None if not available.
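A quick usage sketch of the parsing logic above, using a hypothetical stand-in for a dfVFS path specification object:

class FakePathSpec(object):
    # Hypothetical stand-in for dfvfs PathSpec, for illustration only.
    def __init__(self, location=None, store_index=None):
        self.location = location
        self.store_index = store_index

print(VShadowPathSpecGetStoreIndex(FakePathSpec(location='/vss2')))       # 1 (zero-based)
print(VShadowPathSpecGetStoreIndex(FakePathSpec(location='/unrelated')))  # None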
def videos(self, **kwargs): """ Get the videos that have been added to a TV episode (teasers, clips, etc...). Args: language: (optional) ISO 639 code. Returns: A dict representation of the JSON returned from the API. """ path = self._get_series_id_season_number_episode_number_path('videos') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the videos that have been added to a TV episode (teasers, clips, etc...). Args: language: (optional) ISO 639 code. Returns: A dict representation of the JSON returned from the API.
def make_items_for(brains_or_objects, endpoint=None, complete=False): """Generate API compatible data items for the given list of brains/objects :param brains_or_objects: List of objects or brains :type brains_or_objects: list/Products.ZCatalog.Lazy.LazyMap :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: A list of extracted data items :rtype: list """ # check if the user wants to include children include_children = req.get_children(False) def extract_data(brain_or_object): info = get_info(brain_or_object, endpoint=endpoint, complete=complete) if include_children and is_folderish(brain_or_object): info.update(get_children_info(brain_or_object, complete=complete)) return info return map(extract_data, brains_or_objects)
Generate API compatible data items for the given list of brains/objects :param brains_or_objects: List of objects or brains :type brains_or_objects: list/Products.ZCatalog.Lazy.LazyMap :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: A list of extracted data items :rtype: list
def can_delete_objectives(self): """Tests if this user can delete Objectives. A return of true does not guarantee successful authorization. A return of false indicates that it is known deleting an Objective will result in a PermissionDenied. This is intended as a hint to an application that may opt not to offer delete operations to an unauthorized user. return: (boolean) - false if Objective deletion is not authorized, true otherwise compliance: mandatory - This method must be implemented. """ url_path = construct_url('authorization', bank_id=self._catalog_idstr) return self._get_request(url_path)['objectiveHints']['canDelete']
Tests if this user can delete Objectives. A return of true does not guarantee successful authorization. A return of false indicates that it is known deleting an Objective will result in a PermissionDenied. This is intended as a hint to an application that may opt not to offer delete operations to an unauthorized user. return: (boolean) - false if Objective deletion is not authorized, true otherwise compliance: mandatory - This method must be implemented.
def get_portal_url_base(self): """ Determine root url of the data service from the url specified. :return: str root url of the data service (eg: https://dataservice.duke.edu) """ api_url = urlparse(self.url).hostname portal_url = re.sub(r'^api\.', '', api_url) portal_url = re.sub(r'api', '', portal_url) return portal_url
Determine root url of the data service from the url specified. :return: str root url of the data service (eg: https://dataservice.duke.edu)
def running_conversions(self, folder_id=None): """Shows running file conversions by folder Note: If folder_id is not provided, ``Home`` folder will be used. Args: folder_id (:obj:`str`, optional): id of the folder to list conversions of the files in it. Returns: list: list of dictionaries, each dictionary represents a file conversion info. :: [ { "name": "Geysir.AVI", "id": "3565411", "status": "pending", "last_update": "2015-08-23 19:41:40", "progress": 0.32, "retries": "0", "link": "https://openload.co/f/f02JFG293J8/Geysir.AVI", "linkextid": "f02JFG293J8" }, .... ] """ params = {'folder': folder_id} if folder_id else {} return self._get('file/runningconverts', params=params)
Shows running file conversions by folder Note: If folder_id is not provided, ``Home`` folder will be used. Args: folder_id (:obj:`str`, optional): id of the folder to list conversions of the files in it. Returns: list: list of dictionaries, each dictionary represents a file conversion info. :: [ { "name": "Geysir.AVI", "id": "3565411", "status": "pending", "last_update": "2015-08-23 19:41:40", "progress": 0.32, "retries": "0", "link": "https://openload.co/f/f02JFG293J8/Geysir.AVI", "linkextid": "f02JFG293J8" }, .... ]
def _make_probs(self, *sequences): """ https://github.com/gw-c/arith/blob/master/arith.py """ sequences = self._get_counters(*sequences) counts = self._sum_counters(*sequences) if self.terminator is not None: counts[self.terminator] = 1 total_letters = sum(counts.values()) prob_pairs = {} cumulative_count = 0 counts = sorted(counts.items(), key=lambda x: (x[1], x[0]), reverse=True) for char, current_count in counts: prob_pairs[char] = ( Fraction(cumulative_count, total_letters), Fraction(current_count, total_letters), ) cumulative_count += current_count assert cumulative_count == total_letters return prob_pairs
https://github.com/gw-c/arith/blob/master/arith.py
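A stripped-down standalone sketch of the cumulative-range construction used above (plain text input, no terminator handling):

from collections import Counter
from fractions import Fraction

def make_probs(text):
    counts = Counter(text)
    total = sum(counts.values())
    ranges, cumulative = {}, 0
    # Highest counts first, ties broken by the symbol itself.
    for char, count in sorted(counts.items(), key=lambda x: (x[1], x[0]), reverse=True):
        ranges[char] = (Fraction(cumulative, total), Fraction(count, total))
        cumulative += count
    return ranges

print(make_probs("abba"))
# {'b': (Fraction(0, 1), Fraction(1, 2)), 'a': (Fraction(1, 2), Fraction(1, 2))}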
def correctX(args): """ %prog correctX folder tag Run ALLPATHS correction on a folder of paired reads and apply tag. """ p = OptionParser(correctX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): correct_pairs(p, pf, tag)
%prog correctX folder tag Run ALLPATHS correction on a folder of paired reads and apply tag.
def render(self, name=None, template=None, context={}): '''Render Template meta from jinja2 templates. ''' if isinstance(template, Template): _template = template else: _template = Template.objects.get(name=name) # Maybe cache or save local ? response = self.env.from_string( _template.content).render(context) return response
Render Template meta from jinja2 templates.
def tricu(P, k=0): """Cross-diagonal upper triangle.""" tri = numpy.sum(numpy.mgrid[[slice(0,_,1) for _ in P.shape]], 0) tri = tri<len(tri) + k if isinstance(P, Poly): A = P.A.copy() B = {} for key in P.keys: B[key] = A[key]*tri return Poly(B, shape=P.shape, dim=P.dim, dtype=P.dtype) out = P*tri return out
Cross-diagonal upper triangle.
def add_access_list(self, accesslist, rank=None): """ Add an access list to the match condition. Valid access list types are IPAccessList (v4 and v6), IPPrefixList (v4 and v6), AS Path, CommunityAccessList, ExtendedCommunityAccessList. """ self.conditions.append( dict(access_list_ref=accesslist.href, type='element', rank=rank))
Add an access list to the match condition. Valid access list types are IPAccessList (v4 and v6), IPPrefixList (v4 and v6), AS Path, CommunityAccessList, ExtendedCommunityAccessList.
def syscall(self, func): ''' Call the func in core context (main loop). func should look like:: def syscall_sample(scheduler, processor): something... where processor is a function which accepts an event. When calling processor, scheduler directly processes this event without sending it to the queue. An event matcher is returned to the caller, and the caller should wait for the event immediately to get the return value from the system call. The SyscallReturnEvent will have 'retvalue' as the return value, or 'exception' as the exception thrown: (type, value, traceback) :param func: syscall function :returns: an event matcher to wait for the SyscallReturnEvent. If None is returned, a syscall is already scheduled; return to core context at first. ''' if getattr(self, 'syscallfunc', None) is not None: return None self.syscallfunc = func self.syscallmatcher = SyscallReturnEvent.createMatcher() return self.syscallmatcher
Call the func in core context (main loop). func should look like:: def syscall_sample(scheduler, processor): something... where processor is a function which accepts an event. When calling processor, scheduler directly processes this event without sending it to the queue. An event matcher is returned to the caller, and the caller should wait for the event immediately to get the return value from the system call. The SyscallReturnEvent will have 'retvalue' as the return value, or 'exception' as the exception thrown: (type, value, traceback) :param func: syscall function :returns: an event matcher to wait for the SyscallReturnEvent. If None is returned, a syscall is already scheduled; return to core context at first.
def is_tuple_type(tp): """Test if the type is a generic tuple type, including subclasses excluding non-generic classes. Examples:: is_tuple_type(int) == False is_tuple_type(tuple) == False is_tuple_type(Tuple) == True is_tuple_type(Tuple[str, int]) == True class MyClass(Tuple[str, int]): ... is_tuple_type(MyClass) == True For more general tests use issubclass(..., tuple), for more precise test (excluding subclasses) use:: get_origin(tp) is tuple # Tuple prior to Python 3.7 """ if NEW_TYPING: return (tp is Tuple or isinstance(tp, _GenericAlias) and tp.__origin__ is tuple or isinstance(tp, type) and issubclass(tp, Generic) and issubclass(tp, tuple)) return type(tp) is TupleMeta
Test if the type is a generic tuple type, including subclasses excluding non-generic classes. Examples:: is_tuple_type(int) == False is_tuple_type(tuple) == False is_tuple_type(Tuple) == True is_tuple_type(Tuple[str, int]) == True class MyClass(Tuple[str, int]): ... is_tuple_type(MyClass) == True For more general tests use issubclass(..., tuple), for more precise test (excluding subclasses) use:: get_origin(tp) is tuple # Tuple prior to Python 3.7
def _get_value_from_match(self, key, match): """ Gets the value of the property in the given MatchObject. Args: key (str): Key of the property looked-up. match (MatchObject): The matched property. Return: The discovered value, as a string or boolean. """ value = match.groups(1)[0] clean_value = str(value).lstrip().rstrip() if clean_value == 'true': self._log.info('Got value of "%s" as boolean true.', key) return True if clean_value == 'false': self._log.info('Got value of "%s" as boolean false.', key) return False try: float_value = float(clean_value) self._log.info('Got value of "%s" as float "%f".', key, float_value) return float_value except ValueError: self._log.info('Got value of "%s" as string "%s".', key, clean_value) return clean_value
Gets the value of the property in the given MatchObject. Args: key (str): Key of the property looked-up. match (MatchObject): The matched property. Return: The discovered value, as a string or boolean.
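The same coercion order (boolean literals, then float, then raw string) as a standalone sketch:

def coerce(raw):
    value = str(raw).strip()
    if value == 'true':
        return True
    if value == 'false':
        return False
    try:
        return float(value)
    except ValueError:
        return value

print(coerce(' true '), coerce('3.14'), coerce('hello'))  # True 3.14 hello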
def get_checkerboard_matrix(kernel_width): """ example matrix for width = 2 -1 -1 1 1 -1 -1 1 1 1 1 -1 -1 1 1 -1 -1 :param kernel_width: :return: """ return np.vstack(( np.hstack(( -1 * np.ones((kernel_width, kernel_width)), np.ones((kernel_width, kernel_width)) )), np.hstack(( np.ones((kernel_width, kernel_width)), -1 * np.ones((kernel_width, kernel_width)) )) ))
example matrix for width = 2 -1 -1 1 1 -1 -1 1 1 1 1 -1 -1 1 1 -1 -1 :param kernel_width: :return:
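Assuming the function above is in scope, a quick check that it reproduces the documented pattern:

import numpy as np

kernel = get_checkerboard_matrix(2)
print(kernel.astype(int))
# [[-1 -1  1  1]
#  [-1 -1  1  1]
#  [ 1  1 -1 -1]
#  [ 1  1 -1 -1]]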
def stripped_db_url(url): """Return a version of the DB url with the password stripped out.""" parsed = urlparse(url) if parsed.password is None: return url return parsed._replace( netloc="{}:***@{}".format(parsed.username, parsed.hostname) ).geturl()
Return a version of the DB url with the password stripped out.
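A standalone sketch of the same idea using the standard-library urlparse (Python 3 import location shown; the original may import it differently):

from urllib.parse import urlparse

def strip_password(url):
    parsed = urlparse(url)
    if parsed.password is None:
        return url
    return parsed._replace(
        netloc="{}:***@{}".format(parsed.username, parsed.hostname)
    ).geturl()

print(strip_password("postgresql://alice:s3cret@db.example.com/app"))
# postgresql://alice:***@db.example.com/app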
def format_config(sensor_graph): """Extract the config variables from this sensor graph in ASCII format. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: str: The ascii output lines concatenated as a single string """ cmdfile = CommandFile("Config Variables", "1.0") for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()): for conf_var, conf_def in sorted(sensor_graph.config_database[slot].items()): conf_type, conf_val = conf_def if conf_type == 'binary': conf_val = 'hex:' + hexlify(conf_val) cmdfile.add("set_variable", slot, conf_var, conf_type, conf_val) return cmdfile.dump()
Extract the config variables from this sensor graph in ASCII format. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: str: The ascii output lines concatenated as a single string
def start(component, exact): # type: (str) -> None """ Create a new release. It will bump the current version number and create a release branch called `release/<version>` with one new commit (the version bump). **Example Config**:: \b version_file: 'src/mypkg/__init__.py' **Examples**:: \b $ peltak release start patch # Make a new patch release $ peltak release start minor # Make a new minor release $ peltak release start major # Make a new major release $ peltak release start # same as start patch """ from peltak.extra.gitflow import logic logic.release.start(component, exact)
Create a new release. It will bump the current version number and create a release branch called `release/<version>` with one new commit (the version bump). **Example Config**:: \b version_file: 'src/mypkg/__init__.py' **Examples**:: \b $ peltak release start patch # Make a new patch release $ peltak release start minor # Make a new minor release $ peltak release start major # Make a new major release $ peltak release start # same as start patch
def setShadowed(self, state): """ Sets whether or not this toolbar is shadowed. :param state | <bool> """ self._shadowed = state if state: self._colored = False for child in self.findChildren(XToolButton): child.setShadowed(state)
Sets whether or not this toolbar is shadowed. :param state | <bool>
def subnet_get(auth=None, **kwargs): ''' Get a single subnet filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.subnet_get name=subnet1 ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.get_subnet(**kwargs)
Get a single subnet filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.subnet_get name=subnet1
def add_algorithm(self, parser): """Add the --algorithm option.""" help = 'The HashAlgorithm that will be used to generate the signature (default: %(default)s).' % { 'default': ca_settings.CA_DIGEST_ALGORITHM.name, } parser.add_argument( '--algorithm', metavar='{sha512,sha256,...}', default=ca_settings.CA_DIGEST_ALGORITHM, action=AlgorithmAction, help=help)
Add the --algorithm option.
def sphergal_to_rectgal(l,b,d,vr,pmll,pmbb,degree=False): """ NAME: sphergal_to_rectgal PURPOSE: transform phase-space coordinates in spherical Galactic coordinates to rectangular Galactic coordinates (can take vector inputs) INPUT: l - Galactic longitude (rad) b - Galactic latitude (rad) d - distance (kpc) vr - line-of-sight velocity (km/s) pmll - proper motion in the Galactic longitude direction (mu_l*cos(b) ) (mas/yr) pmbb - proper motion in the Galactic latitude (mas/yr) degree - (bool) if True, l and b are in degrees OUTPUT: (X,Y,Z,vx,vy,vz) in (kpc,kpc,kpc,km/s,km/s,km/s) HISTORY: 2009-10-25 - Written - Bovy (NYU) """ XYZ= lbd_to_XYZ(l,b,d,degree=degree) vxvyvz= vrpmllpmbb_to_vxvyvz(vr,pmll,pmbb,l,b,d,XYZ=False,degree=degree) if sc.array(l).shape == (): return sc.array([XYZ[0],XYZ[1],XYZ[2],vxvyvz[0],vxvyvz[1],vxvyvz[2]]) else: out=sc.zeros((len(l),6)) out[:,0:3]= XYZ out[:,3:6]= vxvyvz return out
NAME: sphergal_to_rectgal PURPOSE: transform phase-space coordinates in spherical Galactic coordinates to rectangular Galactic coordinates (can take vector inputs) INPUT: l - Galactic longitude (rad) b - Galactic latitude (rad) d - distance (kpc) vr - line-of-sight velocity (km/s) pmll - proper motion in the Galactic longitude direction (mu_l*cos(b) ) (mas/yr) pmbb - proper motion in the Galactic latitude (mas/yr) degree - (bool) if True, l and b are in degrees OUTPUT: (X,Y,Z,vx,vy,vz) in (kpc,kpc,kpc,km/s,km/s,km/s) HISTORY: 2009-10-25 - Written - Bovy (NYU)
def key(self, *args, **kwargs): """ Return the cache key to use. If you're passing anything but primitive types to the ``get`` method, it's likely that you'll need to override this method. """ if not args and not kwargs: return self.class_path try: if args and not kwargs: return "%s:%s" % (self.class_path, self.hash(args)) # The line might break if your passed values are un-hashable. If # it does, you need to override this method and implement your own # key algorithm. return "%s:%s:%s:%s" % (self.class_path, self.hash(args), self.hash([k for k in sorted(kwargs)]), self.hash([kwargs[k] for k in sorted(kwargs)])) except TypeError: raise RuntimeError( "Unable to generate cache key due to unhashable" "args or kwargs - you need to implement your own" "key generation method to avoid this problem")
Return the cache key to use. If you're passing anything but primitive types to the ``get`` method, it's likely that you'll need to override this method.
def execution(): ''' Collect all the sys.doc output from each minion and return the aggregate CLI Example: .. code-block:: bash salt-run doc.execution ''' client = salt.client.get_local_client(__opts__['conf_file']) docs = {} try: for ret in client.cmd_iter('*', 'sys.doc', timeout=__opts__['timeout']): for v in six.itervalues(ret): docs.update(v) except SaltClientError as exc: print(exc) return [] i = itertools.chain.from_iterable([six.iteritems(docs['ret'])]) ret = dict(list(i)) return ret
Collect all the sys.doc output from each minion and return the aggregate CLI Example: .. code-block:: bash salt-run doc.execution
def flush(self): # nocover """ Flush to this and the redirected stream """ if self.redirect is not None: self.redirect.flush() super(TeeStringIO, self).flush()
Flush to this and the redirected stream
def data(self): """ Get a cached post-processed result of a GitHub API call. Uses Trac cache to avoid constant querying of the remote API. If a previous API call did not succeed, automatically retries after a timeout. """ if self._next_update and datetime.now() > self._next_update: self.update() return self._data
Get a cached post-processed result of a GitHub API call. Uses Trac cache to avoid constant querying of the remote API. If a previous API call did not succeed, automatically retries after a timeout.
def ternarize(x, thresh=0.05): """ Implemented Trained Ternary Quantization: https://arxiv.org/abs/1612.01064 Code modified from the authors' at: https://github.com/czhu95/ternarynet/blob/master/examples/Ternary-Net/ternary.py """ shape = x.get_shape() thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thresh) w_p = tf.get_variable('Wp', initializer=1.0, dtype=tf.float32) w_n = tf.get_variable('Wn', initializer=1.0, dtype=tf.float32) tf.summary.scalar(w_p.op.name + '-summary', w_p) tf.summary.scalar(w_n.op.name + '-summary', w_n) mask = tf.ones(shape) mask_p = tf.where(x > thre_x, tf.ones(shape) * w_p, mask) mask_np = tf.where(x < -thre_x, tf.ones(shape) * w_n, mask_p) mask_z = tf.where((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask) @tf.custom_gradient def _sign_mask(x): return tf.sign(x) * mask_z, lambda dy: dy w = _sign_mask(x) w = w * mask_np tf.summary.histogram(w.name, w) return w
Implemented Trained Ternary Quantization: https://arxiv.org/abs/1612.01064 Code modified from the authors' at: https://github.com/czhu95/ternarynet/blob/master/examples/Ternary-Net/ternary.py
def from_env(cls, prefix, kms_decrypt=False, aws_profile=None): """ Load database credential from env variable. - host: ENV.{PREFIX}_HOST - port: ENV.{PREFIX}_PORT - database: ENV.{PREFIX}_DATABASE - username: ENV.{PREFIX}_USERNAME - password: ENV.{PREFIX}_PASSWORD :param prefix: str :param kms_decrypt: bool :param aws_profile: str """ if len(prefix) < 1: raise ValueError("prefix can't be empty") if len(set(prefix).difference(set(string.ascii_uppercase + "_"))): raise ValueError("prefix can only use [A-Z] and '_'!") if not prefix.endswith("_"): prefix = prefix + "_" data = dict( host=os.getenv(prefix + "HOST"), port=os.getenv(prefix + "PORT"), database=os.getenv(prefix + "DATABASE"), username=os.getenv(prefix + "USERNAME"), password=os.getenv(prefix + "PASSWORD"), ) if kms_decrypt is True: # pragma: no cover import boto3 from base64 import b64decode if aws_profile is None: kms = boto3.client("kms") else: ses = boto3.Session(profile_name=aws_profile) kms = ses.client("kms") def decrypt(kms, text): return kms.decrypt( CiphertextBlob=b64decode(text.encode("utf-8")) )["Plaintext"].decode("utf-8") data = { key: value if value is None else decrypt(kms, str(value)) for key, value in data.items() } return cls(**data)
Load database credential from env variable. - host: ENV.{PREFIX}_HOST - port: ENV.{PREFIX}_PORT - database: ENV.{PREFIX}_DATABASE - username: ENV.{PREFIX}_USERNAME - password: ENV.{PREFIX}_PASSWORD :param prefix: str :param kms_decrypt: bool :param aws_profile: str
def red_ext(request, message=None): ''' The external landing. Also a convenience function for redirecting users who don't have site access to the external page. Parameters: request - the request in the calling function message - a message from the caller function ''' if message: messages.add_message(request, messages.ERROR, message) return HttpResponseRedirect(reverse('external'))
The external landing. Also a convenience function for redirecting users who don't have site access to the external page. Parameters: request - the request in the calling function message - a message from the caller function
def evaluate(data_source, batch_size, ctx=None): """Evaluate the model on the dataset with cache model. Parameters ---------- data_source : NDArray The dataset is evaluated on. batch_size : int The size of the mini-batch. ctx : mx.cpu() or mx.gpu() The context of the computation. Returns ------- loss: float The loss on the dataset """ total_L = 0 hidden = cache_cell.\ begin_state(func=mx.nd.zeros, batch_size=batch_size, ctx=context[0]) next_word_history = None cache_history = None for i in range(0, len(data_source) - 1, args.bptt): if i > 0: print('Batch %d/%d, ppl %f'% (i, len(data_source), math.exp(total_L/i))) data, target = get_batch(data_source, i) data = data.as_in_context(ctx) target = target.as_in_context(ctx) L = 0 outs, next_word_history, cache_history, hidden = \ cache_cell(data, target, next_word_history, cache_history, hidden) for out in outs: L += (-mx.nd.log(out)).asscalar() total_L += L / data.shape[1] hidden = detach(hidden) return total_L / len(data_source)
Evaluate the model on the dataset with cache model. Parameters ---------- data_source : NDArray The dataset is evaluated on. batch_size : int The size of the mini-batch. ctx : mx.cpu() or mx.gpu() The context of the computation. Returns ------- loss: float The loss on the dataset
def node_hist_fig( node_color_distribution, title="Graph Node Distribution", width=400, height=300, top=60, left=25, bottom=60, right=25, bgcolor="rgb(240,240,240)", y_gridcolor="white", ): """Define the plotly plot representing the node histogram Parameters ---------- node_color_distribution: list of dicts describing the build_histogram width, height: integers - width and height of the histogram FigureWidget left, top, right, bottom: ints; number of pixels around the FigureWidget bgcolor: rgb of hex color code for the figure background color y_gridcolor: rgb of hex color code for the yaxis y_gridcolor Returns ------- FigureWidget object representing the histogram of the graph nodes """ text = [ "{perc}%".format(**locals()) for perc in [d["perc"] for d in node_color_distribution] ] pl_hist = go.Bar( y=[d["height"] for d in node_color_distribution], marker=dict(color=[d["color"] for d in node_color_distribution]), text=text, hoverinfo="y+text", ) hist_layout = dict( title=title, width=width, height=height, font=dict(size=12), xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False), yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)), bargap=0.01, margin=dict(l=left, r=right, b=bottom, t=top), hovermode="x", plot_bgcolor=bgcolor, ) return go.FigureWidget(data=[pl_hist], layout=hist_layout)
Define the plotly plot representing the node histogram Parameters ---------- node_color_distribution: list of dicts describing the build_histogram width, height: integers - width and height of the histogram FigureWidget left, top, right, bottom: ints; number of pixels around the FigureWidget bgcolor: rgb of hex color code for the figure background color y_gridcolor: rgb of hex color code for the yaxis y_gridcolor Returns ------- FigureWidget object representing the histogram of the graph nodes
def __set_URL(self, url): """ URL is stored as a str internally and must not contain non-ASCII chars. Raised exception in case of detected non-ASCII URL characters may be either UnicodeEncodeError or UnicodeDecodeError, depending on the used Python version's str type and the exact value passed as URL input data. """ if isinstance(url, str): url.encode("ascii") # Check for non-ASCII characters. self.url = url elif sys.version_info < (3, 0): self.url = url.encode("ascii") else: self.url = url.decode("ascii")
URL is stored as a str internally and must not contain non-ASCII chars. Raised exception in case of detected non-ASCII URL characters may be either UnicodeEncodeError or UnicodeDecodeError, depending on the used Python version's str type and the exact value passed as URL input data.
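The core ASCII check, as a tiny standalone sketch (str.encode('ascii') is what raises on non-ASCII input):

def check_ascii_url(url):
    url.encode("ascii")  # raises UnicodeEncodeError on any non-ASCII character
    return url

check_ascii_url("http://example.com/ok")        # passes
check_ascii_url("http://example.com/münchen")   # raises UnicodeEncodeError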
def get_events(self, **kwargs): """Retrieve events from server.""" force = kwargs.pop('force', False) response = api.request_sync_events(self.blink, self.network_id, force=force) try: return response['event'] except (TypeError, KeyError): _LOGGER.error("Could not extract events: %s", response, exc_info=True) return False
Retrieve events from server.
def _build_sql_query( self): """ *build sql query for the sdss square search* **Key Arguments:** # - **Return:** - None .. todo:: """ self.log.info('starting the ``_build_sql_query`` method') ra1, ra2, dec1, dec2 = self.ra1, self.ra2, self.dec1, self.dec2 if self.galaxyType == "all": self.sqlQuery = u""" SELECT p.objiD, p.ra, p.dec, s.z as specz, s.zerr as specz_err, z.z as photoz, z.zerr as photoz_err, p.type FROM PhotoObjAll p LEFT JOIN SpecObjAll AS s ON s.bestobjid = p.objid LEFT JOIN Photoz AS z ON z.objid = p.objid WHERE (p.ra between %(ra1)s and %(ra2)s) and (p.dec between %(dec1)s and %(dec2)s) and p.clean = 1 and p.type = 3 """ % locals() elif self.galaxyType == "specz": self.sqlQuery = u""" SELECT p.objiD, p.ra, p.dec, s.z as specz, s.zerr as specz_err, p.type FROM PhotoObjAll p, SpecObjAll s WHERE (s.bestobjid = p.objid) and (p.ra between %(ra1)s and %(ra2)s) and (p.dec between %(dec1)s and %(dec2)s) and p.type = 3 """ % locals() elif self.galaxyType == "photoz": self.sqlQuery = u""" SELECT p.objiD, p.ra, p.dec, z.z as photoz, z.zerr as photoz_err, p.type FROM PhotoObjAll p, Photoz z WHERE (z.objid = p.objid) and (p.ra between %(ra1)s and %(ra2)s) and (p.dec between %(dec1)s and %(dec2)s) and p.clean = 1 and p.type = 3 """ % locals() elif self.galaxyType == False or not self.galaxyType: self.sqlQuery = u""" SELECT p.objiD, p.ra, p.dec, s.z as specz, s.zerr as specz_err, z.z as photoz, z.zerr as photoz_err, p.type FROM PhotoObjAll p LEFT JOIN SpecObjAll AS s ON s.bestobjid = p.objid LEFT JOIN Photoz AS z ON z.objid = p.objid WHERE (p.ra between %(ra1)s and %(ra2)s) and (p.dec between %(dec1)s and %(dec2)s) and p.clean = 1 and (p.type = 3 or p.type = 6) """ % locals() self.sqlQuery = self.sqlQuery.strip() self.log.info('completed the ``_build_sql_query`` method') return None
*build sql query for the sdss square search* **Key Arguments:** # - **Return:** - None .. todo::
def _update_targets(vesseldicts, environment_dict): """ <Purpose> Connects to the nodes in the vesseldicts and adds them to the list of valid targets. <Arguments> vesseldicts: A list of vesseldicts obtained through SeattleClearinghouseClient calls. <Side Effects> All valid targets that the user can access on the specified nodes are added to the list of targets. <Exceptions> None <Returns> None """ # Compile a list of the nodes that we need to check nodelist = [] for vesseldict in vesseldicts: nodeip_port = vesseldict['node_ip']+':'+str(vesseldict['node_port']) if not nodeip_port in nodelist: nodelist.append(nodeip_port) # we'll output a message about the new keys later... newidlist = [] faillist = [] # Clear the list so that the user doesn't target vessels acquired from # previous requests when targeting this group seash_global_variables.targets['acquired'] = [] print nodelist # currently, if I browse more than once, I look up everything again... retdict = seash_helper.contact_targets( nodelist, seash_helper.browse_target, environment_dict['currentkeyname'], 'acquired') # parse the output so we can print out something intelligible for nodename in retdict: if retdict[nodename][0]: newidlist = newidlist + retdict[nodename][1] else: faillist.append(nodename) seash_helper.print_vessel_errors(retdict) if len(newidlist) == 0: print "Could not add any new targets." else: print "Added targets: "+", ".join(newidlist) if len(seash_global_variables.targets['acquired']) > 0: num_targets = str(len(seash_global_variables.targets['acquired'])) print "Added group 'acquired' with "+num_targets+" targets"
<Purpose> Connects to the nodes in the vesseldicts and adds them to the list of valid targets. <Arguments> vesseldicts: A list of vesseldicts obtained through SeattleClearinghouseClient calls. <Side Effects> All valid targets that the user can access on the specified nodes are added to the list of targets. <Exceptions> None <Returns> None
def list_networks(**kwargs): ''' List all virtual networks. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.list_networks ''' conn = __get_conn(**kwargs) try: return [net.name() for net in conn.listAllNetworks()] finally: conn.close()
List all virtual networks. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.list_networks
def accept(self, origin, protocol): """ Create a new route attached to a L{IBoxReceiver} created by the L{IBoxReceiverFactory} with the indicated protocol. @type origin: C{unicode} @param origin: The identifier of a route on the peer which will be associated with this connection. Boxes sent back by the protocol which is created in this call will be sent back to this route. @type protocol: C{unicode} @param protocol: The name of the protocol to which to establish a connection. @raise ProtocolUnknown: If no factory can be found for the named protocol. @return: A newly created C{unicode} route identifier for this connection (as the value of a C{dict} with a C{'route'} key). """ for factory in self.store.powerupsFor(IBoxReceiverFactory): # XXX What if there's a duplicate somewhere? if factory.protocol == protocol: receiver = factory.getBoxReceiver() route = self.router.bindRoute(receiver) # This might be better implemented using a hook on the box. # See Twisted ticket #3479. self.reactor.callLater(0, route.connectTo, origin) return {'route': route.localRouteName} raise ProtocolUnknown()
Create a new route attached to a L{IBoxReceiver} created by the L{IBoxReceiverFactory} with the indicated protocol. @type origin: C{unicode} @param origin: The identifier of a route on the peer which will be associated with this connection. Boxes sent back by the protocol which is created in this call will be sent back to this route. @type protocol: C{unicode} @param protocol: The name of the protocol to which to establish a connection. @raise ProtocolUnknown: If no factory can be found for the named protocol. @return: A newly created C{unicode} route identifier for this connection (as the value of a C{dict} with a C{'route'} key).
def paintEvent(self, event): """ Draws the pixmap for this widget. :param event | <QPaintEvent> """ pixmap = self.currentPixmap() rect = self.currentPixmapRect() with XPainter(self) as painter: painter.drawPixmap(rect.x(), rect.y(), pixmap)
Draws the pixmap for this widget. :param event | <QPaintEvent>
def cp(hdfs_src, hdfs_dst): """Copy a file :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful """ cmd = "hadoop fs -cp %s %s" % (hdfs_src, hdfs_dst) rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
Copy a file :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful
def _readClusterSettings(self): """ Read the current instance's meta-data to get the cluster settings. """ # get the leader metadata mdUrl = "http://169.254.169.254/metadata/instance?api-version=2017-08-01" header = {'Metadata': 'True'} request = urllib.request.Request(url=mdUrl, headers=header) response = urllib.request.urlopen(request) data = response.read() dataStr = data.decode("utf-8") metadata = json.loads(dataStr) # set values from the leader meta-data self._zone = metadata['compute']['location'] self.clusterName = metadata['compute']['resourceGroupName'] tagsStr = metadata['compute']['tags'] tags = dict(item.split(":") for item in tagsStr.split(";")) self._owner = tags.get('owner', 'no-owner') leader = self.getLeader() self._leaderPrivateIP = leader.privateIP self._setSSH() # create id_rsa.pub file on the leader if it is not there self._masterPublicKeyFile = self.LEADER_HOME_DIR + '.ssh/id_rsa.pub' # Add static nodes to /etc/hosts since Azure sometimes fails to find them with DNS map(lambda x: self._addToHosts(x), self.getProvisionedWorkers(None))
Read the current instance's meta-data to get the cluster settings.
def get_top_paths(self): """ :calls: `GET /repos/:owner/:repo/traffic/popular/paths <https://developer.github.com/v3/repos/traffic/>`_ :rtype: :class:`list` of :class:`github.Path.Path` """ headers, data = self._requester.requestJsonAndCheck( "GET", self.url + "/traffic/popular/paths" ) if isinstance(data, list): return [ github.Path.Path(self._requester, headers, item, completed=True) for item in data ]
:calls: `GET /repos/:owner/:repo/traffic/popular/paths <https://developer.github.com/v3/repos/traffic/>`_ :rtype: :class:`list` of :class:`github.Path.Path`
def apply(self, s, active=None): """ Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps """ if active is None: active = self.active return self.group.apply(s, active=active)
Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps
def merge_dict(self, *args, **kwargs): """ Takes variable inputs, compiles them into a dictionary then merges it to the current nomenclate's state :param args: (dict, Nomenclate), any number of dictionary inputs or Nomenclates to be converted to dicts :param kwargs: str, any number of kwargs that represent token:value pairs """ input_dict = self._convert_input(*args, **kwargs) if input_dict: self._sift_and_init_configs(input_dict) self.token_dict.merge_serialization(input_dict)
Takes variable inputs, compiles them into a dictionary then merges it to the current nomenclate's state :param args: (dict, Nomenclate), any number of dictionary inputs or Nomenclates to be converted to dicts :param kwargs: str, any number of kwargs that represent token:value pairs
def _run_train_step(self, data, mode='train'): """Run a single training step. :param data: input data :param mode: 'train' or 'test'. """ epoch_size = ((len(data) // self.batch_size) - 1) // self.num_steps costs = 0.0 iters = 0 step = 0 state = self._init_state.eval() op = self._train_op if mode == 'train' else tf.no_op() for step, (x, y) in enumerate( utilities.seq_data_iterator( data, self.batch_size, self.num_steps)): cost, state, _ = self.tf_session.run( [self.cost, self.final_state, op], {self.input_data: x, self.input_labels: y, self._init_state: state}) costs += cost iters += self.num_steps if step % (epoch_size // 10) == 10: print("%.3f perplexity" % (step * 1.0 / epoch_size)) return np.exp(costs / iters)
Run a single training step. :param data: input data :param mode: 'train' or 'test'.
def partition_agent(host): """ Partition a node from all network traffic except for SSH and loopback :param hostname: host or IP of the machine to partition from the cluster """ network.save_iptables(host) network.flush_all_rules(host) network.allow_all_traffic(host) network.run_iptables(host, ALLOW_SSH) network.run_iptables(host, ALLOW_PING) network.run_iptables(host, DISALLOW_MESOS) network.run_iptables(host, DISALLOW_INPUT)
Partition a node from all network traffic except for SSH and loopback :param hostname: host or IP of the machine to partition from the cluster
def get_context_dict(self): """ return a context dict of the desired state """ context_dict = {} for s in self.sections(): for k, v in self.manifest.items(s): context_dict["%s:%s" % (s, k)] = v for k, v in self.inputs.values().items(): context_dict["config:{0}".format(k)] = v context_dict.update(self.additional_context_variables.items()) context_dict.update(dict([("%s|escaped" % k, re.escape(str(v) or "")) for k, v in context_dict.items()])) return context_dict
return a context dict of the desired state
def unset(self, host, *args): """ Removes settings for a host. Parameters ---------- host : the host to remove settings from. *args : list of settings to remove. """ self.__check_host_args(host, args) remove_idx = [idx for idx, x in enumerate(self.lines_) if x.host == host and x.key.lower() in args] for idx in reversed(sorted(remove_idx)): del self.lines_[idx]
Removes settings for a host. Parameters ---------- host : the host to remove settings from. *args : list of settings to remove.
def search_elementnames(self, *substrings: str, name: str = 'elementnames') -> 'Selection': """Return a new selection containing all elements of the current selection with a name containing at least one of the given substrings. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() Pass the (sub)strings as positional arguments and, optionally, the name of the newly created |Selection| object as a keyword argument: >>> test = pub.selections.complete.copy('test') >>> from hydpy import prepare_model >>> test.search_elementnames('dill', 'lahn_1') Selection("elementnames", nodes=(), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) Wrong string specifications result in errors like the following: >>> test.search_elementnames(['dill', 'lahn_1']) Traceback (most recent call last): ... TypeError: While trying to determine the elements of selection \ `test` with names containing at least one of the given substrings \ `['dill', 'lahn_1']`, the following error occurred: 'in <string>' \ requires string as left operand, not list Method |Selection.select_elementnames| restricts the current selection to the one determined with the method |Selection.search_elementnames|: >>> test.select_elementnames('dill', 'lahn_1') Selection("test", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) On the contrary, the method |Selection.deselect_elementnames| restricts the current selection to all devices not determined by the method |Selection.search_elementnames|: >>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1') Selection("complete", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_lahn_2", "land_lahn_3", "stream_lahn_2_lahn_3")) """ try: selection = Selection(name) for element in self.elements: for substring in substrings: if substring in element.name: selection.elements += element break return selection except BaseException: values = objecttools.enumeration(substrings) objecttools.augment_excmessage( f'While trying to determine the elements of selection ' f'`{self.name}` with names containing at least one ' f'of the given substrings `{values}`')
Return a new selection containing all elements of the current selection with a name containing at least one of the given substrings. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() Pass the (sub)strings as positional arguments and, optionally, the name of the newly created |Selection| object as a keyword argument: >>> test = pub.selections.complete.copy('test') >>> from hydpy import prepare_model >>> test.search_elementnames('dill', 'lahn_1') Selection("elementnames", nodes=(), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) Wrong string specifications result in errors like the following: >>> test.search_elementnames(['dill', 'lahn_1']) Traceback (most recent call last): ... TypeError: While trying to determine the elements of selection \ `test` with names containing at least one of the given substrings \ `['dill', 'lahn_1']`, the following error occurred: 'in <string>' \ requires string as left operand, not list Method |Selection.select_elementnames| restricts the current selection to the one determined with the method |Selection.search_elementnames|: >>> test.select_elementnames('dill', 'lahn_1') Selection("test", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) On the contrary, the method |Selection.deselect_elementnames| restricts the current selection to all devices not determined by the method |Selection.search_elementnames|: >>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1') Selection("complete", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_lahn_2", "land_lahn_3", "stream_lahn_2_lahn_3"))
def _create_index_content(words): """Create html string of index file. Parameters ---------- words : list of str List of cached words. Returns ------- str html string. """ content = ["<h1>Index</h1>", "<ul>"] for word in words: content.append( '<li><a href="translations/{word}.html">{word}</a></li>'.format(word=word) ) content.append("</ul>") if not words: content.append("<i>Nothing to see here ...yet!</i>") return "\n".join(content)
Create html string of index file. Parameters ---------- words : list of str List of cached words. Returns ------- str html string.
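Assuming the function above is importable, a quick look at the HTML it produces for two cached words:

print(_create_index_content(["apple", "baum"]))
# <h1>Index</h1>
# <ul>
# <li><a href="translations/apple.html">apple</a></li>
# <li><a href="translations/baum.html">baum</a></li>
# </ul>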
def setOverlayAlpha(self, ulOverlayHandle, fAlpha): """Sets the alpha of the overlay quad. Use 1.0 for 100 percent opacity to 0.0 for 0 percent opacity.""" fn = self.function_table.setOverlayAlpha result = fn(ulOverlayHandle, fAlpha) return result
Sets the alpha of the overlay quad. Use 1.0 for 100 percent opacity to 0.0 for 0 percent opacity.
def get_info(self, full=False): " Return printable information about current site. " if full: context = self.as_dict() return "".join("{0:<25} = {1}\n".format( key, context[key]) for key in sorted(context.iterkeys())) return "%s [%s]" % (self.get_name(), self.template)
Return printable information about current site.
def ProcessPathSpec(self, mediator, path_spec): """Processes a path specification. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. """ self.last_activity_timestamp = time.time() self.processing_status = definitions.STATUS_INDICATOR_RUNNING file_entry = path_spec_resolver.Resolver.OpenFileEntry( path_spec, resolver_context=mediator.resolver_context) if file_entry is None: display_name = mediator.GetDisplayNameForPathSpec(path_spec) logger.warning( 'Unable to open file entry with path spec: {0:s}'.format( display_name)) self.processing_status = definitions.STATUS_INDICATOR_IDLE return mediator.SetFileEntry(file_entry) try: if file_entry.IsDirectory(): self._ProcessDirectory(mediator, file_entry) self._ProcessFileEntry(mediator, file_entry) finally: mediator.ResetFileEntry() self.last_activity_timestamp = time.time() self.processing_status = definitions.STATUS_INDICATOR_IDLE
Processes a path specification. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification.
def b64_decode(data: bytes) -> bytes: """ :param data: Base 64 encoded data to decode. :type data: bytes :return: Base 64 decoded data. :rtype: bytes """ missing_padding = len(data) % 4 if missing_padding != 0: data += b'=' * (4 - missing_padding) return urlsafe_b64decode(data)
:param data: Base 64 encoded data to decode. :type data: bytes :return: Base 64 decoded data. :rtype: bytes
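Usage sketch for the function above: re-adding the padding that URL-safe encoders (e.g. JWT segments) strip off:

from base64 import urlsafe_b64encode

token = urlsafe_b64encode(b'hi').rstrip(b'=')  # b'aGk' -- padding stripped
print(b64_decode(token))                       # b'hi'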
def slicing_singlevalue(arg, length): """Internally used.""" if isinstance(arg, slice): start, stop, step = arg.indices(length) i = start if step > 0: while i < stop: yield i i += step else: while i > stop: yield i i += step else: try: i = arg.__index__() except AttributeError: raise TypeError("indices must be integers or slices, not " + arg.__class__.__name__) if i < 0: i += length yield i
Internally used.
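Usage sketch for the generator above, with both a slice and a single (possibly negative) index:

print(list(slicing_singlevalue(slice(1, 8, 2), 10)))  # [1, 3, 5, 7]
print(list(slicing_singlevalue(-1, 10)))              # [9]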
def _check_reach_env(): """Check that the environment supports running REACH.""" # Get the path to the REACH JAR path_to_reach = get_config('REACHPATH') if path_to_reach is None: path_to_reach = environ.get('REACHPATH', None) if path_to_reach is None or not path.exists(path_to_reach): raise ReachError( 'Reach path unset or invalid. Check REACHPATH environment var ' 'and/or config file.' ) logger.debug('Using REACH jar at: %s' % path_to_reach) # Get the reach version. reach_version = get_config('REACH_VERSION') if reach_version is None: reach_version = environ.get('REACH_VERSION', None) if reach_version is None: logger.debug('REACH version not set in REACH_VERSION') m = re.match(r'reach-(.*?)\.jar', path.basename(path_to_reach)) reach_version = re.sub('-SNAP.*?$', '', m.groups()[0]) logger.debug('Using REACH version: %s' % reach_version) return path_to_reach, reach_version
Check that the environment supports running REACH.
def volumes(self, assets, dt): """ The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume') volumes = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): volumes.append(0) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'volume') entries[asset] = (dt_value, val) volumes.append(val) continue else: try: last_visited_dt, last_total = entries[asset] if last_visited_dt == dt_value: volumes.append(last_total) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'volume') val += last_total entries[asset] = (dt_value, val) volumes.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['volume'], after_last, dt, [asset], )[0] val = np.nansum(window) + last_total entries[asset] = (dt_value, val) volumes.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['volume'], market_open, dt, [asset], )[0] val = np.nansum(window) entries[asset] = (dt_value, val) volumes.append(val) continue return np.array(volumes)
The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter.
def _post_init(self): """A post init trigger""" try: return self.postinit() except Exception as exc: return self._onerror(Result.from_exception(exc, uuid=self.uuid))
A post init trigger
def is_active(self): """Determines whether this plugin is active. This plugin is only active if any run has an embedding. Returns: Whether any run has embedding data to show in the projector. """ if not self.multiplexer: return False if self._is_active: # We have already determined that the projector plugin should be active. # Do not re-compute that. We have no reason to later set this plugin to be # inactive. return True if self._thread_for_determining_is_active: # We are currently determining whether the plugin is active. Do not start # a separate thread. return self._is_active # The plugin is currently not active. The frontend might check again later. # For now, spin off a separate thread to determine whether the plugin is # active. new_thread = threading.Thread( target=self._determine_is_active, name='ProjectorPluginIsActiveThread') self._thread_for_determining_is_active = new_thread new_thread.start() return False
Determines whether this plugin is active. This plugin is only active if any run has an embedding. Returns: Whether any run has embedding data to show in the projector.
def remove_product_version(self, id, product_version_id, **kwargs): """ Removes a product version from the specified config set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_product_version(id, product_version_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration set id (required) :param int product_version_id: Product version id (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.remove_product_version_with_http_info(id, product_version_id, **kwargs) else: (data) = self.remove_product_version_with_http_info(id, product_version_id, **kwargs) return data
Removes a product version from the specified config set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_product_version(id, product_version_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration set id (required) :param int product_version_id: Product version id (required) :return: None If the method is called asynchronously, returns the request thread.
def add_dns(ip, interface='Local Area Connection', index=1): ''' Add the DNS server to the network interface (index starts from 1) Note: if the interface DNS is configured by DHCP, all the DNS servers will be removed from the interface and the requested DNS will be the only one CLI Example: .. code-block:: bash salt '*' win_dns_client.add_dns <ip> <interface> <index> ''' servers = get_dns_servers(interface) # Return False if could not find the interface if servers is False: return False # Return true if configured try: if servers[index - 1] == ip: return True except IndexError: pass # If configured in the wrong order delete it if ip in servers: rm_dns(ip, interface) cmd = ['netsh', 'interface', 'ip', 'add', 'dns', interface, ip, 'index={0}'.format(index), 'validate=no'] return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
Add the DNS server to the network interface (index starts from 1) Note: if the interface DNS is configured by DHCP, all the DNS servers will be removed from the interface and the requested DNS will be the only one CLI Example: .. code-block:: bash salt '*' win_dns_client.add_dns <ip> <interface> <index>
async def load_credentials(self, credentials): """Load existing credentials.""" split = credentials.split(':') self.identifier = split[0] self.srp.initialize(binascii.unhexlify(split[1])) _LOGGER.debug('Loaded AirPlay credentials: %s', credentials)
Load existing credentials.
def _serialize_normalized_array(array, fmt='png', quality=70): """Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer """ dtype = array.dtype assert np.issubdtype(dtype, np.unsignedinteger) assert np.max(array) <= np.iinfo(dtype).max assert array.shape[-1] > 1 # array dims must have been squeezed image = PIL.Image.fromarray(array) image_bytes = BytesIO() image.save(image_bytes, fmt, quality=quality) # TODO: Python 3 could save a copy here by using `getbuffer()` instead. image_data = image_bytes.getvalue() return image_data
Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
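A minimal usage sketch for the serializer above, assuming Pillow and NumPy are installed and that _serialize_normalized_array is importable from its module; the array and output path are purely illustrative.

import numpy as np

# Synthetic 64x64 RGB image with values already in the uint8 range 0..255.
rgb = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)

# Encode as PNG bytes; a lossy format such as 'jpeg' would honor the quality setting.
png_bytes = _serialize_normalized_array(rgb, fmt='png')

with open('example.png', 'wb') as fh:  # hypothetical output path
    fh.write(png_bytes)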
def start(self): """Start the single-user server in a docker service. You can specify the params for the service through jupyterhub_config.py or using the user_options """ # https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/user.py#L202 # By default jupyterhub calls the spawner passing user_options if self.use_user_options: user_options = self.user_options else: user_options = {} self.log.warn("user_options: {}".format(user_options)) service = yield self.get_service() if service is None: if 'name' in user_options: self.server_name = user_options['name'] if hasattr(self, 'container_spec') and self.container_spec is not None: container_spec = dict(**self.container_spec) elif user_options == {}: raise("A container_spec is needed in to create a service") container_spec.update(user_options.get('container_spec', {})) # iterates over mounts to create # a new mounts list of docker.types.Mount container_spec['mounts'] = [] for mount in self.container_spec['mounts']: m = dict(**mount) if 'source' in m: m['source'] = m['source'].format( username=self.service_owner) if 'driver_config' in m: device = m['driver_config']['options']['device'].format( username=self.service_owner ) m['driver_config']['options']['device'] = device m['driver_config'] = docker.types.DriverConfig( **m['driver_config']) container_spec['mounts'].append(docker.types.Mount(**m)) # some Envs are required by the single-user-image container_spec['env'] = self.get_env() if hasattr(self, 'resource_spec'): resource_spec = self.resource_spec resource_spec.update(user_options.get('resource_spec', {})) if hasattr(self, 'networks'): networks = self.networks if user_options.get('networks') is not None: networks = user_options.get('networks') if hasattr(self, 'placement'): placement = self.placement if user_options.get('placement') is not None: placement = user_options.get('placement') image = container_spec['Image'] del container_spec['Image'] # create the service container_spec = docker.types.ContainerSpec( image, **container_spec) resources = docker.types.Resources(**resource_spec) task_spec = {'container_spec': container_spec, 'resources': resources, 'placement': placement } task_tmpl = docker.types.TaskTemplate(**task_spec) resp = yield self.docker('create_service', task_tmpl, name=self.service_name, networks=networks) self.service_id = resp['ID'] self.log.info( "Created Docker service '%s' (id: %s) from image %s", self.service_name, self.service_id[:7], image) else: self.log.info( "Found existing Docker service '%s' (id: %s)", self.service_name, self.service_id[:7]) # Handle re-using API token. # Get the API token from the environment variables # of the running service: envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env'] for line in envs: if line.startswith('JPY_API_TOKEN='): self.api_token = line.split('=', 1)[1] break ip = self.service_name port = self.service_port # we use service_name instead of ip # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery # service_port is actually equal to 8888 return (ip, port)
Start the single-user server in a docker service. You can specify the params for the service through jupyterhub_config.py or using the user_options
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4=None, filter=None, timeout=2, verbose=None, **kargs): # noqa: E501 """Instant TCP traceroute traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None # noqa: E501 """ if verbose is None: verbose = conf.verb if filter is None: # we only consider ICMP error packets and TCP packets with at # least the ACK flag set *and* either the SYN or the RST flag # set filter = "(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))" # noqa: E501 if l4 is None: a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / TCP(seq=RandInt(), sport=sport, dport=dport), # noqa: E501 timeout=timeout, filter=filter, verbose=verbose, **kargs) else: # this should always work filter = "ip" a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / l4, timeout=timeout, filter=filter, verbose=verbose, **kargs) a = TracerouteResult(a.res) if verbose: a.show() return a, b
Instant TCP traceroute traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None # noqa: E501
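A hedged usage sketch for the traceroute helper above; running it requires root privileges and real network access, so the target and port are illustrative only.

# res is a TracerouteResult; unans holds the unanswered probes.
res, unans = traceroute("www.example.com", dport=443, minttl=1, maxttl=20, verbose=0)

# TracerouteResult supports the usual scapy result views, e.g. a textual summary.
res.show()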
def get_weekly_charts(self, chart_kind, from_date=None, to_date=None): """ Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track" """ method = ".getWeekly" + chart_kind.title() + "Chart" chart_type = eval(chart_kind.title()) # string to type params = self._get_params() if from_date and to_date: params["from"] = from_date params["to"] = to_date doc = self._request(self.ws_prefix + method, True, params) seq = [] for node in doc.getElementsByTagName(chart_kind.lower()): if chart_kind == "artist": item = chart_type(_extract(node, "name"), self.network) else: item = chart_type( _extract(node, "artist"), _extract(node, "name"), self.network ) weight = _number(_extract(node, "playcount")) seq.append(TopItem(item, weight)) return seq
Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track"
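A hedged usage sketch: `user` stands for a pylast object exposing get_weekly_charts (for example a User), and the epoch timestamps are illustrative; each returned TopItem carries the charted item and its playcount weight.

# Weekly artist chart for one illustrative week (UNIX epoch seconds as strings).
charts = user.get_weekly_charts("artist", from_date="1577836800", to_date="1578441600")

for top_item in charts:
    print(top_item.item, top_item.weight)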
def start_rpc_listeners(self): """Configure all listeners here""" self._setup_rpc() if not self.endpoints: return [] self.conn = n_rpc.create_connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads()
Configure all listeners here
def date_of_birth(self, value): """ The date of birth of the individual. :param value: :return: """ if value: self._date_of_birth = parse(value).date() if isinstance(value, type_check) else value
The date of birth of the individual. :param value: :return:
def load_config(self, path, environments, fill_with_defaults=False): """Will load default.yaml and <environment>.yaml at given path. The environment config will override the default values. :param path: directory where to find your config files. If the last character is not a slash (/) it will be appended. Example: resources/ :param environments: list of environment configs to load. File name pattern: <environment>.yaml. Example: develop.yaml. Latter configs will override previous ones. :param fill_with_defaults: use 'defaults' keyword in config file to fill up following config entrys. :return: your config as dictionary. """ yaml.add_implicit_resolver("!environ", self.__environ_pattern) yaml.add_constructor('!environ', self.__get_from_environment) yaml.add_implicit_resolver("!vault", self.__vault_pattern) yaml.add_constructor('!vault', self.__get_from_vault) if not path.endswith('/'): path += '/' if type(environments) != list: environments = [environments] config = {} try: for env in environments: with open(path + env + '.yaml', 'r') as configFile: env_config = yaml.load(configFile.read()) or {} config.update(env_config) if fill_with_defaults: if 'defaults' in config: defaults = config['defaults'] for target in defaults: for index, item in enumerate(config[target]): tmp = defaults[target].copy() tmp.update(config[target][index]) config[target][index] = tmp return config except exceptions.VaultError as error: raise ConfigLoaderError("Could not read vault secrets [" + error.__class__.__name__ + "]") except yaml.YAMLError as error: raise ConfigLoaderError("Configuration files malformed [" + error.__class__.__name__ + "]") except json.decoder.JSONDecodeError as error: raise ConfigLoaderError("Vault response was not json [" + error.__class__.__name__ + "]") except Exception as error: raise ConfigLoaderError("WTF? [" + error.__class__.__name__ + "]")
Will load default.yaml and <environment>.yaml at given path. The environment config will override the default values. :param path: directory where to find your config files. If the last character is not a slash (/) it will be appended. Example: resources/ :param environments: list of environment configs to load. File name pattern: <environment>.yaml. Example: develop.yaml. Latter configs will override previous ones. :param fill_with_defaults: use 'defaults' keyword in config file to fill up following config entries. :return: your config as dictionary.
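A hedged usage sketch, assuming `loader` is an instance of the class defining load_config and that resources/default.yaml and resources/develop.yaml exist; the key lookup at the end is purely illustrative.

config = loader.load_config(
    path="resources/",
    environments=["default", "develop"],  # develop.yaml overrides default.yaml
    fill_with_defaults=True,
)

# Values resolved through the !environ / !vault tags arrive as plain strings.
print(config.get("database"))  # hypothetical key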
def sufar4(ascfile, meas_output='measurements.txt', aniso_output='rmag_anisotropy.txt', spec_infile=None, spec_outfile='specimens.txt', samp_outfile='samples.txt', site_outfile='sites.txt', specnum=0, sample_naming_con='1', user="", locname="unknown", instrument='', static_15_position_mode=False, dir_path='.', input_dir_path='', data_model_num=3): """ Converts ascii files generated by SUFAR ver.4.0 to MagIC files Parameters ---------- ascfile : str input ASC file, required meas_output : str measurement output filename, default "measurements.txt" aniso_output : str anisotropy output filename, MagIC 2 only, "rmag_anisotropy.txt" spec_infile : str specimen infile, default None spec_outfile : str specimen outfile, default "specimens.txt" samp_outfile : str sample outfile, default "samples.txt" site_outfile : str site outfile, default "sites.txt" specnum : int number of characters to designate a specimen, default 0 sample_naming_con : str sample/site naming convention, default '1', see info below user : str user name, default "" locname : str location name, default "unknown" instrument : str instrument name, default "" static_15_position_mode : bool specify static 15 position mode - default False (is spinning) dir_path : str output directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" data_model_num : int MagIC data model 2 or 3, default 3 Returns -------- type - Tuple : (True or False indicating if conversion was sucessful, file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY """ citation = 'This study' cont = 0 Z = 1 AniRecSs, AniRecs, SpecRecs, SampRecs, SiteRecs, MeasRecs = [], [], [], [], [], [] isspec = '0' spin = 0 data_model_num = int(float(data_model_num)) # set defaults for MagIC 2 if data_model_num == 2: if meas_output == 'measurements.txt': meas_output = 'magic_measurements.txt' if spec_outfile == 'specimens.txt': spec_outfile = 'er_specimens.txt' if samp_outfile == 'samples.txt': samp_outfile = 'er_samples.txt' if site_outfile == 'sites.txt': site_outfile = 'er_sites.txt' # set column names for MagIC 3 spec_name_col = 'specimen' samp_name_col = 'sample' site_name_col = 'site' loc_name_col = 'location' citation_col = 'citations' method_col = 'method_codes' site_description_col = 'description' expedition_col = 'expedition_name' instrument_col = 'instrument_codes' experiment_col = 'experiments' analyst_col = 'analysts' quality_col = 'quality' aniso_quality_col = 'result_quality' meas_standard_col = 'standard' meas_description_col = 'description' aniso_type_col = 'aniso_type' aniso_unit_col = 'aniso_s_unit' aniso_n_col = 'aniso_s_n_measurements' azimuth_col = 'azimuth' spec_volume_col = 'volume' samp_dip_col = 'dip' bed_dip_col = 'bed_dip' bed_dip_direction_col = 'bed_dip_direction' chi_vol_col = 'susc_chi_volume' aniso_sigma_col = 'aniso_s_sigma' aniso_unit_col = 'aniso_s_unit' aniso_tilt_corr_col = 'aniso_tilt_correction' meas_table_name = 
'measurements' spec_table_name = 'specimens' samp_table_name = 'samples' site_table_name = 'sites' # set column names for MagIC 2 if data_model_num == 2: spec_name_col = 'er_specimen_name' samp_name_col = 'er_sample_name' site_name_col = 'er_site_name' loc_name_col = 'er_location_name' citation_col = 'er_citation_names' method_col = 'magic_method_codes' site_description_col = 'site_description' expedition_col = 'er_expedition_name' instrument_col = 'magic_instrument_codes' experiment_col = 'magic_experiment_names' analyst_col = 'er_analyst_mail_names' quality_col = 'measurement_flag' aniso_quality_col = 'anisotropy_flag' meas_standard_col = 'measurement_standard' meas_description_col = 'measurement_description' aniso_type_col = 'anisotropy_type' aniso_unit_col = 'anisotropy_unit' aniso_n_col = 'anisotropy_n' azimuth_col = 'sample_azimuth' spec_volume_col = 'specimen_volume' samp_dip_col = 'sample_dip' bed_dip_col = 'sample_bed_dip' bed_dip_direction_col = 'sample_bed_dip_direction' chi_vol_col = 'measurement_chi_volume' aniso_sigma_col = 'anisotropy_sigma' aniso_unit_col = 'anisotropy_unit' aniso_tilt_corr_col = 'anisotropy_tilt_correction' meas_table_name = 'magic_measurements' spec_table_name = 'er_specimens' samp_table_name = 'er_samples' site_table_name = 'er_sites' # create full path for files input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) ascfile = os.path.join(input_dir_path, ascfile) aniso_output = os.path.join(output_dir_path, aniso_output) # initialized but not used meas_output = os.path.join(output_dir_path, meas_output) spec_outfile = os.path.join(output_dir_path, spec_outfile) samp_outfile = os.path.join(output_dir_path, samp_outfile) site_outfile = os.path.join(output_dir_path, site_outfile) if "4" in sample_naming_con: if "-" not in sample_naming_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "option [4] must be in form 4-Z where Z is an integer" else: Z = sample_naming_con.split("-")[1] sample_naming_con = "4" if "7" in sample_naming_con: if "-" not in sample_naming_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "option [7] must be in form 7-Z where Z is an integer" else: Z = sample_naming_con.split("-")[1] sample_naming_con = "7" if static_15_position_mode: spin = 0 if spec_infile: if os.path.isfile(os.path.join(input_dir_path, str(spec_infile))): # means an er_specimens.txt file has been provided with sample, # site, location (etc.) 
info isspec = '1' specnum = int(specnum) if isspec == "1": specs, file_type = pmag.magic_read(spec_infile) specnames, sampnames, sitenames = [], [], [] # if '-new' not in sys.argv: # see if there are already specimen,sample, site files lying around # try: # SpecRecs,file_type=pmag.magic_read(input_dir_path+'/er_specimens.txt') # for spec in SpecRecs: # if spec['er_specimen_name'] not in specnames: # specnames.append(samp['er_specimen_name']) # except: # SpecRecs,specs=[],[] # try: # SampRecs,file_type=pmag.magic_read(input_dir_path+'/er_samples.txt') # for samp in SampRecs: # if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name']) # except: # sampnames,SampRecs=[],[] # try: # SiteRecs,file_type=pmag.magic_read(input_dir_path+'/er_sites.txt') # for site in SiteRecs: # if site['er_site_names'] not in sitenames:sitenames.append(site['er_site_name']) # except: # sitenames,SiteRecs=[],[] try: file_input = open(ascfile, 'r') except: print('Error opening file: ', ascfile) return False, 'Error opening file: {}'.format(ascfile) Data = file_input.readlines() file_input.close() k = 0 while k < len(Data): line = Data[k] words = line.split() if "ANISOTROPY" in words: # first line of data for the spec MeasRec, AniRec, SpecRec, SampRec, SiteRec = {}, {}, {}, {}, {} specname = words[0] AniRec[spec_name_col] = specname if isspec == "1": for spec in specs: if spec[spec_name_col] == specname: AniRec[samp_name_col] = spec[samp_name_col] AniRec[site_name_col] = spec[site_name_col] AniRec[loc_name_col] = spec[loc_name_col] break elif isspec == "0": if specnum != 0: sampname = specname[:-specnum] else: sampname = specname AniRec[samp_name_col] = sampname SpecRec[spec_name_col] = specname SpecRec[samp_name_col] = sampname SampRec[samp_name_col] = sampname SiteRec[samp_name_col] = sampname SiteRec[site_description_col] = 's' if sample_naming_con != "9": AniRec[site_name_col] = pmag.parse_site( AniRec[samp_name_col], sample_naming_con, Z) SpecRec[site_name_col] = pmag.parse_site( AniRec[samp_name_col], sample_naming_con, Z) SampRec[site_name_col] = pmag.parse_site( AniRec[samp_name_col], sample_naming_con, Z) SiteRec[site_name_col] = pmag.parse_site( AniRec[samp_name_col], sample_naming_con, Z) else: AniRec[site_name_col] = specname SpecRec[site_name_col] = specname SampRec[site_name_col] = specname SiteRec[site_name_col] = specname pieces = specname.split('-') AniRec[expedition_col] = pieces[0] SpecRec[expedition_col] = pieces[0] SampRec[expedition_col] = pieces[0] SiteRec[expedition_col] = pieces[0] location = pieces[1] AniRec[loc_name_col] = locname SpecRec[loc_name_col] = locname SampRec[loc_name_col] = locname SiteRec[loc_name_col] = locname AniRec[citation_col] = "This study" SpecRec[citation_col] = "This study" SampRec[citation_col] = "This study" SiteRec[citation_col] = "This study" AniRec[citation_col] = "This study" AniRec[instrument_col] = instrument AniRec[method_col] = "LP-X:AE-H:LP-AN-MS" AniRec[experiment_col] = specname + ":" + "LP-AN-MS" AniRec[analyst_col] = user for key in list(AniRec.keys()): MeasRec[key] = AniRec[key] if data_model_num == 2: MeasRec['magic_experiment_name'] = AniRec.get('magic_experiment_names', '') if 'magic_experiment_names' in MeasRec: MeasRec.pop('magic_experiment_names') if data_model_num == 3: MeasRec['experiment'] = AniRec.get('experiments', '') if 'experiments' in MeasRec: MeasRec.pop('experiments') MeasRec[quality_col] = 'g' AniRec[aniso_quality_col] = 'g' MeasRec[meas_standard_col] = 'u' MeasRec[meas_description_col] = 'Bulk sucsecptibility 
measurement' AniRec[aniso_type_col] = "AMS" AniRec[aniso_unit_col] = "Normalized by trace" if spin == 1: AniRec[aniso_n_col] = "192" else: AniRec[aniso_n_col] = "15" if 'Azi' in words and isspec == '0': az = float(words[1]) P1 = float(words[4]) P2 = float(words[5]) P3 = float(words[6]) # P4 relates to a fabric or bedding measurement -- not dealt with # here P4 = float(words[7]) az = az + P1 * 360. / 12. - P3 * 360. / 12. if az >= 360: az = az - 360 elif az <= -360: az = az + 360 labaz = az SampRec[azimuth_col] = str(round(az, 1)) if 'Dip' in words: # convert actual volume to m^3 from cm^3 SpecRec[spec_volume_col] = '%8.3e' % (float(words[10]) * 1e-6) dip = float(words[1]) if P2 == 90: dip = dip - 90. labdip = dip SampRec[samp_dip_col] = str(round(dip, 1)) if 'T1' in words and 'F1' in words: k += 2 # read in fourth line down line = Data[k] rec = line.split() dd = rec[1].split('/') dip_direction = int(dd[0]) + 90 SampRec[bed_dip_direction_col] = '%i' % (dip_direction) SampRec[bed_dip_col] = dd[1] bed_dip = float(dd[1]) if "Mean" in words: k += 4 # read in fourth line down line = Data[k] rec = line.split() MeasRec[chi_vol_col] = rec[1] sigma = .01 * float(rec[2]) / 3. AniRec[aniso_sigma_col] = '%7.4f' % (sigma) AniRec[aniso_unit_col] = 'SI' if "factors" in words: k += 4 # read in second line down line = Data[k] rec = line.split() if "Specimen" in words: # first part of specimen data # eigenvalues sum to unity - not 3 s1_val = '%7.4f' % (float(words[5]) / 3.) s2_val = '%7.4f' % (float(words[6]) / 3.) s3_val = '%7.4f' % (float(words[7]) / 3.) k += 1 line = Data[k] rec = line.split() # eigenvalues sum to unity - not 3 s4_val= '%7.4f' % (float(rec[5]) / 3.) s5_val = '%7.4f' % (float(rec[6]) / 3.) s6_val = '%7.4f' % (float(rec[7]) / 3.) # parse for data model 2 if data_model_num == 2: AniRec['anisotropy_s1'] = s1_val AniRec['anisotropy_s2'] = s2_val AniRec['anisotropy_s3'] = s3_val AniRec['anisotropy_s4'] = s4_val AniRec['anisotropy_s5'] = s5_val AniRec['anisotropy_s6'] = s6_val # parse for data model 3 else: vals = (s1_val, s2_val, s3_val, s4_val, s5_val, s6_val) AniRec['aniso_s'] = ":".join([v.strip() for v in vals]) # AniRec[aniso_tilt_corr_col] = '-1' AniRecs.append(AniRec) AniRecG, AniRecT = {}, {} for key in list(AniRec.keys()): AniRecG[key] = AniRec[key] for key in list(AniRec.keys()): AniRecT[key] = AniRec[key] sbar = [] sbar.append(float(s1_val)) sbar.append(float(s2_val)) sbar.append(float(s3_val)) sbar.append(float(s4_val)) sbar.append(float(s5_val)) sbar.append(float(s6_val)) sbarg = pmag.dosgeo(sbar, labaz, labdip) s1_g = '%12.10f' % (sbarg[0]) s2_g = '%12.10f' % (sbarg[1]) s3_g = '%12.10f' % (sbarg[2]) s4_g = '%12.10f' % (sbarg[3]) s5_g = '%12.10f' % (sbarg[4]) s6_g = '%12.10f' % (sbarg[5]) if data_model_num == 2: AniRecG["anisotropy_s1"] = s1_g AniRecG["anisotropy_s2"] = s2_g AniRecG["anisotropy_s3"] = s3_g AniRecG["anisotropy_s4"] = s4_g AniRecG["anisotropy_s5"] = s5_g AniRecG["anisotropy_s6"] = s6_g else: vals = (s1_g, s2_g, s3_g, s4_g, s5_g, s6_g) AniRecG['aniso_s'] = ":".join([v.strip() for v in vals]) AniRecG[aniso_tilt_corr_col] = '0' AniRecs.append(AniRecG) if bed_dip != "" and bed_dip != 0: # have tilt correction sbart = pmag.dostilt(sbarg, dip_direction, bed_dip) s1_t = '%12.10f' % (sbart[0]) s2_t = '%12.10f' % (sbart[1]) s3_t = '%12.10f' % (sbart[2]) s4_t = '%12.10f' % (sbart[3]) s5_t = '%12.10f' % (sbart[4]) s6_t = '%12.10f' % (sbart[5]) if data_model_num == 2: AniRecT["anisotropy_s1"] = s1_t AniRecT["anisotropy_s2"] = s2_t AniRecT["anisotropy_s3"] = s3_t 
AniRecT["anisotropy_s4"] = s4_t AniRecT["anisotropy_s5"] = s5_t AniRecT["anisotropy_s6"] = s6_t else: vals = (s1_t, s2_t, s3_t, s4_t, s5_t, s6_t) AniRecT["aniso_s"] = ":".join([v.strip() for v in vals]) AniRecT[aniso_tilt_corr_col] = '100' AniRecs.append(AniRecT) MeasRecs.append(MeasRec) if SpecRec[spec_name_col] not in specnames: SpecRecs.append(SpecRec) specnames.append(SpecRec[spec_name_col]) if SampRec[samp_name_col] not in sampnames: SampRecs.append(SampRec) sampnames.append(SampRec[samp_name_col]) if SiteRec[site_name_col] not in sitenames: SiteRecs.append(SiteRec) sitenames.append(SiteRec[site_name_col]) k += 1 # skip to next specimen pmag.magic_write(meas_output, MeasRecs, meas_table_name) print("bulk measurements put in ", meas_output) # if isspec=="0": SpecOut, keys = pmag.fillkeys(SpecRecs) # # for MagIC 2, anisotropy records go in rmag_anisotropy if data_model_num == 2: pmag.magic_write(aniso_output, AniRecs, 'rmag_anisotropy') print("anisotropy tensors put in ", aniso_output) # for MagIC 3, anisotropy records go in specimens if data_model_num == 3: full_SpecOut = [] spec_list = [] for rec in SpecOut: full_SpecOut.append(rec) spec_name = rec[spec_name_col] if spec_name not in spec_list: spec_list.append(spec_name) ani_recs = pmag.get_dictitem(AniRecs, spec_name_col, spec_name, 'T') full_SpecOut.extend(ani_recs) # FILL KEYS full_SpecOut, keys = pmag.fillkeys(full_SpecOut) else: full_SpecOut = SpecOut pmag.magic_write(spec_outfile, full_SpecOut, spec_table_name) print("specimen/anisotropy info put in ", spec_outfile) SampOut, keys = pmag.fillkeys(SampRecs) pmag.magic_write(samp_outfile, SampOut, samp_table_name) print("sample info put in ", samp_outfile) SiteOut, keys = pmag.fillkeys(SiteRecs) pmag.magic_write(site_outfile, SiteOut, site_table_name) print("site info put in ", site_outfile) return True, meas_output
Converts ascii files generated by SUFAR ver.4.0 to MagIC files Parameters ---------- ascfile : str input ASC file, required meas_output : str measurement output filename, default "measurements.txt" aniso_output : str anisotropy output filename, MagIC 2 only, "rmag_anisotropy.txt" spec_infile : str specimen infile, default None spec_outfile : str specimen outfile, default "specimens.txt" samp_outfile : str sample outfile, default "samples.txt" site_outfile : str site outfile, default "sites.txt" specnum : int number of characters to designate a specimen, default 0 sample_naming_con : str sample/site naming convention, default '1', see info below user : str user name, default "" locname : str location name, default "unknown" instrument : str instrument name, default "" static_15_position_mode : bool specify static 15 position mode - default False (is spinning) dir_path : str output directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" data_model_num : int MagIC data model 2 or 3, default 3 Returns -------- type - Tuple : (True or False indicating if conversion was successful, file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
def diffuse(self, *args): """ Dispatcher for the diffuse implementation, selecting a mode depending on the arguments used. """ mode = diffusingModeEnum.unknown if (isinstance(args[0], str) and (len(args) == 3)): # received diffuse(str, any, any) mode = diffusingModeEnum.element elif (hasattr(args[0], "__len__") and (len(args) == 2)): # received diffuse(dict({str: any}), dict({str: any})) mode = diffusingModeEnum.elements else: raise TypeError( ("Called diffuse method using bad arguments, received" " '{0}', but expected 'str, any, any' or" " 'dict(str: any), dict(str: any)'.").format(args)) self._diffuse(mode, *args)
Dispatcher for the diffuse implementation, selecting a mode depending on the arguments used.
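A hedged sketch of the two call shapes the dispatcher accepts, assuming `grid` is an instance of the class defining diffuse; the names and values are illustrative only.

# Single-element form: diffuse(str, any, any)
grid.diffuse("temperature", 20.0, 25.0)

# Multi-element form: diffuse(dict({str: any}), dict({str: any}))
grid.diffuse({"temperature": 20.0}, {"temperature": 25.0})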
def post_request(profile, resource, payload): """Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict. """ url = get_url(profile, resource) headers = get_headers(profile) response = requests.post(url, json=payload, headers=headers) return response.json()
Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict.
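A hedged usage sketch, assuming a profile shaped the way the docstring describes (a repo plus a token) and the standard GitHub git-data commit payload; the resource path, SHA placeholders, and values are illustrative only.

profile = {"repo": "someuser/somerepo", "token": "<personal-access-token>"}

payload = {
    "message": "Add generated file",
    "tree": "<tree-sha>",
    "parents": ["<parent-commit-sha>"],
}

# POST .../repos/:repo/git/commits with the JSON payload above.
response_body = post_request(profile, "/commits", payload)
print(response_body.get("sha"))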
def QA_indicator_RSI(DataFrame, N1=12, N2=26, N3=9): 'Relative Strength Index RSI1: SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100;' CLOSE = DataFrame['close'] LC = REF(CLOSE, 1) RSI1 = SMA(MAX(CLOSE - LC, 0), N1) / SMA(ABS(CLOSE - LC), N1) * 100 RSI2 = SMA(MAX(CLOSE - LC, 0), N2) / SMA(ABS(CLOSE - LC), N2) * 100 RSI3 = SMA(MAX(CLOSE - LC, 0), N3) / SMA(ABS(CLOSE - LC), N3) * 100 DICT = {'RSI1': RSI1, 'RSI2': RSI2, 'RSI3': RSI3} return pd.DataFrame(DICT)
Relative Strength Index RSI1: SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100;
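A hedged usage sketch, assuming the REF/SMA/MAX/ABS helpers used above are importable from the same indicator module; the synthetic close series is illustrative.

import numpy as np
import pandas as pd

# 120 synthetic daily closes drifting around 10.0.
df = pd.DataFrame({"close": 10.0 + np.cumsum(np.random.normal(0, 0.1, size=120))})

rsi = QA_indicator_RSI(df, N1=12, N2=26, N3=9)
print(rsi[["RSI1", "RSI2", "RSI3"]].tail())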
def fdrcorrection(pvals, alpha=0.05): """ Benjamini-Hochberg FDR correction, inspired by statsmodels. """ # Implementation adapted from GOATools. pvals = np.asarray(pvals) pvals_sortind = np.argsort(pvals) pvals_sorted = np.take(pvals, pvals_sortind) ecdffactor = _ecdf(pvals_sorted) reject = pvals_sorted <= ecdffactor*alpha if reject.any(): rejectmax = max(np.nonzero(reject)[0]) reject[:rejectmax] = True pvals_corrected_raw = pvals_sorted / ecdffactor pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1] del pvals_corrected_raw pvals_corrected[pvals_corrected>1] = 1 pvals_corrected_ = np.empty_like(pvals_corrected) pvals_corrected_[pvals_sortind] = pvals_corrected del pvals_corrected reject_ = np.empty_like(reject) reject_[pvals_sortind] = reject return reject_, pvals_corrected_
Benjamini-Hochberg FDR correction, inspired by statsmodels.
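A small usage sketch for the correction above; it assumes the module-level _ecdf helper the function relies on is available alongside it, and the p-values are illustrative.

import numpy as np

pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205])

reject, pvals_corrected = fdrcorrection(pvals, alpha=0.05)

# reject is a boolean mask in the original ordering; pvals_corrected holds the
# monotone BH-adjusted p-values, also in the original ordering.
print(reject)
print(pvals_corrected.round(4))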
def save(self, *args, **kwargs): """ automatically update updated date field """ # auto fill updated field with current time unless explicitly disabled auto_update = kwargs.get('auto_update', True) if auto_update: self.updated = now() # remove eventual auto_update if 'auto_update' in kwargs: kwargs.pop('auto_update') super(BaseDate, self).save(*args, **kwargs)
automatically update updated date field
def sys_open(self, buf, flags, mode): """ :param buf: address of zero-terminated pathname :param flags: file access bits :param mode: file permission mode """ filename = self.current.read_string(buf) try: f = self._sys_open_get_file(filename, flags) logger.debug(f"Opening file {filename} for real fd {f.fileno()}") except IOError as e: logger.warning(f"Could not open file {filename}. Reason: {e!s}") return -e.errno if e.errno is not None else -errno.EINVAL return self._open(f)
:param buf: address of zero-terminated pathname :param flags: file access bits :param mode: file permission mode
def prepare_mainsubstituter(): """Prepare and return a |Substituter| object for the main `__init__` file of *HydPy*.""" substituter = Substituter() for module in (builtins, numpy, datetime, unittest, doctest, inspect, io, os, sys, time, collections, itertools, subprocess, scipy, typing): substituter.add_module(module) for subpackage in (auxs, core, cythons, exe): for dummy, name, dummy in pkgutil.walk_packages(subpackage.__path__): full_name = subpackage.__name__ + '.' + name substituter.add_module(importlib.import_module(full_name)) substituter.add_modules(models) for cymodule in (annutils, smoothutils, pointerutils): substituter.add_module(cymodule, cython=True) substituter._short2long['|pub|'] = ':mod:`~hydpy.pub`' substituter._short2long['|config|'] = ':mod:`~hydpy.config`' return substituter
Prepare and return a |Substituter| object for the main `__init__` file of *HydPy*.
def _make_multidim_func(one_d_func, n, *args): """ A helper function to cut down on code repetition. Almost all of the code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing various forms of input arguments and then shelling out to the corresponding 1d version of the function. This routine does all the argument checking and passes things through the appropriate 1d function before using a tensor product to combine weights and nodes. Parameters ---------- one_d_func : function The 1d function to be called along each dimension n : int or array_like(float) A length-d iterable of the number of nodes in each dimension args : These are the arguments to various qnw____ functions. For the majority of the functions this is just a and b, but some differ. Returns ------- func : function The multi-dimensional version of the parameter ``one_d_func`` """ _args = list(args) n = np.atleast_1d(n) args = list(map(np.atleast_1d, _args)) if all([x.size == 1 for x in [n] + args]): return one_d_func(n[0], *_args) d = n.size for i in range(len(args)): if args[i].size == 1: args[i] = np.repeat(args[i], d) nodes = [] weights = [] for i in range(d): ai = [x[i] for x in args] _1d = one_d_func(n[i], *ai) nodes.append(_1d[0]) weights.append(_1d[1]) weights = ckron(*weights[::-1]) # reverse ordered tensor product nodes = gridmake(*nodes) return nodes, weights
A helper function to cut down on code repetition. Almost all of the code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing with various forms of input arguments and then shelling out to the corresponding 1d version of the function. This routine does all the argument checking and passes things through the appropriate 1d function before using a tensor product to combine weights and nodes. Parameters ---------- one_d_func : function The 1d function to be called along each dimension n : int or array_like(float) A length-d iterable of the number of nodes in each dimension args : These are the arguments to various qnw____ functions. For the majority of the functions this is just a and b, but some differ. Returns ------- func : function The multi-dimensional version of the parameter ``one_d_func``
def get_comments(self): """ :calls: `GET /gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GistComment.GistComment` """ return github.PaginatedList.PaginatedList( github.GistComment.GistComment, self._requester, self.url + "/comments", None )
:calls: `GET /gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GistComment.GistComment`
def java_install(self): """ install java :return: """ sudo('apt-get install openjdk-8-jdk -y') java_home = run('readlink -f /usr/bin/java | ' 'sed "s:/jre/bin/java::"') append(bigdata_conf.global_env_home, 'export JAVA_HOME={0}'.format( java_home ), use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home))
install java :return:
def _bind_sources_to_destination(self): # type: (SyncCopy) -> # Tuple[blobxfer.models.azure.StorageEntity, # blobxfer.models.azure.StorageEntity] """Bind source storage entity to destination storage entities :param SyncCopy self: this :rtype: tuple :return: (source storage entity, destination storage entity) """ seen = set() # iterate through source paths to download for src in self._spec.sources: for src_ase in src.files( self._creds, self._spec.options, self._general_options.dry_run): # generate copy destinations for source dest = [ dst_ase for dst_ase in self._generate_destination_for_source(src_ase) ] if len(dest) == 0: continue primary_dst = dest[0] uid = blobxfer.operations.synccopy.SyncCopy.create_deletion_id( primary_dst._client, primary_dst.container, primary_dst.name) if uid in seen: raise RuntimeError( 'duplicate destination entity detected: {}/{}'.format( primary_dst._client.primary_endpoint, primary_dst.path)) seen.add(uid) # add to exclusion set if self._spec.options.delete_extraneous_destination: self._delete_exclude.add(uid) if len(dest[1:]) > 0: if primary_dst.replica_targets is None: primary_dst.replica_targets = [] primary_dst.replica_targets.extend(dest[1:]) # check replica targets for duplicates for rt in primary_dst.replica_targets: ruid = ( blobxfer.operations.synccopy.SyncCopy. create_deletion_id( rt._client, rt.container, rt.name) ) if ruid in seen: raise RuntimeError( ('duplicate destination entity detected: ' '{}/{}').format( rt._client.primary_endpoint, rt.path)) seen.add(ruid) # add replica targets to deletion exclusion set if self._spec.options.delete_extraneous_destination: self._delete_exclude.add(ruid) yield src_ase, primary_dst
Bind source storage entity to destination storage entities :param SyncCopy self: this :rtype: tuple :return: (source storage entity, destination storage entity)
def _reuse_pre_installed_setuptools(env, installer): """ Return whether a pre-installed setuptools distribution should be reused. """ if not env.setuptools_version: return # no prior setuptools ==> no reuse reuse_old = config.reuse_old_setuptools reuse_best = config.reuse_best_setuptools reuse_future = config.reuse_future_setuptools reuse_comment = None if reuse_old or reuse_best or reuse_future: pv_old = parse_version(env.setuptools_version) pv_new = parse_version(installer.setuptools_version()) if pv_old < pv_new: if reuse_old: reuse_comment = "%s+ recommended" % ( installer.setuptools_version(),) elif pv_old > pv_new: if reuse_future: reuse_comment = "%s+ required" % ( installer.setuptools_version(),) elif reuse_best: reuse_comment = "" if reuse_comment is None: return # reuse not allowed by configuration if reuse_comment: reuse_comment = " (%s)" % (reuse_comment,) print("Reusing pre-installed setuptools %s distribution%s." % ( env.setuptools_version, reuse_comment)) return True
Return whether a pre-installed setuptools distribution should be reused.
def on_attribute(self, name): """ Decorator for attribute listeners. The decorated function (``observer``) is invoked differently depending on the *type of attribute*. Attributes that represent sensor values or which are used to monitor connection status are updated whenever a message is received from the vehicle. Attributes which reflect vehicle "state" are only updated when their values change (for example :py:func:`Vehicle.system_status`, :py:attr:`Vehicle.armed`, and :py:attr:`Vehicle.mode`). The argument list for the callback is ``observer(object, attr_name, attribute_value)`` * ``self`` - the associated :py:class:`Vehicle`. This may be compared to a global vehicle handle to implement vehicle-specific callback handling (if needed). * ``attr_name`` - the attribute name. This can be used to infer which attribute has triggered if the same callback is used for watching several attributes. * ``msg`` - the attribute value (so you don't need to re-query the vehicle object). .. note:: There is no way to remove an attribute listener added with this decorator. Use :py:func:`add_attribute_listener` if you need to be able to remove the :py:func:`attribute listener <remove_attribute_listener>`. The code fragment below shows how you can create a listener for the attitude attribute. .. code:: python @vehicle.on_attribute('attitude') def attitude_listener(self, name, msg): print '%s attribute is: %s' % (name, msg) See :ref:`vehicle_state_observe_attributes` for more information. :param String name: The name of the attribute to watch (or '*' to watch all attributes). :param observer: The callback to invoke when a change in the attribute is detected. """ def decorator(fn): if isinstance(name, list): for n in name: self.add_attribute_listener(n, fn) else: self.add_attribute_listener(name, fn) return decorator
Decorator for attribute listeners. The decorated function (``observer``) is invoked differently depending on the *type of attribute*. Attributes that represent sensor values or which are used to monitor connection status are updated whenever a message is received from the vehicle. Attributes which reflect vehicle "state" are only updated when their values change (for example :py:func:`Vehicle.system_status`, :py:attr:`Vehicle.armed`, and :py:attr:`Vehicle.mode`). The argument list for the callback is ``observer(object, attr_name, attribute_value)`` * ``self`` - the associated :py:class:`Vehicle`. This may be compared to a global vehicle handle to implement vehicle-specific callback handling (if needed). * ``attr_name`` - the attribute name. This can be used to infer which attribute has triggered if the same callback is used for watching several attributes. * ``msg`` - the attribute value (so you don't need to re-query the vehicle object). .. note:: There is no way to remove an attribute listener added with this decorator. Use :py:func:`add_attribute_listener` if you need to be able to remove the :py:func:`attribute listener <remove_attribute_listener>`. The code fragment below shows how you can create a listener for the attitude attribute. .. code:: python @vehicle.on_attribute('attitude') def attitude_listener(self, name, msg): print '%s attribute is: %s' % (name, msg) See :ref:`vehicle_state_observe_attributes` for more information. :param String name: The name of the attribute to watch (or '*' to watch all attributes). :param observer: The callback to invoke when a change in the attribute is detected.
def uridecode(uristring, encoding='utf-8', errors='strict'): """Decode a URI string or string component.""" if not isinstance(uristring, bytes): uristring = uristring.encode(encoding or 'ascii', errors) parts = uristring.split(b'%') result = [parts[0]] append = result.append decode = _decoded.get for s in parts[1:]: append(decode(s[:2], b'%' + s[:2])) append(s[2:]) if encoding is not None: return b''.join(result).decode(encoding, errors) else: return b''.join(result)
Decode a URI string or string component.
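Two hedged examples of the decoder above; they assume the module-level _decoded lookup table it relies on covers the standard percent-escapes.

# str input: percent-escapes are resolved and the result is decoded as UTF-8.
assert uridecode('a%20b%2Fc') == 'a b/c'

# bytes input with encoding=None: the raw decoded bytes are returned.
assert uridecode(b'a%2Fb', encoding=None) == b'a/b'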
def to_bytes(self): """Convert the entire image to bytes. :rtype: bytes """ # grab the chunks we needs out = [PNG_SIGN] # FIXME: it's tricky to define "other_chunks". HoneyView stop the # animation if it sees chunks other than fctl or idat, so we put other # chunks to the end of the file other_chunks = [] seq = 0 # for first frame png, control = self.frames[0] # header out.append(png.hdr) # acTL out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays))) # fcTL if control: out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())) seq += 1 # and others... idat_chunks = [] for type_, data in png.chunks: if type_ in ("IHDR", "IEND"): continue if type_ == "IDAT": # put at last idat_chunks.append(data) continue out.append(data) out.extend(idat_chunks) # FIXME: we should do some optimization to frames... # for other frames for png, control in self.frames[1:]: # fcTL out.append( make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()) ) seq += 1 # and others... for type_, data in png.chunks: if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT: continue elif type_ == "IDAT": # convert IDAT to fdAT out.append( make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4]) ) seq += 1 else: other_chunks.append(data) # end out.extend(other_chunks) out.append(png.end) return b"".join(out)
Convert the entire image to bytes. :rtype: bytes
async def set_position(self, position, wait_for_completion=True): """Set window to desired position. Parameters: * position: Position object containing the target position. * wait_for_completion: If set, function will return after device has reached target position. """ command_send = CommandSend( pyvlx=self.pyvlx, wait_for_completion=wait_for_completion, node_id=self.node_id, parameter=position) await command_send.do_api_call() if not command_send.success: raise PyVLXException("Unable to send command") await self.after_update()
Set window to desired position. Parameters: * position: Position object containing the target position. * wait_for_completion: If set, function will return after device has reached target position.
def generate_ill_conditioned_dot_product(n, c, dps=100): """n ... length of vector c ... target condition number """ # Algorithm 6.1 from # # ACCURATE SUM AND DOT PRODUCT, # TAKESHI OGITA, SIEGFRIED M. RUMP, AND SHIN'ICHI OISHI. assert n >= 6 n2 = round(n / 2) x = numpy.zeros(n) y = numpy.zeros(n) b = math.log2(c) # vector of exponents between 0 and b/2: e = numpy.rint(numpy.random.rand(n2) * b / 2).astype(int) # make sure exponents b/2 and 0 actually occur in e # vectors x,y e[0] = round(b / 2) + 1 e[-1] = 0 # generate first half of vectors x, y rx, ry = numpy.random.rand(2, n2) x[:n2] = (2 * rx - 1) * 2 ** e y[:n2] = (2 * ry - 1) * 2 ** e def dot_exact(x, y): mp.dps = dps # convert to list first, see # <https://github.com/fredrik-johansson/mpmath/pull/385> return mp.fdot(x.tolist(), y.tolist()) # for i=n2+1:n and v=1:i, # generate x_i, y_i such that (*) x(v)’*y(v) ~ 2^e(i-n2) # generate exponents for second half e = numpy.rint(numpy.linspace(b / 2, 0, n - n2)).astype(int) rx, ry = numpy.random.rand(2, n2) for i in range(n2, n): # x_i random with generated exponent x[i] = (2 * rx[i - n2] - 1) * 2 ** e[i - n2] # y_i according to (*) y[i] = ( (2 * ry[i - n2] - 1) * 2 ** e[i - n2] - dot_exact(x[: i + 1], y[: i + 1]) ) / x[i] x, y = numpy.random.permutation((x, y)) # the true dot product rounded to nearest floating point d = dot_exact(x, y) # the actual condition number C = 2 * dot_exact(abs(x), abs(y)) / abs(d) return x, y, d, C
n ... length of vector c ... target condition number
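A hedged usage sketch showing how the generator above is typically exercised: build a vector pair with a target condition number, then compare a naive dot product against the high-precision reference value d (an mpmath scalar). numpy and mpmath are assumed to be available.

import numpy

x, y, d, C = generate_ill_conditioned_dot_product(n=100, c=1e12)

naive = float(numpy.dot(x, y))
exact = float(d)

# The relative error of the naive evaluation grows roughly with the achieved
# condition number C (which may differ somewhat from the requested c).
rel_err = abs(naive - exact) / abs(exact)
print(C, rel_err)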
def get_partition_by_name(self, db_name, tbl_name, part_name): """ Parameters: - db_name - tbl_name - part_name """ self.send_get_partition_by_name(db_name, tbl_name, part_name) return self.recv_get_partition_by_name()
Parameters: - db_name - tbl_name - part_name
def changed(self, name): """Returns true if the parameter with the specified name has its value changed by the *first* module procedure in the interface. :arg name: the name of the parameter to check changed status for. """ if self.first: return self.first.changed(name) else: return False
Returns true if the parameter with the specified name has its value changed by the *first* module procedure in the interface. :arg name: the name of the parameter to check changed status for.
def env(self, **kw): ''' Allows adding/overriding env vars in the execution context. :param kw: Key-value pairs :return: self ''' self._original_env = kw if self._env is None: self._env = dict(os.environ) self._env.update({k: unicode(v) for k, v in kw.iteritems()}) return self
Allows adding/overriding env vars in the execution context. :param kw: Key-value pairs :return: self