code
docstring
def _initalize_tree(self, position, momentum, slice_var, stepsize): """ Initializes the root node of the tree, i.e. depth = 0 """ position_bar, momentum_bar, _ = self.simulate_dynamics(self.model, position, momentum, stepsize, self.grad_log_pdf).get_proposed_values() _, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf() hamiltonian = logp_bar - 0.5 * np.dot(momentum_bar, momentum_bar) candidate_set_size = slice_var < np.exp(hamiltonian) accept_set_bool = hamiltonian > np.log(slice_var) - 10000 # delta_max = 10000 return position_bar, momentum_bar, candidate_set_size, accept_set_bool
Initializes the root node of the tree, i.e. depth = 0
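The two boolean flags computed in the initializer above follow the NUTS slice-sampling tests: a proposed state joins the candidate set when the slice variable u falls below exp(H), and tree building may continue while H stays above log(u) minus a large constant delta_max. A minimal, stand-alone sketch of just those two checks, assuming a precomputed log-density value and a NumPy momentum vector:

import numpy as np

def slice_checks(logp, momentum, slice_var, delta_max=10000):
    # Hamiltonian (joint log-density) of the proposed state
    hamiltonian = logp - 0.5 * np.dot(momentum, momentum)
    in_candidate_set = slice_var < np.exp(hamiltonian)            # u < exp(H)
    keep_building = hamiltonian > np.log(slice_var) - delta_max   # H > log(u) - delta_max
    return in_candidate_set, keep_building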
def status_to_string(cls, status): """ Converts a message status to a string. :param status: Status to convert (pyqode.core.modes.CheckerMessages) :return: The status string. :rtype: str """ strings = {CheckerMessages.INFO: "Info", CheckerMessages.WARNING: "Warning", CheckerMessages.ERROR: "Error"} return strings[status]
Converts a message status to a string. :param status: Status to convert (pyqode.core.modes.CheckerMessages) :return: The status string. :rtype: str
def endpoint_update(auth=None, **kwargs): ''' Update an endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 interface=public enabled=False salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 region=newregion salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 service_name_or_id=glance url=https://example.org:9292 ''' cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.update_endpoint(**kwargs)
Update an endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 interface=public enabled=False salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 region=newregion salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 service_name_or_id=glance url=https://example.org:9292
def get_gpio_mode(self, gpio_id): """ Return the gpio mode for gpio :gpio_id:. @gpio_id Character A or B. """ if not self._connected: return return self._protocol.status.get("OTGW_GPIO_{}".format(gpio_id))
Return the gpio mode for gpio :gpio_id:. @gpio_id Character A or B.
def run_flag_maf_zero(in_prefix, in_type, out_prefix, base_dir, options): """Runs step11 (flag MAF zero). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``bfile``). This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module. The required file type for this module is ``bfile``, hence the need to use the :py:func:`check_input_files` to check if the input file type is the correct one, or to create it if needed. .. note:: The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return usable output files. Hence, this function returns the input file prefix and its type. """ # Creating the output directory os.mkdir(out_prefix) # We know we need bfile required_type = "bfile" check_input_files(in_prefix, in_type, required_type) # We need to inject the name of the input file and the name of the output # prefix script_prefix = os.path.join(out_prefix, "flag_maf_0") options += ["--{}".format(required_type), in_prefix, "--out", script_prefix] # We run the script try: flag_maf_zero.main(options) except flag_maf_zero.ProgramError as e: msg = "flag_maf_zero: {}".format(e) raise ProgramError(msg) # Reading the file to compute the number of flagged markers nb_flagged = None flagged_fn = script_prefix + ".list" with open(flagged_fn, "r") as i_file: nb_flagged = len(i_file.read().splitlines()) # We write a LaTeX summary latex_file = os.path.join(script_prefix + ".summary.tex") try: with open(latex_file, "w") as o_file: print >>o_file, latex_template.subsection( flag_maf_zero.pretty_name ) safe_fn = latex_template.sanitize_tex(os.path.basename(flagged_fn)) text = ( "After computing minor allele frequencies (MAF) of all " "markers using Plink, a total of {:,d} marker{} had a MAF " "of zero and were flagged ({}).".format( nb_flagged, "s" if nb_flagged > 1 else "", "see file " + latex_template.texttt(safe_fn) + " for more information" ) ) print >>o_file, latex_template.wrap_lines(text) except IOError: msg = "{}: cannot write LaTeX summary".format(latex_file) raise ProgramError(msg) # Writing the summary results with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file: print >>o_file, "# {}".format(script_prefix) print >>o_file, ("Number of markers flagged for MAF of 0\t" "{:,d}".format(nb_flagged)) print >>o_file, "---" # We know this step doesn't produce a new data set, so we return the old # prefix and the old in_type return _StepResult( next_file=in_prefix, next_file_type=required_type, latex_summary=latex_file, description=flag_maf_zero.desc, long_description=flag_maf_zero.long_desc, graph_path=None, )
Runs step11 (flag MAF zero). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``bfile``). This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module. The required file type for this module is ``bfile``, hence the need to use the :py:func:`check_input_files` to check if the input file type is the correct one, or to create it if needed. .. note:: The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return usable output files. Hence, this function returns the input file prefix and its type.
def is_descendant_of_log(self, id_, log_id): """Tests if an ``Id`` is a descendant of a log. arg: id (osid.id.Id): an ``Id`` arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if the ``id`` is a descendant of the ``log_id,`` ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``id`` or ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_descendant_of_bin if self._catalog_session is not None: return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=log_id) return self._hierarchy_session.is_descendant(id_=id_, descendant_id=log_id)
Tests if an ``Id`` is a descendant of a log. arg: id (osid.id.Id): an ``Id`` arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if the ``id`` is a descendant of the ``log_id,`` ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``id`` or ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``.
def from_json(cls, json_info): """Build a TrialRecord instance from a parsed JSON dict.""" if json_info is None: return None return TrialRecord( trial_id=json_info["trial_id"], job_id=json_info["job_id"], trial_status=json_info["status"], start_time=json_info["start_time"], params=json_info["params"])
Build a TrialRecord instance from a parsed JSON dict.
def _add_metadata(item, metadata, remotes, only_metadata=False): """Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata. """ for check_key in [item["description"]] + _get_file_keys(item) + _get_vrn_keys(item): item_md = metadata.get(check_key) if item_md: break if not item_md: item_md = _find_glob_metadata(item["files"], metadata) if remotes.get("region"): item["algorithm"]["variant_regions"] = remotes["region"] TOP_LEVEL = set(["description", "genome_build", "lane", "vrn_file", "files", "analysis"]) keep_sample = True if item_md and len(item_md) > 0: if "metadata" not in item: item["metadata"] = {} for k, v in item_md.items(): if v: if k in TOP_LEVEL: item[k] = v elif k in run_info.ALGORITHM_KEYS: v = _handle_special_yaml_cases(v) item["algorithm"][k] = v else: v = _handle_special_yaml_cases(v) item["metadata"][k] = v elif len(metadata) > 0: warn = "Dropped sample" if only_metadata else "Added minimal sample information" print("WARNING: %s: metadata not found for %s, %s" % (warn, item["description"], [os.path.basename(f) for f in item["files"]])) keep_sample = not only_metadata if tz.get_in(["metadata", "ped"], item): item["metadata"] = _add_ped_metadata(item["description"], item["metadata"]) return item if keep_sample else None
Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata.
def gen_tensor_data(): """ toy interaction data """ X, y = toy_interaction(return_X_y=True, n=10000) gam = LinearGAM(te(0, 1,lam=0.1)).fit(X, y) XX = gam.generate_X_grid(term=0, meshgrid=True) Z = gam.partial_dependence(term=0, meshgrid=True) fig = plt.figure(figsize=(9,6)) ax = plt.axes(projection='3d') ax.dist = 7.5 ax.plot_surface(XX[0], XX[1], Z, cmap='viridis') ax.set_axis_off() fig.tight_layout() plt.savefig('imgs/pygam_tensor.png', transparent=True, dpi=300)
toy interaction data
def plot_ecg_grids(ecg_grids, fs, units, time_units, axes): "Add ecg grids to the axes" if ecg_grids == 'all': ecg_grids = range(0, len(axes)) for ch in ecg_grids: # Get the initial plot limits auto_xlims = axes[ch].get_xlim() auto_ylims= axes[ch].get_ylim() (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1], units[ch], fs, auto_xlims[1], time_units) min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x) min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y) for tick in minor_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed', marker='|', zorder=1) for tick in major_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa', marker='|', zorder=2) for tick in minor_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed', marker='_', zorder=1) for tick in major_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa', marker='_', zorder=2) # Plotting the lines changes the graph. Set the limits back axes[ch].set_xlim(auto_xlims) axes[ch].set_ylim(auto_ylims)
Add ecg grids to the axes
def _get_video(edx_video_id): """ Get a Video instance, prefetching encoded video and course information. Raises ValVideoNotFoundError if the video cannot be retrieved. """ try: return Video.objects.prefetch_related("encoded_videos", "courses").get(edx_video_id=edx_video_id) except Video.DoesNotExist: error_message = u"Video not found for edx_video_id: {0}".format(edx_video_id) raise ValVideoNotFoundError(error_message) except Exception: error_message = u"Could not get edx_video_id: {0}".format(edx_video_id) logger.exception(error_message) raise ValInternalError(error_message)
Get a Video instance, prefetching encoded video and course information. Raises ValVideoNotFoundError if the video cannot be retrieved.
def cross_list_section(self, id, new_course_id): """ Cross-list a Section. Move the Section to another course. The new course may be in a different account (department), but must belong to the same root account (institution). """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - PATH - new_course_id """ID""" path["new_course_id"] = new_course_id self.logger.debug("POST /api/v1/sections/{id}/crosslist/{new_course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/sections/{id}/crosslist/{new_course_id}".format(**path), data=data, params=params, single_item=True)
Cross-list a Section. Move the Section to another course. The new course may be in a different account (department), but must belong to the same root account (institution).
def load(filename): """Load yaml file with specific include loader.""" if os.path.isfile(filename): with open(filename) as handle: return yaml_load(handle, Loader=Loader) # nosec raise RuntimeError("File %s doesn't exist!" % filename)
Load yaml file with specific include loader.
def unquote (s, matching=False): """Remove leading and trailing single and double quotes. The quotes need to match if matching is True. Only one quote from each end will be stripped. @return: if s evaluates to False, return s as is, else return string with stripped quotes @rtype: unquoted string, or s unchanged if it is evaluating to False """ if not s: return s if len(s) < 2: return s if matching: if s[0] in ("\"'") and s[0] == s[-1]: s = s[1:-1] else: if s[0] in ("\"'"): s = s[1:] if s[-1] in ("\"'"): s = s[:-1] return s
Remove leading and trailing single and double quotes. The quotes need to match if matching is True. Only one quote from each end will be stripped. @return: if s evaluates to False, return s as is, else return string with stripped quotes @rtype: unquoted string, or s unchanged if it is evaluating to False
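A few illustrative calls (expected results shown as comments): with the default matching=False each end is stripped independently, while matching=True only removes a pair of identical quotes:

mixed = '"spam\''                    # the string: "spam'
unquote(mixed)                       # 'spam'  (both ends stripped independently)
unquote(mixed, matching=True)        # unchanged, the two quotes do not match
unquote('"spam"', matching=True)     # 'spam'
unquote('')                          # ''  (falsy input returned as is)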
def _error(self, exc_info): """ Retrieves the error info """ if self.exc_info: if self.traceback: return exc_info return exc_info[:2] return exc_info[1]
Retrieves the error info
def get_apis(self): """Returns set of api names referenced in this Registry :return: set of api name strings """ out = set(x.api for x in self.types.values() if x.api) for ft in self.features.values(): out.update(ft.get_apis()) for ext in self.extensions.values(): out.update(ext.get_apis()) return out
Returns set of api names referenced in this Registry :return: set of api name strings
def list_all_return_line_items(cls, **kwargs): """List ReturnLineItems Return a list of ReturnLineItems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_return_line_items(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ReturnLineItem] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_return_line_items_with_http_info(**kwargs) else: (data) = cls._list_all_return_line_items_with_http_info(**kwargs) return data
List ReturnLineItems Return a list of ReturnLineItems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_return_line_items(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ReturnLineItem] If the method is called asynchronously, returns the request thread.
def rpc(commands, method='cli', **kwargs): ''' Execute an arbitrary RPC request via the Nexus API. commands The commands to be executed. method: ``cli`` The type of the response, i.e., raw text (``cli_ascii``) or structured document (``cli``). Defaults to ``cli`` (structured data). transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``http``, and ``https``. host: ``localhost`` The IP address or DNS host name of the connection device. username: ``admin`` The username to pass to the device to authenticate the NX-API connection. password The password to pass to the device to authenticate the NX-API connection. port The TCP port of the endpoint for the NX-API connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). timeout: ``60`` Time in seconds to wait for the device to respond. Default: 60 seconds. verify: ``True`` Either a boolean, in which case it controls whether we verify the NX-API TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. CLI Example: .. code-block:: bash salt-call --local nxos_api.rpc 'show version' ''' nxos_api_kwargs = __salt__['config.get']('nxos_api', {}) nxos_api_kwargs.update(**kwargs) if 'nxos_api.rpc' in __proxy__ and __salt__['config.get']('proxy:proxytype') == 'nxos_api': # If the nxos_api.rpc Proxy function is available and currently running # in a nxos_api Proxy Minion return __proxy__['nxos_api.rpc'](commands, method=method, **nxos_api_kwargs) nxos_api_kwargs = __salt__['config.get']('nxos_api', {}) nxos_api_kwargs.update(**kwargs) return __utils__['nxos_api.rpc'](commands, method=method, **nxos_api_kwargs)
Execute an arbitrary RPC request via the Nexus API. commands The commands to be executed. method: ``cli`` The type of the response, i.e., raw text (``cli_ascii``) or structured document (``cli``). Defaults to ``cli`` (structured data). transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``http``, and ``https``. host: ``localhost`` The IP address or DNS host name of the connection device. username: ``admin`` The username to pass to the device to authenticate the NX-API connection. password The password to pass to the device to authenticate the NX-API connection. port The TCP port of the endpoint for the NX-API connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). timeout: ``60`` Time in seconds to wait for the device to respond. Default: 60 seconds. verify: ``True`` Either a boolean, in which case it controls whether we verify the NX-API TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. CLI Example: .. code-block:: bash salt-call --local nxos_api.rpc 'show version'
def check_conf_enabled(conf): ''' .. versionadded:: 2016.3.0 Checks to see if the specific conf symlink is in /etc/apache2/conf-enabled. This will only be functional on Debian-based operating systems (Ubuntu, Mint, etc). CLI Examples: .. code-block:: bash salt '*' apache.check_conf_enabled security salt '*' apache.check_conf_enabled security.conf ''' if conf.endswith('.conf'): conf_file = conf else: conf_file = '{0}.conf'.format(conf) return os.path.islink('/etc/apache2/conf-enabled/{0}'.format(conf_file))
.. versionadded:: 2016.3.0 Checks to see if the specific conf symlink is in /etc/apache2/conf-enabled. This will only be functional on Debian-based operating systems (Ubuntu, Mint, etc). CLI Examples: .. code-block:: bash salt '*' apache.check_conf_enabled security salt '*' apache.check_conf_enabled security.conf
def cashFlowDF(symbol, token='', version=''): '''Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result ''' val = cashFlow(symbol, token, version) df = pd.io.json.json_normalize(val, 'cashflow', 'symbol') _toDatetime(df) _reindex(df, 'reportDate') df.replace(to_replace=[None], value=np.nan, inplace=True) return df
Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
def stop(self): """ Clears all members """ # Exit the loop with self._lock: self._stop_event.set() self._shell_event.clear() if self._context is not None: # Unregister from events self._context.remove_service_listener(self) # Release the shell self.clear_shell() self._context = None
Clears all members
def getItemXML(): ''' Build an XML string that contains some randomly positioned goal items''' xml="" for item in range(NUM_GOALS): x = str(random.randint(old_div(-ARENA_WIDTH,2),old_div(ARENA_WIDTH,2))) z = str(random.randint(old_div(-ARENA_BREADTH,2),old_div(ARENA_BREADTH,2))) xml += '''<DrawItem x="''' + x + '''" y="210" z="''' + z + '''" type="''' + GOAL_TYPE + '''"/>''' return xml
Build an XML string that contains some randomly positioned goal items
def get_summaries(client, filter=None): """ Generate presentation summaries in reverse chronological order. A filter class can be supplied to filter summaries or bound the fetching process. """ try: index = 0 while True: rb = _RightBarPage(client, index) summaries = rb.summaries() if filter is not None: summaries = filter.filter(summaries) for summary in summaries: yield summary index += len(summaries) except StopIteration: pass
Generate presentation summaries in reverse chronological order. A filter class can be supplied to filter summaries or bound the fetching process.
def gmdaOnes(shape, dtype, mask=None, numGhosts=1): """ ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0) """ res = GhostedMaskedDistArray(shape, dtype) res.mask = mask res.setNumberOfGhosts(numGhosts) res[:] = 1 return res
ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
def read_ipv6_route(self, length, extension): """Read Routing Header for IPv6. Structure of IPv6-Route header [RFC 8200][RFC 5095]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . type-specific data . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extension Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.data Type-Specific Data """ if length is None: length = len(self) _next = self._read_protos(1) _hlen = self._read_unpack(1) _type = self._read_unpack(1) _left = self._read_unpack(1) ipv6_route = dict( next=_next, length=(_hlen + 1) * 8, type=_ROUTING_TYPE.get(_type, 'Unassigned'), seg_left=_left, ) _dlen = _hlen * 8 - 4 if _dlen: _func = _ROUTE_PROC.get(_type, 'none') _data = eval(f'self._read_data_type_{_func}')(_dlen) ipv6_route.update(_data) length -= ipv6_route['length'] ipv6_route['packet'] = self._read_packet(header=ipv6_route['length'], payload=length) if extension: self._protos = None return ipv6_route return self._decode_next_layer(ipv6_route, _next, length)
Read Routing Header for IPv6. Structure of IPv6-Route header [RFC 8200][RFC 5095]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . type-specific data . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extension Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.data Type-Specific Data
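The four fixed octets in the diagram can be unpacked directly from raw bytes; a self-contained sketch (independent of the parser's internal reader methods) that recovers the same fields and the total header length:

import struct

def parse_ipv6_route_fixed(raw):
    # Next Header, Hdr Ext Len, Routing Type, Segments Left -- one octet each
    next_header, hdr_ext_len, routing_type, segments_left = struct.unpack('!4B', raw[:4])
    total_length = (hdr_ext_len + 1) * 8     # header length in octets, per RFC 8200
    type_specific = raw[4:total_length]      # remaining type-specific data
    return next_header, routing_type, segments_left, total_length, type_specific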
def get_event_type(self, event_type): """GetEventType. [Preview API] Get a specific event type. :param str event_type: :rtype: :class:`<NotificationEventType> <azure.devops.v5_0.notification.models.NotificationEventType>` """ route_values = {} if event_type is not None: route_values['eventType'] = self._serialize.url('event_type', event_type, 'str') response = self._send(http_method='GET', location_id='cc84fb5f-6247-4c7a-aeae-e5a3c3fddb21', version='5.0-preview.1', route_values=route_values) return self._deserialize('NotificationEventType', response)
GetEventType. [Preview API] Get a specific event type. :param str event_type: :rtype: :class:`<NotificationEventType> <azure.devops.v5_0.notification.models.NotificationEventType>`
def contents(self, path, ref=None): """Get the contents of the file pointed to by ``path``. If the path provided is actually a directory, you will receive a dictionary back of the form:: { 'filename.md': Contents(), # Where Contents is an instance 'github.py': Contents(), } :param str path: (required), path to file, e.g. github3/repo.py :param str ref: (optional), the string name of a commit/branch/tag. Default: master :returns: :class:`Contents <github3.repos.contents.Contents>` or dict if successful, else None """ url = self._build_url('contents', path, base_url=self._api) json = self._json(self._get(url, params={'ref': ref}), 200) if isinstance(json, dict): return Contents(json, self) elif isinstance(json, list): return dict((j.get('name'), Contents(j, self)) for j in json) return None
Get the contents of the file pointed to by ``path``. If the path provided is actually a directory, you will receive a dictionary back of the form:: { 'filename.md': Contents(), # Where Contents is an instance 'github.py': Contents(), } :param str path: (required), path to file, e.g. github3/repo.py :param str ref: (optional), the string name of a commit/branch/tag. Default: master :returns: :class:`Contents <github3.repos.contents.Contents>` or dict if successful, else None
def frame2expnum(frameid): """Given a standard OSSOS frameid return the expnum, version and ccdnum as a dictionary.""" result = {} parts = re.search('(?P<expnum>\d{7})(?P<type>\S)(?P<ccd>\d\d)', frameid) assert parts is not None result['expnum'] = parts.group('expnum') result['ccd'] = parts.group('ccd') result['version'] = parts.group('type') return result
Given a standard OSSOS frameid return the expnum, version and ccdnum as a dictionary.
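For example, a hypothetical frame ID such as '1616681p22' splits into a seven-digit exposure number, a one-character version code and a two-digit CCD number:

frame2expnum('1616681p22')
# {'expnum': '1616681', 'ccd': '22', 'version': 'p'}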
def ls(self, startswith=None): ''' List all cubes available to the calling client. :param startswith: string to use in a simple "startswith" query filter :returns list: sorted list of cube names ''' logger.info('Listing cubes starting with "%s"' % startswith) startswith = unicode(startswith or '') tables = sorted(name for name in self.db_tables if name.startswith(startswith)) return tables
List all cubes available to the calling client. :param startswith: string to use in a simple "startswith" query filter :returns list: sorted list of cube names
def facade(factory): """Declare a method as a facade factory.""" wrapper = FacadeDescriptor(factory.__name__, factory) return update_wrapper(wrapper, factory)
Declare a method as a facade factory.
def gaussian_convolve (maj1, min1, pa1, maj2, min2, pa2): """Convolve two Gaussians analytically. Given the shapes of two 2-dimensional Gaussians, this function returns the shape of their convolution. Arguments: maj1 Major axis of input Gaussian 1. min1 Minor axis of input Gaussian 1. pa1 Orientation angle of input Gaussian 1, in radians. maj2 Major axis of input Gaussian 2. min2 Minor axis of input Gaussian 2. pa2 Orientation angle of input Gaussian 2, in radians. The return value is ``(maj3, min3, pa3)``, with the same format as the input arguments. The axes can be measured in any units, so long as they're consistent. Implementation copied from MIRIAD’s ``gaufac``. """ c1 = np.cos (pa1) s1 = np.sin (pa1) c2 = np.cos (pa2) s2 = np.sin (pa2) a = (maj1*c1)**2 + (min1*s1)**2 + (maj2*c2)**2 + (min2*s2)**2 b = (maj1*s1)**2 + (min1*c1)**2 + (maj2*s2)**2 + (min2*c2)**2 g = 2 * ((min1**2 - maj1**2) * s1 * c1 + (min2**2 - maj2**2) * s2 * c2) s = a + b t = np.sqrt ((a - b)**2 + g**2) maj3 = np.sqrt (0.5 * (s + t)) min3 = np.sqrt (0.5 * (s - t)) if abs (g) + abs (a - b) == 0: pa3 = 0. else: pa3 = 0.5 * np.arctan2 (-g, a - b) # "Amplitude of the resulting Gaussian": # f = pi / (4 * np.log (2)) * maj1 * min1 * maj2 * min2 \ # / np.sqrt (a * b - 0.25 * g**2) return maj3, min3, pa3
Convolve two Gaussians analytically. Given the shapes of two 2-dimensional Gaussians, this function returns the shape of their convolution. Arguments: maj1 Major axis of input Gaussian 1. min1 Minor axis of input Gaussian 1. pa1 Orientation angle of input Gaussian 1, in radians. maj2 Major axis of input Gaussian 2. min2 Minor axis of input Gaussian 2. pa2 Orientation angle of input Gaussian 2, in radians. The return value is ``(maj3, min3, pa3)``, with the same format as the input arguments. The axes can be measured in any units, so long as they're consistent. Implementation copied from MIRIAD’s ``gaufac``.
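As a quick sanity check, convolving two circular Gaussians adds their axes in quadrature, so 3-unit and 4-unit circular beams combine into a 5-unit circular beam:

import numpy as np
maj3, min3, pa3 = gaussian_convolve(3.0, 3.0, 0.0, 4.0, 4.0, 0.0)
assert np.allclose([maj3, min3, pa3], [5.0, 5.0, 0.0])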
def _get_goslimids_norel(self, dagslim): """Get all GO slim GO IDs that do not have a relationship.""" go_slims = set() go2obj = self.gosubdag.go2obj for goid in dagslim: goobj = go2obj[goid] if not goobj.relationship: go_slims.add(goobj.id) return go_slims
Get all GO slim GO IDs that do not have a relationship.
def create_guest_screen_info(self, display, status, primary, change_origin, origin_x, origin_y, width, height, bits_per_pixel): """Make a IGuestScreenInfo object with the provided parameters. in display of type int The number of the guest display. in status of type :class:`GuestMonitorStatus` @c True, if this guest screen is enabled, @c False otherwise. in primary of type bool Whether this guest monitor must be primary. in change_origin of type bool @c True, if the origin of the guest screen should be changed, @c False otherwise. in origin_x of type int The X origin of the guest screen. in origin_y of type int The Y origin of the guest screen. in width of type int The width of the guest screen. in height of type int The height of the guest screen. in bits_per_pixel of type int The number of bits per pixel of the guest screen. return guest_screen_info of type :class:`IGuestScreenInfo` The created object. """ if not isinstance(display, baseinteger): raise TypeError("display can only be an instance of type baseinteger") if not isinstance(status, GuestMonitorStatus): raise TypeError("status can only be an instance of type GuestMonitorStatus") if not isinstance(primary, bool): raise TypeError("primary can only be an instance of type bool") if not isinstance(change_origin, bool): raise TypeError("change_origin can only be an instance of type bool") if not isinstance(origin_x, baseinteger): raise TypeError("origin_x can only be an instance of type baseinteger") if not isinstance(origin_y, baseinteger): raise TypeError("origin_y can only be an instance of type baseinteger") if not isinstance(width, baseinteger): raise TypeError("width can only be an instance of type baseinteger") if not isinstance(height, baseinteger): raise TypeError("height can only be an instance of type baseinteger") if not isinstance(bits_per_pixel, baseinteger): raise TypeError("bits_per_pixel can only be an instance of type baseinteger") guest_screen_info = self._call("createGuestScreenInfo", in_p=[display, status, primary, change_origin, origin_x, origin_y, width, height, bits_per_pixel]) guest_screen_info = IGuestScreenInfo(guest_screen_info) return guest_screen_info
Make a IGuestScreenInfo object with the provided parameters. in display of type int The number of the guest display. in status of type :class:`GuestMonitorStatus` @c True, if this guest screen is enabled, @c False otherwise. in primary of type bool Whether this guest monitor must be primary. in change_origin of type bool @c True, if the origin of the guest screen should be changed, @c False otherwise. in origin_x of type int The X origin of the guest screen. in origin_y of type int The Y origin of the guest screen. in width of type int The width of the guest screen. in height of type int The height of the guest screen. in bits_per_pixel of type int The number of bits per pixel of the guest screen. return guest_screen_info of type :class:`IGuestScreenInfo` The created object.
def flip(self): ''' :returns: None Swaps the positions of A and B. ''' tmp = self.A.xyz self.A = self.B self.B = tmp
:returns: None Swaps the positions of A and B.
def _get_argument(self, argument_node): """ Returns a FritzActionArgument instance for the given argument_node. """ argument = FritzActionArgument() argument.name = argument_node.find(self.nodename('name')).text argument.direction = argument_node.find(self.nodename('direction')).text rsv = argument_node.find(self.nodename('relatedStateVariable')).text # TODO: track malformed xml-nodes (i.e. misspelled) argument.data_type = self.state_variables.get(rsv, None) return argument
Returns a FritzActionArgument instance for the given argument_node.
def cublasSdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc): """ Matrix-diagonal matrix product for real general matrix. """ status = _libcublas.cublasSdgmm(handle, _CUBLAS_SIDE[mode], m, n, int(A), lda, int(x), incx, int(C), ldc) cublasCheckStatus(status)
Matrix-diagonal matrix product for real general matrix.
def get_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs): """Find BraintreeGateway Return single instance of BraintreeGateway by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_braintree_gateway_by_id(braintree_gateway_id, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to return (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) else: (data) = cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) return data
Find BraintreeGateway Return single instance of BraintreeGateway by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_braintree_gateway_by_id(braintree_gateway_id, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to return (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread.
def graph(data): """Draws graph of rating vs episode number""" title = data['name'] + ' (' + data['rating'] + ') ' plt.title(title) plt.xlabel('Episode Number') plt.ylabel('Ratings') rf,ef=graphdata(data) col=['red', 'green' , 'orange'] for i in range(len(rf)): x,y=ef[i],rf[i] k = i + 1 plt.plot(x, y,color=col[i%3]) x1, x2, y1, y2 = plt.axis() y2 = 10 if y1 > 7: y1 = 7 plt.axis([x1, x2, y1, y2]) plt.show()
Draws graph of rating vs episode number
def format_BLB(): """Sets some formatting options in Matplotlib.""" rc("figure", facecolor="white") rc('font', family = 'serif', size=10) #, serif = 'cmr10') rc('xtick', labelsize=10) rc('ytick', labelsize=10) rc('axes', linewidth=1) rc('xtick.major', size=4, width=1) rc('xtick.minor', size=2, width=1) rc('ytick.major', size=4, width=1) rc('ytick.minor', size=2, width=1)
Sets some formatting options in Matplotlib.
def insert(self, table, insert_obj, ignore=True): """[insert bulk data] Arguments: table {[DeclarativeMeta cls]} -- [reflection of table] insert_obj {[pd.DataFrame or list of dicts]} -- [insert_obj] Keyword Arguments: ignore {bool} -- [whether to ignore exceptions or not] (default: {True}) Raises: ValueError -- [f"The {reprlib.repr(insert_obj)} must be list of dicts type!"] Returns: [type] -- [description] """ if isinstance(insert_obj, pd.DataFrame): if insert_obj.empty: raise ValueError('The input DataFrame is empty, please check!') insert_obj = insert_obj.to_dict(orient='records') elif not isinstance(insert_obj, list): raise ValueError( f"The {reprlib.repr(insert_obj)} must be list of dicts type!") ignore_str = 'IGNORE' if ignore else '' return self._session.execute( table.__table__.insert().prefix_with(ignore_str), insert_obj)
[insert bulk data] Arguments: table {[DeclarativeMeta cls]} -- [reflection of table] insert_obj {[pd.DataFrame or list of dicts]} -- [insert_obj] Keyword Arguments: ignore {bool} -- [whether to ignore exceptions or not] (default: {True}) Raises: ValueError -- [f"The {reprlib.repr(insert_obj)} must be list of dicts type!"] Returns: [type] -- [description]
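A usage sketch, assuming a hypothetical mapped table class User and this wrapper bound to an open session as db: a DataFrame is converted to records and written with an INSERT IGNORE prefix, so duplicate keys are skipped rather than raising:

import pandas as pd

frame = pd.DataFrame([{'id': 1, 'name': 'alice'},
                      {'id': 2, 'name': 'bob'}])
db.insert(User, frame)                                          # INSERT IGNORE ...
db.insert(User, frame.to_dict(orient='records'), ignore=False)  # plain INSERT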
def _params_extend(params, _ignore_name=False, **kwargs): ''' Extends the params dictionary by values from keyword arguments. .. versionadded:: 2016.3.0 :param params: Dictionary with parameters for zabbix API. :param _ignore_name: Salt State module is passing first line as 'name' parameter. If API uses optional parameter 'name' (for ex. host_create, user_create method), please use 'visible_name' or 'firstname' instead of 'name' to not mess these values. :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Extended params dictionary with parameters. ''' # extend params value by optional zabbix API parameters for key in kwargs: if not key.startswith('_'): params.setdefault(key, kwargs[key]) # ignore name parameter passed from Salt state module, use firstname or visible_name instead if _ignore_name: params.pop('name', None) if 'firstname' in params: params['name'] = params.pop('firstname') elif 'visible_name' in params: params['name'] = params.pop('visible_name') return params
Extends the params dictionary by values from keyword arguments. .. versionadded:: 2016.3.0 :param params: Dictionary with parameters for zabbix API. :param _ignore_name: Salt State module is passing first line as 'name' parameter. If API uses optional parameter 'name' (for ex. host_create, user_create method), please use 'visible_name' or 'firstname' instead of 'name' to not mess these values. :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Extended params dictionary with parameters.
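A small illustration of the intended behaviour (all names below are made up): keyword arguments without a leading underscore are merged in, connection parameters starting with '_' are dropped, and with _ignore_name=True a visible_name is remapped to the API's name parameter:

_params_extend({'host': 'web01'}, _ignore_name=True,
               visible_name='Web server 01', status=0,
               _connection_user='admin')
# {'host': 'web01', 'status': 0, 'name': 'Web server 01'}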
def _generate(self, message): """Given a message in message, return a response in the appropriate format.""" raw_params = {"INPUT_TEXT" : message.encode('UTF8'), "INPUT_TYPE" : self.input_type, "OUTPUT_TYPE" : self.output_type, "LOCALE" : self._locale, "AUDIO" : self.audio, "VOICE" : self._voice, } params = urlencode(raw_params) headers = {} logging.debug('maryclient: generate, raw_params=%s' % repr(raw_params)) # Open connection to self._host, self._port. conn = httplib.HTTPConnection(self._host, self._port) #conn.set_debuglevel(5) conn.request("POST", "/process", params, headers) response = conn.getresponse() if response.status != 200: logging.error(response.getheaders()) raise Exception ("{0}: {1}".format(response.status, response.reason)) return response.read()
Given a message in message, return a response in the appropriate format.
def get_provider(): """Return an instance of the BLE provider for the current platform.""" global _provider # Set the provider based on the current platform. if _provider is None: if sys.platform.startswith('linux'): # Linux platform from .bluez_dbus.provider import BluezProvider _provider = BluezProvider() elif sys.platform == 'darwin': # Mac OSX platform from .corebluetooth.provider import CoreBluetoothProvider _provider = CoreBluetoothProvider() else: # Unsupported platform raise RuntimeError('Sorry the {0} platform is not supported by the BLE library!'.format(sys.platform)) return _provider
Return an instance of the BLE provider for the current platform.
def add_file_normal(f, targetdir, generator,script, source): """ Add a normal file including its source """ basename = os.path.basename(f) if targetdir != ".": relativepath = os.path.join(targetdir, basename) else: relativepath = basename relpath = os.path.relpath(f, os.getcwd()) filetype = 'data' if script: filetype = 'script' if generator: filetype = 'generator' update = OrderedDict([ ('type', filetype), ('generator', generator), ('relativepath', relativepath), ('content', ""), ('source', source), ('localfullpath', f), ('localrelativepath', relpath) ]) update = annotate_record(update) return (basename, update)
Add a normal file including its source
def files(self, *, bundle: str=None, tags: List[str]=None, version: int=None, path: str=None) -> models.File: """Fetch files from the store.""" query = self.File.query if bundle: query = (query.join(self.File.version, self.Version.bundle) .filter(self.Bundle.name == bundle)) if tags: # require records to match ALL tags query = ( query.join(self.File.tags) .filter(self.Tag.name.in_(tags)) .group_by(models.File.id) .having(func.count(models.Tag.name) == len(tags)) ) if version: query = query.join(self.File.version).filter(self.Version.id == version) if path: query = query.filter_by(path=path) return query
Fetch files from the store.
def any_unique(keys, axis=semantics.axis_default): """returns true if any of the keys is unique""" index = as_index(keys, axis) return np.any(index.count == 1)
returns true if any of the keys is unique
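For example, a key that occurs exactly once makes the result True:

any_unique([1, 1, 2, 3, 3])   # True  -- the key 2 occurs only once
any_unique([1, 1, 3, 3])      # False -- every key repeats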
def json_changepass(self): ''' The first char of 'code' stands for the different field. '1' for user_name '2' for user_email '3' for user_pass '4' for user_role The second char of 'code' stands for different status. '1' for invalid '2' for already exists. ''' # user_create_status = {'success': False, 'code': '00'} # Not used currently. post_data = self.get_post_data() check_usr_status = MUser.check_user(self.userinfo.uid, post_data['rawpass']) if check_usr_status == 1: user_create_status = self.__check_valid_pass(post_data) if not user_create_status['success']: return json.dump(user_create_status, self) form_pass = SumFormPass(self.request.arguments) if form_pass.validate(): MUser.update_pass(self.userinfo.uid, post_data['user_pass']) return json.dump(user_create_status, self) return json.dump(user_create_status, self) return False
The first char of 'code' stands for the different field. '1' for user_name '2' for user_email '3' for user_pass '4' for user_role The second char of 'code' stands for different status. '1' for invalid '2' for already exists.
def dataframe(self): """ Returns a pandas DataFrame where each row is a representation of the Game class. Rows are indexed by the boxscore string. """ frames = [] for game in self.__iter__(): df = game.dataframe if df is not None: frames.append(df) if frames == []: return None return pd.concat(frames)
Returns a pandas DataFrame where each row is a representation of the Game class. Rows are indexed by the boxscore string.
def get_devices(device_type: DeviceType) -> Iterator[str]: """Gets names of power devices of the specified type. :param str device_type: the type of the devices to retrieve :return: the device names :rtype: Iterator[str] """ for device in BASE_PATH.iterdir(): with open(str(Path(device, 'type'))) as type_file: if type_file.readline().strip() == device_type.value: yield device.name
Gets names of power devices of the specified type. :param str device_type: the type of the devices to retrieve :return: the device names :rtype: Iterator[str]
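Assuming a DeviceType enum whose member values match the strings found in each device's type file (for example a hypothetical DeviceType.BATTERY with value 'Battery'), listing batteries is just a matter of iterating the generator:

for name in get_devices(DeviceType.BATTERY):
    print(name)   # e.g. 'BAT0'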
def expand_requirement(request, paths=None): """Expands a requirement string like 'python-2.*', 'foo-2.*+<*', etc. Wildcards are expanded to the latest version that matches. There is also a special wildcard '**' that will expand to the full version, but it cannot be used in combination with '*'. Wildcards MUST placehold a whole version token, not partial - while 'foo-2.*' is valid, 'foo-2.v*' is not. Wildcards MUST appear at the end of version numbers - while 'foo-1.*.*' is valid, 'foo-1.*.0' is not. It is possible that an expansion will result in an invalid request string (such as 'foo-2+<2'). The appropriate exception will be raised if this happens. Examples: >>> print expand_requirement('python-2.*') python-2.7 >>> print expand_requirement('python==2.**') python==2.7.12 >>> print expand_requirement('python<**') python<3.0.5 Args: request (str): Request to expand, eg 'python-2.*' paths (list of str, optional): paths to search for package families, defaults to `config.packages_path`. Returns: str: Expanded request string. """ if '*' not in request: return request from rez.vendor.version.version import VersionRange from rez.vendor.version.requirement import Requirement from rez.packages_ import get_latest_package from uuid import uuid4 wildcard_map = {} expanded_versions = {} request_ = request # replace wildcards with valid version tokens that can be replaced again # afterwards. This produces a horrendous, but both valid and temporary, # version string. # while "**" in request_: uid = "_%s_" % uuid4().hex request_ = request_.replace("**", uid, 1) wildcard_map[uid] = "**" while '*' in request_: uid = "_%s_" % uuid4().hex request_ = request_.replace('*', uid, 1) wildcard_map[uid] = '*' # create the requirement, then expand wildcards # req = Requirement(request_, invalid_bound_error=False) def expand_version(version): rank = len(version) wildcard_found = False while version and str(version[-1]) in wildcard_map: token = wildcard_map[str(version[-1])] version = version.trim(len(version) - 1) if token == "**": if wildcard_found: # catches bad syntax '**.*' return None else: wildcard_found = True rank = 0 break wildcard_found = True if not wildcard_found: return None range_ = VersionRange(str(version)) package = get_latest_package(name=req.name, range_=range_, paths=paths) if package is None: return version if rank: return package.version.trim(rank) else: return package.version def visit_version(version): # requirements like 'foo-1' are actually represented internally as # 'foo-1+<1_' - '1_' is the next possible version after '1'. So we have # to detect this case and remap the uid-ified wildcard back here too. # for v, expanded_v in expanded_versions.iteritems(): if version == v.next(): return expanded_v.next() version_ = expand_version(version) if version_ is None: return None expanded_versions[version] = version_ return version_ if req.range_ is not None: req.range_.visit_versions(visit_version) result = str(req) # do some cleanup so that long uids aren't left in invalid wildcarded strings for uid, token in wildcard_map.iteritems(): result = result.replace(uid, token) # cast back to a Requirement again, then back to a string. This will catch # bad verison ranges, but will also put OR'd version ranges into the correct # order expanded_req = Requirement(result) return str(expanded_req)
Expands a requirement string like 'python-2.*', 'foo-2.*+<*', etc. Wildcards are expanded to the latest version that matches. There is also a special wildcard '**' that will expand to the full version, but it cannot be used in combination with '*'. Wildcards MUST placehold a whole version token, not partial - while 'foo-2.*' is valid, 'foo-2.v*' is not. Wildcards MUST appear at the end of version numbers - while 'foo-1.*.*' is valid, 'foo-1.*.0' is not. It is possible that an expansion will result in an invalid request string (such as 'foo-2+<2'). The appropriate exception will be raised if this happens. Examples: >>> print expand_requirement('python-2.*') python-2.7 >>> print expand_requirement('python==2.**') python==2.7.12 >>> print expand_requirement('python<**') python<3.0.5 Args: request (str): Request to expand, eg 'python-2.*' paths (list of str, optional): paths to search for package families, defaults to `config.packages_path`. Returns: str: Expanded request string.
def IntegerAbs(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Takes the absolute value of a vertex :param input_vertex: the vertex """ return Integer(context.jvm_view().IntegerAbsVertex, label, cast_to_integer_vertex(input_vertex))
Takes the absolute value of a vertex :param input_vertex: the vertex
def add_filter(self, component, filter_group="pyxley-filter"): """Add a filter to the layout.""" if getattr(component, "name") != "Filter": raise Exception("Component is not an instance of Filter") if filter_group not in self.filters: self.filters[filter_group] = [] self.filters[filter_group].append(component)
Add a filter to the layout.
def GetTaskPendingMerge(self, current_task): """Retrieves the first task that is pending merge or has a higher priority. This function will check if there is a task with a higher merge priority than the current_task being merged. If so, that task with the higher priority is returned. Args: current_task (Task): current task being merged or None if no such task. Returns: Task: the next task to merge or None if there is no task pending merge or with a higher priority. """ next_task = self._tasks_pending_merge.PeekTask() if not next_task: return None if current_task and next_task.merge_priority > current_task.merge_priority: return None with self._lock: next_task = self._tasks_pending_merge.PopTask() self._tasks_merging[next_task.identifier] = next_task return next_task
Retrieves the first task that is pending merge or has a higher priority. This function will check if there is a task with a higher merge priority than the current_task being merged. If so, that task with the higher priority is returned. Args: current_task (Task): current task being merged or None if no such task. Returns: Task: the next task to merge or None if there is no task pending merge or with a higher priority.
def style_similarity(page1, page2): """ Computes CSS style Similarity between two DOM trees A = classes(Document_1) B = classes(Document_2) style_similarity = |A & B| / (|A| + |B| - |A & B|) :param page1: html of the page1 :param page2: html of the page2 :return: Number between 0 and 1. If the number is next to 1 the page are really similar. """ classes_page1 = get_classes(page1) classes_page2 = get_classes(page2) return jaccard_similarity(classes_page1, classes_page2)
Computes CSS style Similarity between two DOM trees A = classes(Document_1) B = classes(Document_2) style_similarity = |A & B| / (|A| + |B| - |A & B|) :param page1: html of the page1 :param page2: html of the page2 :return: Number between 0 and 1. If the number is next to 1 the page are really similar.
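The jaccard_similarity helper it relies on reduces to the formula above; a minimal sketch over plain sets (the project's real helper may differ in details):

def jaccard_similarity(a, b):
    a, b = set(a), set(b)
    if not a and not b:
        return 1.0   # treat two documents with no classes as identical
    intersection = len(a & b)
    return intersection / (len(a) + len(b) - intersection)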
def _normalize_str(self, string): """ Remove special characters and strip spaces """ if string: if not isinstance(string, str): string = str(string, 'utf-8', 'replace') return unicodedata.normalize('NFKD', string).encode( 'ASCII', 'ignore').decode('ASCII') return ''
Remove special characters and strip spaces
def _finalized_internal(self, context, pipeline_key, root_pipeline_key, caller_output, aborted): """Used by the Pipeline evaluator to finalize this Pipeline.""" result_status = _PipelineRecord.RUN if aborted: result_status = _PipelineRecord.ABORTED self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, result_status) logging.debug('Finalizing %s(*%r, **%r)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) try: self.finalized() except NotImplementedError: pass
Used by the Pipeline evaluator to finalize this Pipeline.
def is_valid_sound_tuple(sound_tuple, final_form=True): """ Check if a character combination complies with Vietnamese phonology. The basic idea is that if one can pronounce a sound_tuple then it's valid. Sound tuples containing consonants exclusively (almost always abbreviations) are also valid. Input: sound_tuple - a SoundTuple final_form - whether the tuple represents a complete word Output: True if the tuple seems to be Vietnamese, False otherwise. """ # We only work with lower case sound_tuple = SoundTuple._make([s.lower() for s in sound_tuple]) # Words with no vowel are always valid # FIXME: This looks like it should be toggled by a config key. if not sound_tuple.vowel: result = True elif final_form: result = \ has_valid_consonants(sound_tuple) and \ has_valid_vowel(sound_tuple) and \ has_valid_accent(sound_tuple) else: result = \ has_valid_consonants(sound_tuple) and \ has_valid_vowel_non_final(sound_tuple) return result
Check if a character combination complies with Vietnamese phonology. The basic idea is that if one can pronounce a sound_tuple then it's valid. Sound tuples containing consonants exclusively (almost always abbreviations) are also valid. Input: sound_tuple - a SoundTuple final_form - whether the tuple represents a complete word Output: True if the tuple seems to be Vietnamese, False otherwise.
def _get_max_size(parts, size=1): """ Given a list of parts, find the maximum number of commands contained in it. """ max_group_size = 0 for part in parts: if isinstance(part, list): group_size = 0 for input_group in part: group_size += 1 if group_size > max_group_size: max_group_size = group_size magic_size = _get_magic_size(parts) return max_group_size * magic_size
Given a list of parts, find the maximum number of commands contained in it.
def get_new_oids(self): ''' Returns a list of unique oids that have not been extracted yet. Essentially, a diff of distinct oids in the source database compared to cube. ''' table = self.lconfig.get('table') _oid = self.lconfig.get('_oid') if is_array(_oid): _oid = _oid[0] # get the db column, not the field alias last_id = self.container.get_last_field(field='_oid') ids = [] if last_id: try: # try to convert to integer... if not, assume unicode value last_id = float(last_id) where = "%s.%s > %s" % (table, _oid, last_id) except (TypeError, ValueError): where = "%s.%s > '%s'" % (table, _oid, last_id) ids = self.sql_get_oids(where) return ids
Returns a list of unique oids that have not been extracted yet. Essentially, a diff of distinct oids in the source database compared to cube.
def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'): """ Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i' """ source_string = open(source, 'r').read() return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=group_marker_closing)
Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i'
def get_most_recent_release(self, group, artifact, remote=False): """Get the version number of the most recent release (non-integration version) of a particular group and artifact combination. :param str group: Group of the artifact to get the version of :param str artifact: Name of the artifact to get the version of :param bool remote: Should remote repositories be searched to find the latest version? Note this can make the request much slower. Default is false. :return: Version number of the most recent release :rtype: str :raises requests.exceptions.HTTPError: For any non-success HTTP responses from the Artifactory API. """ url = self._base_url + '/api/search/latestVersion' params = {'g': group, 'a': artifact, 'repos': self._repo, 'remote': int(remote)} self._logger.debug("Using latest version API at %s - params %s", url, params) response = self._session.get(url, params=params) response.raise_for_status() return response.text.strip()
Get the version number of the most recent release (non-integration version) of a particular group and artifact combination. :param str group: Group of the artifact to get the version of :param str artifact: Name of the artifact to get the version of :param bool remote: Should remote repositories be searched to find the latest version? Note this can make the request much slower. Default is false. :return: Version number of the most recent release :rtype: str :raises requests.exceptions.HTTPError: For any non-success HTTP responses from the Artifactory API.
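For reference, here is a minimal sketch of the underlying Artifactory request this method makes, written with requests directly; the base URL and repository name are placeholders.

import requests

resp = requests.get(
    'https://example.org/artifactory/api/search/latestVersion',  # placeholder base URL
    params={'g': 'com.example', 'a': 'my-lib',
            'repos': 'libs-release-local', 'remote': 0})
resp.raise_for_status()
latest = resp.text.strip()  # e.g. '1.4.2'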
def StaticAdd(cls, collection_urn, rdf_value, timestamp=None, suffix=None, mutation_pool=None): """Adds an rdf value to a collection. Adds an rdf value to a collection. Does not require that the collection be open. NOTE: The caller is responsible for ensuring that the collection exists and is of the correct type. Args: collection_urn: The urn of the collection to add to. rdf_value: The rdf value to add to the collection. timestamp: The timestamp (in microseconds) to store the rdf value at. Defaults to the current time. suffix: A 'fractional timestamp' suffix to reduce the chance of collisions. Defaults to a random number. mutation_pool: A MutationPool object to write to. Returns: The pair (timestamp, suffix) which identifies the value within the collection. Raises: ValueError: rdf_value has unexpected type. """ if not isinstance(rdf_value, cls.RDF_TYPE): raise ValueError("This collection only accepts values of type %s." % cls.RDF_TYPE.__name__) if mutation_pool is None: raise ValueError("Mutation pool can't be none.") if timestamp is None: timestamp = rdfvalue.RDFDatetime.Now() if isinstance(timestamp, rdfvalue.RDFDatetime): timestamp = timestamp.AsMicrosecondsSinceEpoch() if not rdf_value.age: rdf_value.age = rdfvalue.RDFDatetime.Now() if not isinstance(collection_urn, rdfvalue.RDFURN): collection_urn = rdfvalue.RDFURN(collection_urn) _, timestamp, suffix = mutation_pool.CollectionAddItem( collection_urn, rdf_value, timestamp, suffix=suffix) return timestamp, suffix
Adds an rdf value to a collection. Adds an rdf value to a collection. Does not require that the collection be open. NOTE: The caller is responsible for ensuring that the collection exists and is of the correct type. Args: collection_urn: The urn of the collection to add to. rdf_value: The rdf value to add to the collection. timestamp: The timestamp (in microseconds) to store the rdf value at. Defaults to the current time. suffix: A 'fractional timestamp' suffix to reduce the chance of collisions. Defaults to a random number. mutation_pool: A MutationPool object to write to. Returns: The pair (timestamp, suffix) which identifies the value within the collection. Raises: ValueError: rdf_value has unexpected type.
def list_since(self, message_id, limit=None): """Return a page of group messages created since a message. This is used to fetch the most recent messages after another. There may exist messages between the one given and the ones returned. Use :func:`list_after` to retrieve newer messages without skipping any. :param str message_id: the ID of a message :param int limit: maximum number of messages per page :return: group messages :rtype: :class:`~groupy.pagers.MessageList` """ return self.list(since_id=message_id, limit=limit)
Return a page of group messages created since a message. This is used to fetch the most recent messages after another. There may exist messages between the one given and the ones returned. Use :func:`list_after` to retrieve newer messages without skipping any. :param str message_id: the ID of a message :param int limit: maximum number of messages per page :return: group messages :rtype: :class:`~groupy.pagers.MessageList`
def get_rejection_reasons(self, keyword=None): """ Returns a list with the rejection reasons as strings :param keyword: set of rejection reasons to be retrieved. Possible values are: - 'selected': Get, amongst the set of predefined reasons, the ones selected - 'other': Get the user free-typed reason for rejection - None: Get all rejection reasons :return: list of rejection reasons as strings or an empty list """ keys = ['selected', 'other'] if keyword is None: return sum(map(self.get_rejection_reasons, keys), []) if keyword not in keys: return [] rejection_reasons = self.context.getRejectionReasons() rejection_reasons = rejection_reasons and rejection_reasons[0] or {} if keyword == 'other': return rejection_reasons.get(keyword, '') and [rejection_reasons.get(keyword, '')] or [] return rejection_reasons.get(keyword, [])
Returns a list with the rejection reasons as strings :param keyword: set of rejection reasons to be retrieved. Possible values are: - 'selected': Get, amongst the set of predefined reasons, the ones selected - 'other': Get the user free-typed reason for rejection - None: Get all rejection reasons :return: list of rejection reasons as strings or an empty list
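The keyword=None branch relies on sum(..., []) to flatten the per-keyword lists into a single list; a small illustration with made-up reasons:

parts = [['Sample damaged', 'Wrong container'], ['free-text reason']]
sum(parts, [])  # ['Sample damaged', 'Wrong container', 'free-text reason']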
def wait_on_receipt(self): """ Wait until we receive a message receipt. """ with self.receipt_condition: while not self.received: self.receipt_condition.wait() self.received = False
Wait until we receive a message receipt.
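A self-contained sketch of the wait/notify pair this method belongs to; the on_receipt counterpart is assumed here and stands in for whatever listener callback sets the flag in the original class.

import threading

class ReceiptWaiter(object):
    def __init__(self):
        self.receipt_condition = threading.Condition()
        self.received = False

    def wait_on_receipt(self):
        # Block until the listener reports a receipt, then reset the flag.
        with self.receipt_condition:
            while not self.received:
                self.receipt_condition.wait()
            self.received = False

    def on_receipt(self):
        # Hypothetical counterpart called from the listener thread.
        with self.receipt_condition:
            self.received = True
            self.receipt_condition.notify()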
def set_extra_info(self, username, extra_info):
    """Set extra info for the given user.

    Raise a ServerError if an error occurs in the request process.

    @param username The username for the user to update.
    @param extra_info The extra info as a JSON encoded string, or as a
        Python dictionary-like object.
    """
    url = self._get_extra_info_url(username)
    make_request(url, method='PUT', body=extra_info, timeout=self.timeout)
Set extra info for the given user. Raise a ServerError if an error occurs in the request process. @param username The username for the user to update. @param extra_info The extra info as a JSON encoded string, or as a Python dictionary-like object.
def subarc_between_points(self, p_from=None, p_to=None): ''' Given two points on the arc, extract a sub-arc between those points. No check is made to verify the points are actually on the arc. It is basically a wrapper around subarc(point_as_angle(p_from), point_as_angle(p_to)). Either p_from or p_to may be None to denote first or last arc endpoints. >>> a = Arc((0, 0), 1, 0, 90, True) >>> a.subarc_between_points((1, 0), (np.cos(np.pi/4), np.sin(np.pi/4))) Arc([0.000, 0.000], 1.000, 0.000, 45.000, True, degrees=45.000) >>> a.subarc_between_points(None, None) Arc([0.000, 0.000], 1.000, 0.000, 90.000, True, degrees=90.000) >>> a.subarc_between_points((np.cos(np.pi/4), np.sin(np.pi/4))) Arc([0.000, 0.000], 1.000, 45.000, 90.000, True, degrees=45.000) ''' a_from = self.point_as_angle(p_from) if p_from is not None else None a_to = self.point_as_angle(p_to) if p_to is not None else None return self.subarc(a_from, a_to)
Given two points on the arc, extract a sub-arc between those points. No check is made to verify the points are actually on the arc. It is basically a wrapper around subarc(point_as_angle(p_from), point_as_angle(p_to)). Either p_from or p_to may be None to denote first or last arc endpoints. >>> a = Arc((0, 0), 1, 0, 90, True) >>> a.subarc_between_points((1, 0), (np.cos(np.pi/4), np.sin(np.pi/4))) Arc([0.000, 0.000], 1.000, 0.000, 45.000, True, degrees=45.000) >>> a.subarc_between_points(None, None) Arc([0.000, 0.000], 1.000, 0.000, 90.000, True, degrees=90.000) >>> a.subarc_between_points((np.cos(np.pi/4), np.sin(np.pi/4))) Arc([0.000, 0.000], 1.000, 45.000, 90.000, True, degrees=45.000)
def sorted_bases(bases): '''If a class subclasses each class in bases (in that order), then this function returns the would-be python mro for the created class, minus <object>. ''' ret = [] for base in bases: # lst = [super(base), super(super(base)), ..., highest_base] lst = _bases(base) if not ret: ret = lst elif not any(b in ret for b in lst): ret += lst else: buf = [] for b in lst: if b in ret: if buf: ret = graft(ret, buf, ret.index(b)) buf = [] else: buf.append(b) if buf: ret += buf return ret
If a class subclasses each class in bases (in that order), then this function returns the would-be python mro for the created class, minus <object>.
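Since the docstring says the result mirrors the MRO Python itself would build (minus <object>), a quick way to sanity-check the function is to compare it against type():

class A(object): pass
class B(A): pass
class C(A): pass

# The would-be MRO for a class subclassing B then C, excluding the class
# itself and <object>:
expected = list(type('X', (B, C), {}).__mro__[1:-1])   # [B, C, A]
# sorted_bases((B, C)) is expected to produce the same ordering.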
def read_h5ad(filename, backed: Optional[str] = None, chunk_size: int = 6000): """Read ``.h5ad``-formatted hdf5 file. Parameters ---------- filename File name of data file. backed : {``None``, ``'r'``, ``'r+'``} If ``'r'``, load :class:`~anndata.AnnData` in ``backed`` mode instead of fully loading it into memory (`memory` mode). If you want to modify backed attributes of the AnnData object, you need to choose ``'r+'``. chunk_size Used only when loading sparse dataset that is stored as dense. Loading iterates through chunks of the dataset of this row size until it reads the whole dataset. Higher size means higher memory consumption and higher loading speed. """ if isinstance(backed, bool): # We pass `None`s through to h5py.File, and its default is “a” # (=“r+”, but create the file if it doesn’t exist) backed = 'r+' if backed else None warnings.warn( "In a future version, read_h5ad will no longer explicitly support " "boolean arguments. Specify the read mode, or leave `backed=None`.", DeprecationWarning, ) if backed: # open in backed-mode return AnnData(filename=filename, filemode=backed) else: # load everything into memory constructor_args = _read_args_from_h5ad(filename=filename, chunk_size=chunk_size) X = constructor_args[0] dtype = None if X is not None: dtype = X.dtype.name # maintain dtype, since 0.7 return AnnData(*_read_args_from_h5ad(filename=filename, chunk_size=chunk_size), dtype=dtype)
Read ``.h5ad``-formatted hdf5 file. Parameters ---------- filename File name of data file. backed : {``None``, ``'r'``, ``'r+'``} If ``'r'``, load :class:`~anndata.AnnData` in ``backed`` mode instead of fully loading it into memory (`memory` mode). If you want to modify backed attributes of the AnnData object, you need to choose ``'r+'``. chunk_size Used only when loading sparse dataset that is stored as dense. Loading iterates through chunks of the dataset of this row size until it reads the whole dataset. Higher size means higher memory consumption and higher loading speed.
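Minimal usage sketch (the file path is a placeholder):

adata = read_h5ad('data.h5ad')                  # fully loaded into memory
backed = read_h5ad('data.h5ad', backed='r')     # backed, read-only mode
writable = read_h5ad('data.h5ad', backed='r+')  # backed, allows modifying backed attributes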
def no_login_required(func):
    """
    Dummy decorator. @login_required will inspect the method to look for this decorator.
    Use this decorator when you do not want to require login in a "@login_required"
    class/method.
    :param func: the view function or method to exempt from the login requirement
    :return: the wrapped function, unchanged in behavior
    """
    @functools.wraps(func)
    def decorated_view(*args, **kwargs):
        return func(*args, **kwargs)
    return decorated_view
Dummy decorator. @login_required will inspect the method to look for this decorator. Use this decorator when you do not want to require login in a "@login_required" class/method. :param func: the view function or method to exempt from the login requirement :return: the wrapped function, unchanged in behavior
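A hedged usage sketch; @login_required here stands for whatever class-level decorator the framework applies and later inspects for this marker.

@login_required
class AccountHandler(object):

    def profile(self):
        # protected by the class-level login requirement
        ...

    @no_login_required
    def health_check(self):
        # exempt: @login_required finds the marker and skips the check
        return 'ok'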
def verify(self, secret_key): """ Verifies the authenticity of a notification message. TODO: This is doing a form of authentication and this functionality should really be merged with the pluggable authentication mechanism at some point. """ verification_input = NotificationMessage.SERVICE_NAME verification_input += NotificationMessage.OPERATION_NAME verification_input += self.timestamp h = hmac.new(key=secret_key, digestmod=sha) h.update(verification_input) signature_calc = base64.b64encode(h.digest()) return self.signature == signature_calc
Verifies the authenticity of a notification message. TODO: This is doing a form of authentication and this functionality should really be merged with the pluggable authentication mechanism at some point.
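The same signature computation can be written as a standalone helper. This sketch substitutes hashlib.sha1 for the legacy sha module and takes the service and operation names as arguments rather than reading them from NotificationMessage; all inputs are assumed to be bytes.

import base64
import hashlib
import hmac

def compute_signature(secret_key, service_name, operation_name, timestamp):
    verification_input = service_name + operation_name + timestamp
    digest = hmac.new(secret_key, verification_input, digestmod=hashlib.sha1)
    return base64.b64encode(digest.digest())

# verify() then reduces to comparing this value with the message's signature.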
def manage_file(name, sfn, ret, source, source_sum, user, group, mode, attrs, saltenv, backup, makedirs=False, template=None, # pylint: disable=W0613 show_changes=True, contents=None, dir_mode=None, follow_symlinks=True, skip_verify=False, keep_mode=False, encoding=None, encoding_errors='strict', seuser=None, serole=None, setype=None, serange=None, **kwargs): ''' Checks the destination against what was retrieved with get_managed and makes the appropriate modifications (if necessary). name location to place the file sfn location of cached file on the minion This is the path to the file stored on the minion. This file is placed on the minion using cp.cache_file. If the hash sum of that file matches the source_sum, we do not transfer the file to the minion again. This file is then grabbed and if it has template set, it renders the file to be placed into the correct place on the system using salt.files.utils.copyfile() ret The initial state return data structure. Pass in ``None`` to use the default structure. source file reference on the master source_sum sum hash for source user user owner group group owner backup backup_mode attrs attributes to be set on file: '' means remove all of them .. versionadded:: 2018.3.0 makedirs make directories if they do not exist template format of templating show_changes Include diff in state return contents: contents to be placed in the file dir_mode mode for directories created with makedirs skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 keep_mode : False If ``True``, and the ``source`` is a file from the Salt fileserver (or a local file on the minion), the mode of the destination file will be set to the mode of the source file. .. note:: keep_mode does not work with salt-ssh. As a consequence of how the files are transferred to the minion, and the inability to connect back to the master with salt-ssh, salt is unable to stat the file as it exists on the fileserver and thus cannot mirror the mode on the salt-ssh minion encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. .. versionadded:: 2017.7.0 encoding_errors : 'strict' Default is ```'strict'```. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the error handling schemes. .. versionadded:: 2017.7.0 seuser selinux user attribute .. versionadded:: Neon serange selinux range attribute .. versionadded:: Neon setype selinux type attribute .. versionadded:: Neon serange selinux range attribute .. versionadded:: Neon CLI Example: .. code-block:: bash salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base '' .. 
versionchanged:: 2014.7.0 ``follow_symlinks`` option added ''' name = os.path.expanduser(name) if not ret: ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} # Ensure that user-provided hash string is lowercase if source_sum and ('hsum' in source_sum): source_sum['hsum'] = source_sum['hsum'].lower() if source: if not sfn: # File is not present, cache it sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) htype = source_sum.get('hash_type', __opts__['hash_type']) # Recalculate source sum now that file has been cached source_sum = { 'hash_type': htype, 'hsum': get_hash(sfn, form=htype) } if keep_mode: if _urlparse(source).scheme in ('salt', 'file', ''): try: mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True) except Exception as exc: log.warning('Unable to stat %s: %s', sfn, exc) # Check changes if the target file exists if os.path.isfile(name) or os.path.islink(name): if os.path.islink(name) and follow_symlinks: real_name = os.path.realpath(name) else: real_name = name # Only test the checksums on files with managed contents if source and not (not follow_symlinks and os.path.islink(real_name)): name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type'])) else: name_sum = None # Check if file needs to be replaced if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum): if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server or local # source, and we are not skipping checksum verification, then # verify that it matches the specified checksum. if not skip_verify \ and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3}). 
If the \'source_hash\' value ' 'refers to a remote file with multiple possible ' 'matches, then it may be necessary to set ' '\'source_hash_name\'.'.format( source_sum['hash_type'], source, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret # Print a diff equivalent to diff -u old new if __salt__['config.option']('obfuscate_templates'): ret['changes']['diff'] = '<Obfuscated Template>' elif not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: try: ret['changes']['diff'] = get_diff( real_name, sfn, show_filenames=False) except CommandExecutionError as exc: ret['changes']['diff'] = exc.strerror # Pre requisites are met, and the file needs to be replaced, do it try: salt.utils.files.copyfile(sfn, real_name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(sfn) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) if contents is not None: # Write the static contents to a temporary file tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.platform.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.files.fopen(tmp, 'wb') as tmp_: if encoding: log.debug('File will be encoded with %s', encoding) tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors)) else: tmp_.write(salt.utils.stringutils.to_bytes(contents)) try: differences = get_diff( real_name, tmp, show_filenames=False, show_changes=show_changes, template=True) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to detect changes to file: {0}'.format(exc.strerror) ) differences = '' if differences: ret['changes']['diff'] = differences # Pre requisites are met, the file needs to be replaced, do it try: salt.utils.files.copyfile(tmp, real_name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(tmp) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) __clean_tmp(tmp) # Check for changing symlink to regular file here if os.path.islink(name) and not follow_symlinks: if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server source verify # that it matches the intended sum value if not skip_verify and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3})'.format( source_sum['hash_type'], name, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret try: salt.utils.files.copyfile(sfn, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(sfn) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) ret['changes']['diff'] = \ 'Replace symbolic link with regular file' if salt.utils.platform.is_windows(): # This function resides in win_file.py and will be available # on Windows. 
The local function will be overridden # pylint: disable=E1120,E1121,E1123 ret = check_perms( path=name, ret=ret, owner=kwargs.get('win_owner'), grant_perms=kwargs.get('win_perms'), deny_perms=kwargs.get('win_deny_perms'), inheritance=kwargs.get('win_inheritance', True), reset=kwargs.get('win_perms_reset', False)) # pylint: enable=E1120,E1121,E1123 else: ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks, seuser=seuser, serole=serole, setype=setype, serange=serange) if ret['changes']: ret['comment'] = 'File {0} updated'.format( salt.utils.data.decode(name) ) elif not ret['changes'] and ret['result']: ret['comment'] = 'File {0} is in the correct state'.format( salt.utils.data.decode(name) ) if sfn: __clean_tmp(sfn) return ret else: # target file does not exist contain_dir = os.path.dirname(name) def _set_mode_and_make_dirs(name, dir_mode, mode, user, group): # check for existence of windows drive letter if salt.utils.platform.is_windows(): drive, _ = os.path.splitdrive(name) if drive and not os.path.exists(drive): __clean_tmp(sfn) return _error(ret, '{0} drive not present'.format(drive)) if dir_mode is None and mode is not None: # Add execute bit to each nonzero digit in the mode, if # dir_mode was not specified. Otherwise, any # directories created with makedirs_() below can't be # listed via a shell. mode_list = [x for x in six.text_type(mode)][-3:] for idx in range(len(mode_list)): if mode_list[idx] != '0': mode_list[idx] = six.text_type(int(mode_list[idx]) | 1) dir_mode = ''.join(mode_list) if salt.utils.platform.is_windows(): # This function resides in win_file.py and will be available # on Windows. The local function will be overridden # pylint: disable=E1120,E1121,E1123 makedirs_( path=name, owner=kwargs.get('win_owner'), grant_perms=kwargs.get('win_perms'), deny_perms=kwargs.get('win_deny_perms'), inheritance=kwargs.get('win_inheritance', True), reset=kwargs.get('win_perms_reset', False)) # pylint: enable=E1120,E1121,E1123 else: makedirs_(name, user=user, group=group, mode=dir_mode) if source: # Apply the new file if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server source verify # that it matches the intended sum value if not skip_verify \ and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3})'.format( source_sum['hash_type'], name, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret # It is a new file, set the diff accordingly ret['changes']['diff'] = 'New file' if not os.path.isdir(contain_dir): if makedirs: _set_mode_and_make_dirs(name, dir_mode, mode, user, group) else: __clean_tmp(sfn) # No changes actually made ret['changes'].pop('diff', None) return _error(ret, 'Parent directory not present') else: # source != True if not os.path.isdir(contain_dir): if makedirs: _set_mode_and_make_dirs(name, dir_mode, mode, user, group) else: __clean_tmp(sfn) # No changes actually made ret['changes'].pop('diff', None) return _error(ret, 'Parent directory not present') # Create the file, user rw-only if mode will be set to prevent # a small security race problem before the permissions are set with salt.utils.files.set_umask(0o077 if mode else None): # Create a new file when test is False and source is None if contents is None: if not __opts__['test']: if 
touch(name): ret['changes']['new'] = 'file {0} created'.format(name) ret['comment'] = 'Empty file' else: return _error( ret, 'Empty file {0} not created'.format(name) ) else: if not __opts__['test']: if touch(name): ret['changes']['diff'] = 'New file' else: return _error( ret, 'File {0} not created'.format(name) ) if contents is not None: # Write the static contents to a temporary file tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) with salt.utils.files.fopen(tmp, 'wb') as tmp_: if encoding: if salt.utils.platform.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) log.debug('File will be encoded with %s', encoding) tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors)) else: tmp_.write(salt.utils.stringutils.to_bytes(contents)) # Copy into place salt.utils.files.copyfile(tmp, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) __clean_tmp(tmp) # Now copy the file contents if there is a source file elif sfn: salt.utils.files.copyfile(sfn, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) __clean_tmp(sfn) # This is a new file, if no mode specified, use the umask to figure # out what mode to use for the new file. if mode is None and not salt.utils.platform.is_windows(): # Get current umask mask = salt.utils.files.get_umask() # Calculate the mode value that results from the umask mode = oct((0o777 ^ mask) & 0o666) if salt.utils.platform.is_windows(): # This function resides in win_file.py and will be available # on Windows. The local function will be overridden # pylint: disable=E1120,E1121,E1123 ret = check_perms( path=name, ret=ret, owner=kwargs.get('win_owner'), grant_perms=kwargs.get('win_perms'), deny_perms=kwargs.get('win_deny_perms'), inheritance=kwargs.get('win_inheritance', True), reset=kwargs.get('win_perms_reset', False)) # pylint: enable=E1120,E1121,E1123 else: ret, _ = check_perms(name, ret, user, group, mode, attrs, seuser=seuser, serole=serole, setype=setype, serange=serange) if not ret['comment']: ret['comment'] = 'File ' + name + ' updated' if __opts__['test']: ret['comment'] = 'File ' + name + ' not updated' elif not ret['changes'] and ret['result']: ret['comment'] = 'File ' + name + ' is in the correct state' if sfn: __clean_tmp(sfn) return ret
Checks the destination against what was retrieved with get_managed and makes the appropriate modifications (if necessary). name location to place the file sfn location of cached file on the minion This is the path to the file stored on the minion. This file is placed on the minion using cp.cache_file. If the hash sum of that file matches the source_sum, we do not transfer the file to the minion again. This file is then grabbed and if it has template set, it renders the file to be placed into the correct place on the system using salt.files.utils.copyfile() ret The initial state return data structure. Pass in ``None`` to use the default structure. source file reference on the master source_sum sum hash for source user user owner group group owner backup backup_mode attrs attributes to be set on file: '' means remove all of them .. versionadded:: 2018.3.0 makedirs make directories if they do not exist template format of templating show_changes Include diff in state return contents: contents to be placed in the file dir_mode mode for directories created with makedirs skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 keep_mode : False If ``True``, and the ``source`` is a file from the Salt fileserver (or a local file on the minion), the mode of the destination file will be set to the mode of the source file. .. note:: keep_mode does not work with salt-ssh. As a consequence of how the files are transferred to the minion, and the inability to connect back to the master with salt-ssh, salt is unable to stat the file as it exists on the fileserver and thus cannot mirror the mode on the salt-ssh minion encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. .. versionadded:: 2017.7.0 encoding_errors : 'strict' Default is ```'strict'```. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the error handling schemes. .. versionadded:: 2017.7.0 seuser selinux user attribute .. versionadded:: Neon serange selinux range attribute .. versionadded:: Neon setype selinux type attribute .. versionadded:: Neon serange selinux range attribute .. versionadded:: Neon CLI Example: .. code-block:: bash salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base '' .. versionchanged:: 2014.7.0 ``follow_symlinks`` option added
def _read_loop_polling(self): """Read packets by polling the Engine.IO server.""" while self.state == 'connected': self.logger.info( 'Sending polling GET request to ' + self.base_url) r = self._send_request( 'GET', self.base_url + self._get_url_timestamp()) if r is None: self.logger.warning( 'Connection refused by the server, aborting') self.queue.put(None) break if r.status_code != 200: self.logger.warning('Unexpected status code %s in server ' 'response, aborting', r.status_code) self.queue.put(None) break try: p = payload.Payload(encoded_payload=r.content) except ValueError: self.logger.warning( 'Unexpected packet from server, aborting') self.queue.put(None) break for pkt in p.packets: self._receive_packet(pkt) self.logger.info('Waiting for write loop task to end') self.write_loop_task.join() self.logger.info('Waiting for ping loop task to end') self.ping_loop_event.set() self.ping_loop_task.join() if self.state == 'connected': self._trigger_event('disconnect', run_async=False) try: connected_clients.remove(self) except ValueError: # pragma: no cover pass self._reset() self.logger.info('Exiting read loop task')
Read packets by polling the Engine.IO server.
def maybe_stream(s): """Ensure that the given argument is a stream.""" if isinstance(s, Stream): return s if s is None: stream = InMemStream() stream.close() # we don't intend to write anything return stream if isinstance(s, unicode): s = s.encode('utf-8') if isinstance(s, bytearray): s = bytes(s) if isinstance(s, bytes): stream = InMemStream(s) stream.close() # we don't intend to write anything return stream # s may still conform to the Stream interface. Yay duck typing. return s
Ensure that the given argument is a stream.
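Usage sketch showing the normalization rules above; InMemStream and Stream come from the same module.

s1 = maybe_stream(b'payload')    # bytes -> closed InMemStream
s2 = maybe_stream(u'payload')    # unicode -> encoded to UTF-8, then wrapped
s3 = maybe_stream(None)          # None -> empty, closed InMemStream
s4 = maybe_stream(s1)            # already a Stream -> returned unchanged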
async def set_speaker_settings(self, target: str, value: str): """Set speaker settings.""" params = {"settings": [{"target": target, "value": value}]} return await self.services["audio"]["setSpeakerSettings"](params)
Set speaker settings.
def load(cls, file_path): """ Load the object from a JSON file (saved with :py:func:`ConciseCV.save`) Returns: ConciseCV: Loaded ConciseCV object. """ data = helper.read_json(file_path) return ConciseCV.from_dict(data)
Load the object from a JSON file (saved with :py:func:`ConciseCV.save`) Returns: ConciseCV: Loaded ConciseCV object.
def xyinterp(x, y, xval):
    """
    :Purpose: Interpolates y based on the given xval.

    x and y are a pair of independent/dependent variable arrays that must
    be the same length. The x array must also be sorted.
    xval is a user-specified value. This routine looks up xval in the x
    array and uses that information to properly interpolate the value in
    the y array.

    Notes
    =====
    Use the searchsorted method on the X array to determine the bin in
    which xval falls; then use that information to compute the
    corresponding y value.

    See Also
    ========
    numpy

    Parameters
    ==========
    x: 1D numpy array
        independent variable array: MUST BE SORTED
    y: 1D numpy array
        dependent variable array
    xval: float
        the x value at which you want to know the value of y

    Returns
    =======
    y: float
        the value of y corresponding to xval

    Raises
    ======
    ValueError:
        If arrays are unequal length; or x array is unsorted; or if xval
        falls outside the bounds of x (extrapolation is unsupported)

    :version: 0.1 last modified 2006-07-06
    """
    # Enforce conditions on x, y, and xval:
    # x and y must correspond
    if len(x) != len(y):
        raise ValueError("Input arrays must be equal lengths")

    # Extrapolation not supported
    if xval < x[0]:
        raise ValueError("Value %f < min(x) %f: Extrapolation unsupported" % (xval, x[0]))
    if xval > x[-1]:
        raise ValueError("Value %f > max(x) %f: Extrapolation unsupported" % (xval, x[-1]))

    # This algorithm only works on sorted data
    if (N.diff(x) < 0).any():
        raise ValueError("Input array x must be sorted")

    # Now do the real work.
    hi = x.searchsorted(xval)
    if hi == 0:
        # xval equals x[0]; avoid wrapping lo around to the last element
        hi = 1
    lo = hi - 1

    try:
        seg = (float(xval) - x[lo]) / (x[hi] - x[lo])
    except ZeroDivisionError:
        seg = 0.0

    yval = y[lo] + seg * (y[hi] - y[lo])
    return yval
:Purpose: Interpolates y based on the given xval. x and y are a pair of independent/dependent variable arrays that must be the same length. The x array must also be sorted. xval is a user-specified value. This routine looks up xval in the x array and uses that information to properly interpolate the value in the y array. Notes ===== Use the searchsorted method on the X array to determine the bin in which xval falls; then use that information to compute the corresponding y value. See Also ======== numpy Parameters ========== x: 1D numpy array independent variable array: MUST BE SORTED y: 1D numpy array dependent variable array xval: float the x value at which you want to know the value of y Returns ======= y: float the value of y corresponding to xval Raises ====== ValueError: If arrays are unequal length; or x array is unsorted; or if xval falls outside the bounds of x (extrapolation is unsupported :version: 0.1 last modified 2006-07-06
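A worked example of the lookup (numpy imported as N, as in the module above):

import numpy as N

x = N.array([0.0, 1.0, 2.0, 3.0])
y = N.array([0.0, 10.0, 20.0, 30.0])

xyinterp(x, y, 1.5)   # 15.0: halfway between y[1]=10 and y[2]=20
xyinterp(x, y, 4.0)   # raises ValueError: extrapolation unsupported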
def AFF4AddChild(self, subject, child, extra_attributes=None): """Adds a child to the specified parent.""" precondition.AssertType(child, Text) attributes = { DataStore.AFF4_INDEX_DIR_TEMPLATE % child: [DataStore.EMPTY_DATA_PLACEHOLDER] } if extra_attributes: attributes.update(extra_attributes) self.MultiSet(subject, attributes)
Adds a child to the specified parent.
def render_table(data, headers=None): """ Return a dictionary list formatted as a HTML table. Args: data: a list of dictionaries, one per row. headers: the keys in the dictionary to use as table columns, in order. """ builder = HtmlBuilder() builder._render_objects(data, headers, datatype='dict') return builder._to_html()
Return a dictionary list formatted as a HTML table. Args: data: a list of dictionaries, one per row. headers: the keys in the dictionary to use as table columns, in order.
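Minimal usage sketch:

rows = [{'name': 'alpha', 'count': 3},
        {'name': 'beta', 'count': 7}]
html = render_table(rows, headers=['name', 'count'])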
def distance(pos0, pos1):
    """distance between two positions defined by (separation, PA)"""
    # Convert each (separation, position angle in degrees) pair to
    # tangent-plane offsets, then return the Euclidean separation.
    r0, pa0 = pos0
    ra0 = r0*np.sin(pa0*np.pi/180)
    dec0 = r0*np.cos(pa0*np.pi/180)
    r1, pa1 = pos1
    ra1 = r1*np.sin(pa1*np.pi/180)
    dec1 = r1*np.cos(pa1*np.pi/180)
    dra = (ra1 - ra0)
    ddec = (dec1 - dec0)
    return np.sqrt(dra**2 + ddec**2)
distance between two positions defined by (separation, PA)
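Worked example: two positions at the same separation but 90 degrees apart in position angle are sqrt(2) separations apart.

import numpy as np

distance((1.0, 0.0), (1.0, 90.0))   # ~1.4142
distance((1.0, 45.0), (1.0, 45.0))  # 0.0, identical positions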
def _get_api_version(self): """Fetches the most recent API version Returns: str """ url = "{base_url}/api/server_info".format(base_url=self._base_url()) server_info = self._make_request(url=url, method="get") return server_info["latest_api_version"]
Fetches the most recent API version Returns: str
def expiration_time(self):
    """ Returns the time until this access attempt is forgotten. """
    login_forgotten_seconds = configuration.behavior.login_forgotten_seconds
    if login_forgotten_seconds <= 0:
        return None

    now = timezone.now()
    delta = now - self.modified
    # Use total_seconds(): timedelta.seconds only holds the sub-day component
    # and would under-count the elapsed time for attempts older than one day.
    time_remaining = login_forgotten_seconds - delta.total_seconds()
    return time_remaining
Returns the time until this access attempt is forgotten.
def _reroot(self): '''Run the re-rooting algorithm in the Rerooter class.''' rerooter = Rerooter() self.tree = rerooter.reroot_by_tree(self.reference_tree, self.tree)
Run the re-rooting algorithm in the Rerooter class.
def make_a_copy(self, location=None):
    """
    Creates a backup of the file specified by location. The backup filename
    appends a .bak.N suffix, where N is a number that is not yet used in the
    backup directory.

    TODO: This function should be moved to another file, maybe XShell.

    :param location: the location of the file to be backed up
    """
    import shutil
    destination = backup_name(location)
    shutil.copyfile(location, destination)
Creates a backup of the file specified by location. The backup filename appends a .bak.N suffix, where N is a number that is not yet used in the backup directory. TODO: This function should be moved to another file, maybe XShell. :param location: the location of the file to be backed up
def real_out_dtype(self): """The real dtype corresponding to this space's `out_dtype`.""" if self.__real_out_dtype is None: raise AttributeError( 'no real variant of output dtype {} defined' ''.format(dtype_repr(self.scalar_out_dtype))) else: return self.__real_out_dtype
The real dtype corresponding to this space's `out_dtype`.
def get_scenario_data(scenario_id, **kwargs):
    """
        Get all the datasets belonging to the scenario with the specified ID.
        Hidden datasets which the requesting user may not read are returned
        with their value and metadata stripped.

        @returns a list of datasets
    """
    user_id = kwargs.get('user_id')

    scenario_data = db.DBSession.query(Dataset).filter(
        Dataset.id == ResourceScenario.dataset_id,
        ResourceScenario.scenario_id == scenario_id).options(
            joinedload_all('metadata')).distinct().all()

    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            except:
                sd.value = None
                sd.metadata = []

    db.DBSession.expunge_all()

    log.info("Retrieved %s datasets", len(scenario_data))

    return scenario_data
Get all the datasets belonging to the scenario with the specified ID. Hidden datasets which the requesting user may not read are returned with their value and metadata stripped. @returns a list of datasets
def start(name=None, id=None, bootpath=None, disk=None, disks=None, local_iface=False, memory=None, nics=0, switch=None): ''' Starts a VM defined by the specified parameters. When both a name and id are provided, the id is ignored. name: Name of the defined VM. id: VM id. bootpath: Path to a kernel or BIOS image to load. disk: Path to a single disk to use. disks: List of multiple disks to use. local_iface: Whether to add a local network interface. See "LOCAL INTERFACES" in the vmctl(8) manual page for more information. memory: Memory size of the VM specified in megabytes. switch: Add a network interface that is attached to the specified virtual switch on the host. CLI Example: .. code-block:: bash salt '*' vmctl.start 2 # start VM with id 2 salt '*' vmctl.start name=web1 bootpath='/bsd.rd' nics=2 memory=512M disk='/disk.img' ''' ret = {'changes': False, 'console': None} cmd = ['vmctl', 'start'] if not (name or id): raise SaltInvocationError('Must provide either "name" or "id"') elif name: cmd.append(name) else: cmd.append(id) name = _id_to_name(id) if nics > 0: cmd.append('-i {0}'.format(nics)) # Paths cannot be appended as otherwise the inserted whitespace is treated by # vmctl as being part of the path. if bootpath: cmd.extend(['-b', bootpath]) if memory: cmd.append('-m {0}'.format(memory)) if switch: cmd.append('-n {0}'.format(switch)) if local_iface: cmd.append('-L') if disk and disks: raise SaltInvocationError('Must provide either "disks" or "disk"') if disk: cmd.extend(['-d', disk]) if disks: cmd.extend(['-d', x] for x in disks) # Before attempting to define a new VM, make sure it doesn't already exist. # Otherwise return to indicate nothing was changed. if len(cmd) > 3: vmstate = status(name) if vmstate: ret['comment'] = 'VM already exists and cannot be redefined' return ret result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if result['retcode'] == 0: ret['changes'] = True m = re.match(r'.*successfully, tty (\/dev.*)', result['stderr']) if m: ret['console'] = m.groups()[0] else: m = re.match(r'.*Operation already in progress$', result['stderr']) if m: ret['changes'] = False else: raise CommandExecutionError( 'Problem encountered running vmctl', info={'errors': [result['stderr']], 'changes': ret} ) return ret
Starts a VM defined by the specified parameters. When both a name and id are provided, the id is ignored. name: Name of the defined VM. id: VM id. bootpath: Path to a kernel or BIOS image to load. disk: Path to a single disk to use. disks: List of multiple disks to use. local_iface: Whether to add a local network interface. See "LOCAL INTERFACES" in the vmctl(8) manual page for more information. memory: Memory size of the VM specified in megabytes. switch: Add a network interface that is attached to the specified virtual switch on the host. CLI Example: .. code-block:: bash salt '*' vmctl.start 2 # start VM with id 2 salt '*' vmctl.start name=web1 bootpath='/bsd.rd' nics=2 memory=512M disk='/disk.img'
def _redshift(distance, **kwargs): r"""Uses astropy to get redshift from the given luminosity distance. Parameters ---------- distance : float The luminosity distance, in Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The redshift corresponding to the given luminosity distance. """ cosmology = get_cosmology(**kwargs) return z_at_value(cosmology.luminosity_distance, distance, units.Mpc)
r"""Uses astropy to get redshift from the given luminosity distance. Parameters ---------- distance : float The luminosity distance, in Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The redshift corresponding to the given luminosity distance.
def get_wd_entity(self):
    """
    Retrieve a WD item in JSON representation from Wikidata.
    :rtype: dict
    :return: a Python dictionary representation of the JSON entity
    """
    params = {
        'action': 'wbgetentities',
        'sites': 'enwiki',
        'ids': self.wd_item_id,
        'format': 'json'
    }
    headers = {
        'User-Agent': self.user_agent
    }
    json_data = self.mediawiki_api_call("GET", self.mediawiki_api_url, params=params, headers=headers)
    return self.parse_wd_json(wd_json=json_data['entities'][self.wd_item_id])
Retrieve a WD item in JSON representation from Wikidata. :rtype: dict :return: a Python dictionary representation of the JSON entity
def to_scanner(self, x, y, z): """ Converts a 3d position in MRSData space to the scanner reference frame :param x: :param y: :param z: :return: """ if self.transform is None: raise ValueError("No transform set for MRSData object {}".format(self)) transformed_point = self.transform * numpy.matrix([x, y, z, 1]).T return numpy.squeeze(numpy.asarray(transformed_point))[0:3]
Converts a 3d position in MRSData space to the scanner reference frame :param x: :param y: :param z: :return:
def wrap_prompts_class(Klass): """ Wrap an IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations. """ try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
Wrap an IPython's Prompt class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations.
def generate_mesh( geo_object, verbose=True, dim=3, prune_vertices=True, prune_z_0=False, remove_faces=False, gmsh_path=None, extra_gmsh_arguments=None, # for debugging purposes: geo_filename=None, mesh_file_type="msh", ): """Return a meshio.Mesh, storing the mesh points, cells, and data, generated by Gmsh from the `geo_object`, written to a temporary file, and reread by `meshio`. Gmsh's native "msh" format is ill-suited to fast I/O. This can greatly reduce the performance of pygmsh. As alternatives, try `mesh_file_type=`: - "vtk"`, though Gmsh doesn't write the physical tags to VTK <https://gitlab.onelab.info/gmsh/gmsh/issues/389> or - `"mesh"`, though this only supports a few basic elements - "line", "triangle", "quad", "tetra", "hexahedron" - and doesn't preserve the `$PhysicalNames`, just the `int` tags. """ if extra_gmsh_arguments is None: extra_gmsh_arguments = [] # For format "mesh", ask Gmsh to save the physical tags # http://gmsh.info/doc/texinfo/gmsh.html#index-Mesh_002eSaveElementTagType if mesh_file_type == "mesh": extra_gmsh_arguments += ["-string", "Mesh.SaveElementTagType=2;"] preserve_geo = geo_filename is not None if geo_filename is None: with tempfile.NamedTemporaryFile(suffix=".geo") as f: geo_filename = f.name with open(geo_filename, "w") as f: f.write(geo_object.get_code()) # As of Gmsh 4.1.3, the mesh format options are # ``` # auto, msh1, msh2, msh3, msh4, msh, unv, vtk, wrl, mail, stl, p3d, mesh, bdf, cgns, # med, diff, ir3, inp, ply2, celum, su2, x3d, dat, neu, m, key # ``` # Pick the correct filename suffix. filename_suffix = "msh" if mesh_file_type[:3] == "msh" else mesh_file_type with tempfile.NamedTemporaryFile(suffix="." + filename_suffix) as handle: msh_filename = handle.name gmsh_executable = gmsh_path if gmsh_path is not None else _get_gmsh_exe() args = [ "-{}".format(dim), geo_filename, "-format", mesh_file_type, "-bin", "-o", msh_filename, ] + extra_gmsh_arguments # https://stackoverflow.com/a/803421/353337 p = subprocess.Popen( [gmsh_executable] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) if verbose: while True: line = p.stdout.readline() if not line: break print(line.decode("utf-8"), end="") p.communicate() assert p.returncode == 0, "Gmsh exited with error (return code {}).".format( p.returncode ) mesh = meshio.read(msh_filename) if remove_faces: # Only keep the cells of highest topological dimension; discard faces # and such. two_d_cells = set(["triangle", "quad"]) three_d_cells = set( ["tetra", "hexahedron", "wedge", "pyramid", "penta_prism", "hexa_prism"] ) if any(k in mesh.cells for k in three_d_cells): keep_keys = three_d_cells.intersection(mesh.cells.keys()) elif any(k in mesh.cells for k in two_d_cells): keep_keys = two_d_cells.intersection(mesh.cells.keys()) else: keep_keys = mesh.cells.keys() mesh.cells = {key: mesh.cells[key] for key in keep_keys} mesh.cell_data = {key: mesh.cell_data[key] for key in keep_keys} if prune_vertices: # Make sure to include only those vertices which belong to a cell. 
ncells = numpy.concatenate([numpy.concatenate(c) for c in mesh.cells.values()]) uvertices, uidx = numpy.unique(ncells, return_inverse=True) k = 0 for key in mesh.cells.keys(): n = numpy.prod(mesh.cells[key].shape) mesh.cells[key] = uidx[k : k + n].reshape(mesh.cells[key].shape) k += n mesh.points = mesh.points[uvertices] for key in mesh.point_data: mesh.point_data[key] = mesh.point_data[key][uvertices] # clean up os.remove(msh_filename) if preserve_geo: print("\ngeo file: {}".format(geo_filename)) else: os.remove(geo_filename) if ( prune_z_0 and mesh.points.shape[1] == 3 and numpy.all(numpy.abs(mesh.points[:, 2]) < 1.0e-13) ): mesh.points = mesh.points[:, :2] return mesh
Return a meshio.Mesh, storing the mesh points, cells, and data, generated by Gmsh from the `geo_object`, written to a temporary file, and reread by `meshio`. Gmsh's native "msh" format is ill-suited to fast I/O. This can greatly reduce the performance of pygmsh. As alternatives, try `mesh_file_type=`: - "vtk"`, though Gmsh doesn't write the physical tags to VTK <https://gitlab.onelab.info/gmsh/gmsh/issues/389> or - `"mesh"`, though this only supports a few basic elements - "line", "triangle", "quad", "tetra", "hexahedron" - and doesn't preserve the `$PhysicalNames`, just the `int` tags.
def get_parser(parser):
    """
    Configures the parser: sets the command description and adds the
    positional locale argument.

    args:
        parser: The argument parser to configure
    """
    parser.description = textwrap.dedent("""
        Segment the .po files in LOCALE(s) based on the segmenting rules in
        config.yaml.

        Note that segmenting is *not* idempotent: it modifies the input file,
        so be careful that you don't run it twice on the same file.
    """.strip())
    parser.add_argument("locale", nargs="+", help="a locale to segment")
Configures the parser: sets the command description and adds the positional locale argument. args: parser: The argument parser to configure
def createSparseCNNModel(self): """ Create a sparse network composed of two CNN / MaxPool layers followed by a sparse linear layer with using k-winner activation between the layers """ # Create sparseCNN2 model model = nn.Sequential( nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels[0], kernel_size=self.kernel_size[0], stride=self.stride[0], padding=self.padding[0]), nn.MaxPool2d(kernel_size=2), KWinners2d(n=self.cnn_output_len[0], k=self.cnn_k[0], channels=self.out_channels[0], kInferenceFactor=self.k_inference_factor, boostStrength=self.boost_strength, boostStrengthFactor=self.boost_strength_factor), nn.Conv2d(in_channels=self.out_channels[0], out_channels=self.out_channels[1], kernel_size=self.kernel_size[1], stride=self.stride[1], padding=self.padding[1]), nn.MaxPool2d(kernel_size=2), KWinners2d(n=self.cnn_output_len[1], k=self.cnn_k[1], channels=self.out_channels[1], kInferenceFactor=self.k_inference_factor, boostStrength=self.boost_strength, boostStrengthFactor=self.boost_strength_factor), Flatten(), SparseWeights( nn.Linear(self.cnn_output_len[1], self.n), self.weight_sparsity), KWinners(n=self.n, k=self.k, kInferenceFactor=self.k_inference_factor, boostStrength=self.boost_strength, boostStrengthFactor=self.boost_strength_factor), nn.Linear(self.n, self.output_size), nn.LogSoftmax(dim=1) ) model.to(self.device) if torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) return model
Create a sparse network composed of two CNN / MaxPool layers followed by a sparse linear layer with using k-winner activation between the layers
def update_rule(self, name, id_env, contents, blocks_id, id_rule): """ Save an environment rule :param name: Name of the rule :param id_env: Environment id :param contents: Lists of contents in order. Ex: ['content one', 'content two', ...] :param blocks_id: Lists of blocks id or 0 if is as custom content. Ex: ['0', '5', '0' ...] :param id_rule: Rule id :return: None :raise AmbienteNaoExisteError: Ambiente não cadastrado. :raise InvalidValueError: Invalid parameter. :raise UserNotAuthorizedError: Permissão negada. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o XML de resposta. """ url = 'rule/update/' map_dict = dict() map_dict['name'] = name map_dict['id_env'] = id_env map_dict['contents'] = contents map_dict['blocks_id'] = blocks_id map_dict['id_rule'] = id_rule try: code, xml = self.submit({'map': map_dict}, 'PUT', url) except Exception as e: raise e return self.response(code, xml)
Update an environment rule. :param name: Name of the rule :param id_env: Environment id :param contents: List of contents in order. Ex: ['content one', 'content two', ...] :param blocks_id: List of block ids, or 0 when the entry is custom content. Ex: ['0', '5', '0' ...] :param id_rule: Rule id :return: None :raise AmbienteNaoExisteError: Environment does not exist. :raise InvalidValueError: Invalid parameter. :raise UserNotAuthorizedError: Permission denied. :raise DataBaseError: NetworkAPI failed to access the database. :raise XMLError: NetworkAPI failed to read the request XML or generate the response XML.
def to_xml(self): ''' Returns a DOM representation of the line. @return: Element ''' for n, v in {"name": self.name, "quantity": self.quantity, "unit_price": self.unit_price}.items(): if is_empty_or_none(v): raise LineError("'%s' attribute cannot be empty or None." % n) doc = Document() root = doc.createElement("line") super(Line, self).to_xml(root) self._create_text_node(root, "date", self.date) self._create_text_node(root, "name", self.name, True) self._create_text_node(root, "description", self.description, True) self._create_text_node(root, "quantity", self.quantity) self._create_text_node(root, "unitPrice", self.unit_price) self._create_text_node(root, "unit", self.unit) self._create_text_node(root, "gin", self.gin) self._create_text_node(root, "gtin", self.gtin) self._create_text_node(root, "sscc", self.sscc) if len(self.__discounts): discounts = root.ownerDocument.createElement("discounts") root.appendChild(discounts) for discount in self.__discounts: if not issubclass(discount.__class__, Discount): raise LineError('discount of type %s is not an ' \ 'instance or a subclass of %s' % (discount.__class__.__name__, Discount.__name__)) discounts.appendChild(discount.to_xml()) if len(self.__taxes): taxes = root.ownerDocument.createElement("taxes") root.appendChild(taxes) for tax in self.__taxes: if not issubclass(tax.__class__, Tax): raise LineError('tax of type %s is not an instance ' \ 'or a subclass of %s' % (tax.__class__.__name__, Tax.__name__)) taxes.appendChild(tax.to_xml()) return root
Returns a DOM representation of the line. @return: Element
def process_request(self): """Processing the call and set response_data.""" self.response = self.request_handler.process_request( self.method, self.request_data)
Processing the call and set response_data.
def company_add_user(self, email, name, password, receiver, admin): """Add a user to the company account. :param email: :param name: :param password: Pass without storing in plain text :param receiver: Can user receive files :param admin: :type email: ``str`` or ``unicode`` :type name: ``str`` or ``unicode`` :type password: ``str`` or ``unicode`` :type receiver: ``bool`` :type admin: ``bool`` :rtype: ``bool`` """ method, url = get_URL('company_add_user') payload = { 'apikey': self.config.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'email': email, 'name': name, 'password': password, 'canreceivefiles': receiver, 'admin': admin } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return True hellraiser(res)
Add a user to the company account. :param email: :param name: :param password: Pass without storing in plain text :param receiver: Can user receive files :param admin: :type email: ``str`` or ``unicode`` :type name: ``str`` or ``unicode`` :type password: ``str`` or ``unicode`` :type receiver: ``bool`` :type admin: ``bool`` :rtype: ``bool``
def get_hours_for_week(self, week_start=None): """ Gets all ProjectHours entries in the 7-day period beginning on week_start. """ week_start = week_start if week_start else self.week_start week_end = week_start + relativedelta(days=7) return ProjectHours.objects.filter( week_start__gte=week_start, week_start__lt=week_end)
Gets all ProjectHours entries in the 7-day period beginning on week_start.
def _dendropy_to_dataframe( tree, add_node_labels=True, use_uids=True): """Convert Dendropy tree to Pandas dataframe.""" # Maximum distance from root. tree.max_distance_from_root() # Initialize the data object. idx = [] data = { 'type': [], 'id': [], 'parent': [], 'length': [], 'label': [], 'distance': []} if use_uids: data['uid'] = [] # Add labels to internal nodes if set to true. if add_node_labels: for i, node in enumerate(tree.internal_nodes()): node.label = str(i) for node in tree.nodes(): # Get node type if node.is_leaf(): type_ = 'leaf' label = str(node.taxon.label).replace(' ', '_') elif node.is_internal(): type_ = 'node' label = str(node.label) # Set node label and parent. id_ = label parent_node = node.parent_node length = node.edge_length distance = node.distance_from_root() # Is this node a root? if parent_node is None and length is None: parent_label = None parent_node = None length = 0 distance = 0 type_ = 'root' # Set parent node label elif parent_node.is_internal(): parent_label = str(parent_node.label) else: raise Exception("Subtree is not attached to tree?") # Add this node to the data. data['type'].append(type_) data['id'].append(id_) data['parent'].append(parent_label) data['length'].append(length) data['label'].append(label) data['distance'].append(distance) if use_uids: data['uid'].append(get_random_id(10)) # Construct dataframe. df = pandas.DataFrame(data) return df
Convert Dendropy tree to Pandas dataframe.
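Usage sketch with a small Newick tree; dendropy and pandas are assumed to be importable as in the surrounding module.

import dendropy

tree = dendropy.Tree.get(data='((A:1,B:2):1,C:3);', schema='newick')
df = _dendropy_to_dataframe(tree, add_node_labels=True, use_uids=False)
df[['id', 'parent', 'type', 'length', 'distance']]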