[Dataset viewer header — columns: "Unnamed: 0" (int64, 0 to 389k), "code" (string, 26 to 79.6k chars), "docstring" (string, 1 to 46.9k chars). Rows 5,400–5,488 follow.]
5,400
def _initalize_tree(self, position, momentum, slice_var, stepsize): position_bar, momentum_bar, _ = self.simulate_dynamics(self.model, position, momentum, stepsize, self.grad_log_pdf).get_proposed_values() _, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf() hamiltonian = logp_bar - 0.5 * np.dot(momentum_bar, momentum_bar) candidate_set_size = slice_var < np.exp(hamiltonian) accept_set_bool = hamiltonian > np.log(slice_var) - 10000 return position_bar, momentum_bar, candidate_set_size, accept_set_bool
Initializes the root node of the tree, i.e., depth = 0
5,401
def status_to_string(cls, status): strings = {CheckerMessages.INFO: "Info", CheckerMessages.WARNING: "Warning", CheckerMessages.ERROR: "Error"} return strings[status]
Converts a message status to a string. :param status: Status to convert (pyqode.core.modes.CheckerMessages) :return: The status string. :rtype: str
5,402
def endpoint_update(auth=None, **kwargs): cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.update_endpoint(**kwargs)
Update an endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 interface=public enabled=False salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 region=newregion salt '*' keystoneng.endpoint_update endpoint_id=4f961ad09d2d48948896bbe7c6a79717 service_name_or_id=glance url=https://example.org:9292
5,403
def get_gpio_mode(self, gpio_id): if not self._connected: return return self._protocol.status.get("OTGW_GPIO_{}".format(gpio_id))
Return the gpio mode for gpio :gpio_id:. @gpio_id Character A or B.
5,404
def run_flag_maf_zero(in_prefix, in_type, out_prefix, base_dir, options): os.mkdir(out_prefix) required_type = "bfile" check_input_files(in_prefix, in_type, required_type) script_prefix = os.path.join(out_prefix, "flag_maf_0") options += ["--{}".format(required_type), in_prefix, "--out", script_prefix] try: flag_maf_zero.main(options) except flag_maf_zero.ProgramError as e: msg = "flag_maf_zero: {}".format(e) raise ProgramError(msg) nb_flagged = None flagged_fn = script_prefix + ".list" with open(flagged_fn, "r") as i_file: nb_flagged = len(i_file.read().splitlines()) latex_file = os.path.join(script_prefix + ".summary.tex") try: with open(latex_file, "w") as o_file: print >>o_file, latex_template.subsection( flag_maf_zero.pretty_name ) safe_fn = latex_template.sanitize_tex(os.path.basename(flagged_fn)) text = ( "After computing minor allele frequencies (MAF) of all " "markers using Plink, a total of {:,d} marker{} had a MAF " "of zero and were flagged ({}).".format( nb_flagged, "s" if nb_flagged > 1 else "", "see file " + latex_template.texttt(safe_fn) + " for more information" ) ) print >>o_file, latex_template.wrap_lines(text) except IOError: msg = "{}: cannot write LaTeX summary".format(latex_file) raise ProgramError(msg) with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file: print >>o_file, "# {}".format(script_prefix) print >>o_file, ("Number of markers flagged for MAF of 0\t" "{:,d}".format(nb_flagged)) print >>o_file, "---" return _StepResult( next_file=in_prefix, next_file_type=required_type, latex_summary=latex_file, description=flag_maf_zero.desc, long_description=flag_maf_zero.long_desc, graph_path=None, )
Runs step11 (flag MAF zero). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``bfile``). This function calls the :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module. The required file type for this module is ``bfile``, hence the need to use the :py:func:`check_input_files` to check if the file input file type is the good one, or to create it if needed. .. note:: The :py:mod:`pyGenClean.FlagMAF.flag_maf_zero` module doesn't return usable output files. Hence, this function returns the input file prefix and its type.
5,405
def is_descendant_of_log(self, id_, log_id): if self._catalog_session is not None: return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=log_id) return self._hierarchy_session.is_descendant(id_=id_, descendant_id=log_id)
Tests if an ``Id`` is a descendant of a log. arg: id (osid.id.Id): an ``Id`` arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if the ``id`` is a descendant of the ``log_id,`` ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``id`` or ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` is not found return ``false``.
5,406
def from_json(cls, json_info): if json_info is None: return None return TrialRecord( trial_id=json_info["trial_id"], job_id=json_info["job_id"], trial_status=json_info["status"], start_time=json_info["start_time"], params=json_info["params"])
Build a Trial instance from a json string.
5,407
def _add_metadata(item, metadata, remotes, only_metadata=False): for check_key in [item["description"]] + _get_file_keys(item) + _get_vrn_keys(item): item_md = metadata.get(check_key) if item_md: break if not item_md: item_md = _find_glob_metadata(item["files"], metadata) if remotes.get("region"): item["algorithm"]["variant_regions"] = remotes["region"] TOP_LEVEL = set(["description", "genome_build", "lane", "vrn_file", "files", "analysis"]) keep_sample = True if item_md and len(item_md) > 0: if "metadata" not in item: item["metadata"] = {} for k, v in item_md.items(): if v: if k in TOP_LEVEL: item[k] = v elif k in run_info.ALGORITHM_KEYS: v = _handle_special_yaml_cases(v) item["algorithm"][k] = v else: v = _handle_special_yaml_cases(v) item["metadata"][k] = v elif len(metadata) > 0: warn = "Dropped sample" if only_metadata else "Added minimal sample information" print("WARNING: %s: metadata not found for %s, %s" % (warn, item["description"], [os.path.basename(f) for f in item["files"]])) keep_sample = not only_metadata if tz.get_in(["metadata", "ped"], item): item["metadata"] = _add_ped_metadata(item["description"], item["metadata"]) return item if keep_sample else None
Add metadata information from CSV file to current item. Retrieves metadata based on 'description' parsed from input CSV file. Adds to object and handles special keys: - `description`: A new description for the item. Used to relabel items based on the pre-determined description from fastq name or BAM read groups. - Keys matching supported names in the algorithm section map to key/value pairs there instead of metadata.
5,408
def gen_tensor_data(): X, y = toy_interaction(return_X_y=True, n=10000) gam = LinearGAM(te(0, 1, lam=0.1)).fit(X, y) XX = gam.generate_X_grid(term=0, meshgrid=True) Z = gam.partial_dependence(term=0, meshgrid=True) fig = plt.figure(figsize=(9,6)) ax = plt.axes(projection='3d') ax.dist = 7.5 ax.plot_surface(XX[0], XX[1], Z, cmap='viridis') ax.set_axis_off() fig.tight_layout() plt.savefig('pygam_tensor.png', transparent=True, dpi=300)
toy interaction data
5,409
def plot_ecg_grids(ecg_grids, fs, units, time_units, axes): "Add ecg grids to the axes" if ecg_grids == 'all': ecg_grids = range(0, len(axes)) for ch in ecg_grids: auto_xlims = axes[ch].get_xlim() auto_ylims = axes[ch].get_ylim() (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1], units[ch], fs, auto_xlims[1], time_units) min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x) min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y) for tick in minor_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed', marker='|', zorder=1) for tick in major_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa', marker='|', zorder=2) for tick in minor_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed', marker='_', zorder=1) for tick in major_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa', marker='_', zorder=2) axes[ch].set_xlim(auto_xlims) axes[ch].set_ylim(auto_ylims)
Add ecg grids to the axes
5,410
def _get_video(edx_video_id): try: return Video.objects.prefetch_related("encoded_videos", "courses").get(edx_video_id=edx_video_id) except Video.DoesNotExist: error_message = u"Video not found for edx_video_id: {0}".format(edx_video_id) raise ValVideoNotFoundError(error_message) except Exception: error_message = u"Could not get edx_video_id: {0}".format(edx_video_id) logger.exception(error_message) raise ValInternalError(error_message)
Get a Video instance, prefetching encoded video and course information. Raises ValVideoNotFoundError if the video cannot be retrieved.
5,411
def cross_list_section(self, id, new_course_id): path = {} data = {} params = {} path["id"] = id path["new_course_id"] = new_course_id self.logger.debug("POST /api/v1/sections/{id}/crosslist/{new_course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/sections/{id}/crosslist/{new_course_id}".format(**path), data=data, params=params, single_item=True)
Cross-list a Section. Move the Section to another course. The new course may be in a different account (department), but must belong to the same root account (institution).
5,412
def load(filename): if os.path.isfile(filename): with open(filename) as handle: return yaml_load(handle, Loader=Loader) raise RuntimeError("File %s doesn't exist!" % filename)
Load yaml file with specific include loader.
5,413
def unquote (s, matching=False): if not s: return s if len(s) < 2: return s if matching: if s[0] in ("\"'") and s[0] == s[-1]: s = s[1:-1] else: if s[0] in ("\"'"): s = s[1:] if s[-1] in ("\"'"): s = s[:-1] return s
Remove leading and ending single and double quotes. The quotes need to match if matching is True. Only one quote from each end will be stripped. @return: if s evaluates to False, return s as is, else return string with stripped quotes @rtype: unquoted string, or s unchanged if it evaluates to False
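A quick usage sketch of the stripping rules above (calls are hypothetical; behavior follows the reconstructed matching branch):

    unquote('"hello"')                  # -> 'hello'
    unquote('"hello')                   # -> 'hello' (one quote stripped from each end, if present)
    unquote('"hello\'', matching=True)  # -> unchanged: the end quotes do not match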
5,414
def _error(self, exc_info): if self.exc_info: if self.traceback: return exc_info return exc_info[:2] return exc_info[1]
Retrieves the error info
5,415
def get_apis(self): out = set(x.api for x in self.types.values() if x.api) for ft in self.features.values(): out.update(ft.get_apis()) for ext in self.extensions.values(): out.update(ext.get_apis()) return out
Returns set of api names referenced in this Registry :return: set of api name strings
5,416
def list_all_return_line_items(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_return_line_items_with_http_info(**kwargs) else: (data) = cls._list_all_return_line_items_with_http_info(**kwargs) return data
List ReturnLineItems Return a list of ReturnLineItems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_return_line_items(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ReturnLineItem] If the method is called asynchronously, returns the request thread.
5,417
def rpc(commands, method='cli', **kwargs): nxos_api_kwargs = __salt__['config.get']('nxos_api', {}) nxos_api_kwargs.update(**kwargs) if 'nxos_api.rpc' in __proxy__ and __salt__['config.get']('proxy:proxytype') == 'nxos_api': return __proxy__['nxos_api.rpc'](commands, method=method, **nxos_api_kwargs) nxos_api_kwargs = __salt__['config.get']('nxos_api', {}) nxos_api_kwargs.update(**kwargs) return __utils__['nxos_api.rpc'](commands, method=method, **nxos_api_kwargs)
Execute an arbitrary RPC request via the Nexus API. commands The commands to be executed. method: ``cli`` The type of the response, i.e., raw text (``cli_ascii``) or structured document (``cli``). Defaults to ``cli`` (structured data). transport: ``https`` Specifies the type of connection transport to use. Valid values for the connection are ``http``, and ``https``. host: ``localhost`` The IP address or DNS host name of the connection device. username: ``admin`` The username to pass to the device to authenticate the NX-API connection. password The password to pass to the device to authenticate the NX-API connection. port The TCP port of the endpoint for the NX-API connection. If this keyword is not specified, the default value is automatically determined by the transport type (``80`` for ``http``, or ``443`` for ``https``). timeout: ``60`` Time in seconds to wait for the device to respond. Default: 60 seconds. verify: ``True`` Either a boolean, in which case it controls whether we verify the NX-API TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. CLI Example: .. code-block:: bash salt-call --local nxos_api.rpc 'show version'
5,418
def check_conf_enabled(conf): if conf.endswith('.conf'): conf_file = conf else: conf_file = '{0}.conf'.format(conf) return os.path.islink('/etc/apache2/conf-enabled/{0}'.format(conf_file))
.. versionadded:: 2016.3.0 Checks to see if the specific conf symlink is in /etc/apache2/conf-enabled. This will only be functional on Debian-based operating systems (Ubuntu, Mint, etc). CLI Examples: .. code-block:: bash salt '*' apache.check_conf_enabled security salt '*' apache.check_conf_enabled security.conf
5,419
def cashFlowDF(symbol, token='', version=''): val = cashFlow(symbol, token, version) df = pd.io.json.json_normalize(val, 'cashflow', 'symbol') _toDatetime(df) _reindex(df, 'reportDate') df.replace(to_replace=[None], value=np.nan, inplace=True) return df
Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years). https://iexcloud.io/docs/api/#cash-flow Updates at 8am, 9am UTC daily Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
5,420
def stop(self): with self._lock: self._stop_event.set() self._shell_event.clear() if self._context is not None: self._context.remove_service_listener(self) self.clear_shell() self._context = None
Clears all members
5,421
def getItemXML(): xml="" for item in range(NUM_GOALS): x = str(random.randint(old_div(-ARENA_WIDTH,2),old_div(ARENA_WIDTH,2))) z = str(random.randint(old_div(-ARENA_BREADTH,2),old_div(ARENA_BREADTH,2))) xml += '<DrawItem x="' + x + '" y="210" z="' + z + '" type="' + GOAL_TYPE + '"/>' return xml
Build an XML string that contains some randomly positioned goal items
5,422
def get_summaries(client, filter=None): try: index = 0 while True: rb = _RightBarPage(client, index) summaries = rb.summaries() if filter is not None: summaries = filter.filter(summaries) for summary in summaries: yield summary index += len(summaries) except StopIteration: pass
Generate presentation summaries in a reverse chronological order. A filter class can be supplied to filter summaries or bound the fetching process.
5,423
def gmdaOnes(shape, dtype, mask=None, numGhosts=1): res = GhostedMaskedDistArray(shape, dtype) res.mask = mask res.setNumberOfGhosts(numGhosts) res[:] = 1 return res
ghosted distributed array one constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
5,424
def read_ipv6_route(self, length, extension): if length is None: length = len(self) _next = self._read_protos(1) _hlen = self._read_unpack(1) _type = self._read_unpack(1) _left = self._read_unpack(1) ipv6_route = dict( next=_next, length=(_hlen + 1) * 8, type=_ROUTING_TYPE.get(_type, 'Unassigned'), seg_left=_left, ) _dlen = _hlen * 8 - 4 if _dlen: _func = _ROUTE_PROC.get(_type, 'none') _data = eval(f'self._read_data_type_{_func}')(_dlen) ipv6_route.update(_data) length -= ipv6_route['length'] ipv6_route['packet'] = self._read_packet(header=ipv6_route['length'], payload=length) if extension: self._protos = None return ipv6_route return self._decode_next_layer(ipv6_route, _next, length)
Read Routing Header for IPv6. Structure of IPv6-Route header [RFC 8200][RFC 5095]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . type-specific data . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.data Type-Specific Data
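The fixed four octets described in the table parse with a single struct call; a minimal standalone sketch (not the class method above), with the length rule from RFC 8200:

    import struct

    def parse_route_fixed(raw: bytes) -> dict:
        # Next Header, Hdr Ext Len, Routing Type, Segments Left: one octet each
        nxt, hlen, rtype, seg_left = struct.unpack('!BBBB', raw[:4])
        # Hdr Ext Len counts 8-octet units, not including the first 8 octets
        return dict(next=nxt, length=(hlen + 1) * 8, type=rtype, seg_left=seg_left)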
5,425
def get_event_type(self, event_type): route_values = {} if event_type is not None: route_values[] = self._serialize.url(, event_type, ) response = self._send(http_method=, location_id=, version=, route_values=route_values) return self._deserialize(, response)
GetEventType. [Preview API] Get a specific event type. :param str event_type: :rtype: :class:`<NotificationEventType> <azure.devops.v5_0.notification.models.NotificationEventType>`
5,426
def contents(self, path, ref=None): url = self._build_url('contents', path, base_url=self._api) json = self._json(self._get(url, params={'ref': ref}), 200) if isinstance(json, dict): return Contents(json, self) elif isinstance(json, list): return dict((j.get('name'), Contents(j, self)) for j in json) return None
Get the contents of the file pointed to by ``path``. If the path provided is actually a directory, you will receive a dictionary back of the form:: { 'filename.md': Contents(), # Where Contents an instance 'github.py': Contents(), } :param str path: (required), path to file, e.g. github3/repo.py :param str ref: (optional), the string name of a commit/branch/tag. Default: master :returns: :class:`Contents <github3.repos.contents.Contents>` or dict if successful, else None
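A usage sketch, assuming an authenticated github3.py Repository instance named repo (the path is illustrative):

    listing = repo.contents('docs')  # a directory returns a dict keyed by filename
    if isinstance(listing, dict):
        for filename in sorted(listing):
            print(filename)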
5,427
def frame2expnum(frameid): result = {} parts = re.search(, frameid) assert parts is not None result[] = parts.group() result[] = parts.group() result[] = parts.group() return result
Given a standard OSSOS frameid return the expnum, version and ccdnum as a dictionary.
5,428
def ls(self, startswith=None): logger.info('ls: startswith=%s' % startswith) startswith = unicode(startswith or '') tables = sorted(name for name in self.db_tables if name.startswith(startswith)) return tables
List all cubes available to the calling client. :param startswith: string to use in a simple "startswith" query filter :returns list: sorted list of cube names
5,429
def facade(factory): wrapper = FacadeDescriptor(factory.__name__, factory) return update_wrapper(wrapper, factory)
Declare a method as a facade factory.
5,430
def gaussian_convolve (maj1, min1, pa1, maj2, min2, pa2): c1 = np.cos (pa1) s1 = np.sin (pa1) c2 = np.cos (pa2) s2 = np.sin (pa2) a = (maj1*c1)**2 + (min1*s1)**2 + (maj2*c2)**2 + (min2*s2)**2 b = (maj1*s1)**2 + (min1*c1)**2 + (maj2*s2)**2 + (min2*c2)**2 g = 2 * ((min1**2 - maj1**2) * s1 * c1 + (min2**2 - maj2**2) * s2 * c2) s = a + b t = np.sqrt ((a - b)**2 + g**2) maj3 = np.sqrt (0.5 * (s + t)) min3 = np.sqrt (0.5 * (s - t)) if abs (g) + abs (a - b) == 0: pa3 = 0. else: pa3 = 0.5 * np.arctan2 (-g, a - b) return maj3, min3, pa3
Convolve two Gaussians analytically. Given the shapes of two 2-dimensional Gaussians, this function returns the shape of their convolution. Arguments: maj1 Major axis of input Gaussian 1. min1 Minor axis of input Gaussian 1. pa1 Orientation angle of input Gaussian 1, in radians. maj2 Major axis of input Gaussian 2. min2 Minor axis of input Gaussian 2. pa2 Orientation angle of input Gaussian 2, in radians. The return value is ``(maj3, min3, pa3)``, with the same format as the input arguments. The axes can be measured in any units, so long as they're consistent. Implementation copied from MIRIAD’s ``gaufac``.
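A worked call (assuming the function is importable): convolving a Gaussian with itself doubles the variance, so each axis grows by sqrt(2) while the angle is preserved:

    import numpy as np

    maj, minor, pa = gaussian_convolve(3.0, 2.0, 0.0, 3.0, 2.0, 0.0)
    assert np.isclose(maj, 3 * np.sqrt(2)) and np.isclose(minor, 2 * np.sqrt(2))
    assert pa == 0.0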
5,431
def _get_goslimids_norel(self, dagslim): go_slims = set() go2obj = self.gosubdag.go2obj for goid in dagslim: goobj = go2obj[goid] if not goobj.relationship: go_slims.add(goobj.id) return go_slims
Get all GO slim GO IDs that do not have a relationship.
5,432
def create_guest_screen_info(self, display, status, primary, change_origin, origin_x, origin_y, width, height, bits_per_pixel): if not isinstance(display, baseinteger): raise TypeError("display can only be an instance of type baseinteger") if not isinstance(status, GuestMonitorStatus): raise TypeError("status can only be an instance of type GuestMonitorStatus") if not isinstance(primary, bool): raise TypeError("primary can only be an instance of type bool") if not isinstance(change_origin, bool): raise TypeError("change_origin can only be an instance of type bool") if not isinstance(origin_x, baseinteger): raise TypeError("origin_x can only be an instance of type baseinteger") if not isinstance(origin_y, baseinteger): raise TypeError("origin_y can only be an instance of type baseinteger") if not isinstance(width, baseinteger): raise TypeError("width can only be an instance of type baseinteger") if not isinstance(height, baseinteger): raise TypeError("height can only be an instance of type baseinteger") if not isinstance(bits_per_pixel, baseinteger): raise TypeError("bits_per_pixel can only be an instance of type baseinteger") guest_screen_info = self._call("createGuestScreenInfo", in_p=[display, status, primary, change_origin, origin_x, origin_y, width, height, bits_per_pixel]) guest_screen_info = IGuestScreenInfo(guest_screen_info) return guest_screen_info
Make a IGuestScreenInfo object with the provided parameters. in display of type int The number of the guest display. in status of type :class:`GuestMonitorStatus` @c True, if this guest screen is enabled, @c False otherwise. in primary of type bool Whether this guest monitor must be primary. in change_origin of type bool @c True, if the origin of the guest screen should be changed, @c False otherwise. in origin_x of type int The X origin of the guest screen. in origin_y of type int The Y origin of the guest screen. in width of type int The width of the guest screen. in height of type int The height of the guest screen. in bits_per_pixel of type int The number of bits per pixel of the guest screen. return guest_screen_info of type :class:`IGuestScreenInfo` The created object.
5,433
def flip(self): tmp = self.A.xyz self.A = self.B self.B = tmp
:returns: None Swaps the positions of A and B.
5,434
def _get_argument(self, argument_node): argument = FritzActionArgument() argument.name = argument_node.find(self.nodename('name')).text argument.direction = argument_node.find(self.nodename('direction')).text rsv = argument_node.find(self.nodename('relatedStateVariable')).text argument.data_type = self.state_variables.get(rsv, None) return argument
Returns a FritzActionArgument instance for the given argument_node.
5,435
def cublasSdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc): status = _libcublas.cublasSdgmm(handle, _CUBLAS_SIDE[mode], m, n, int(A), lda, int(x), incx, int(C), ldc) cublasCheckStatus(status)
Matrix-diagonal matrix product for real general matrix.
5,436
def get_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) else: (data) = cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs) return data
Find BraintreeGateway Return single instance of BraintreeGateway by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_braintree_gateway_by_id(braintree_gateway_id, async=True) >>> result = thread.get() :param async bool :param str braintree_gateway_id: ID of braintreeGateway to return (required) :return: BraintreeGateway If the method is called asynchronously, returns the request thread.
5,437
def graph(data): title = data[] + + data[] + plt.title(title) plt.xlabel() plt.ylabel() rf,ef=graphdata(data) col=[, , ] for i in range(len(rf)): x,y=ef[i],rf[i] k = i + 1 plt.plot(x, y,color=col[i%3]) x1, x2, y1, y2 = plt.axis() y2 = 10 if y1 > 7: y1 = 7 plt.axis([x1, x2, y1, y2]) plt.show()
Draws graph of rating vs episode number
5,438
def format_BLB(): rc("figure", facecolor="white") rc("font", family="serif", size=10) rc("xtick", labelsize=10) rc("ytick", labelsize=10) rc("axes", linewidth=1) rc("xtick.major", size=4, width=1) rc("xtick.minor", size=2, width=1) rc("ytick.major", size=4, width=1) rc("ytick.minor", size=2, width=1)
Sets some formatting options in Matplotlib.
5,439
def insert(self, table, insert_obj, ignore=True): if isinstance(insert_obj, pd.DataFrame): if insert_obj.empty: raise ValueError('The insert_obj DataFrame must not be empty!') insert_obj = insert_obj.to_dict(orient='records') elif not isinstance(insert_obj, list): raise ValueError( f"The {reprlib.repr(insert_obj)} must be list of dicts type!") ignore_str = 'IGNORE' if ignore else '' return self._session.execute( table.__table__.insert().prefix_with(ignore_str), insert_obj)
[insert bulk data] Arguments: table {[DeclarativeMeta cls]} -- [reflection of table] insert_obj {[pd.DataFrame or list of dicts]} -- [insert_obj] Keyword Arguments: ignore {bool} -- [whether to ignore exceptions or not] (default: {True}) Raises: ValueError -- [f"The {reprlib.repr(insert_obj)} must be list of dicts type!"] Returns: [type] -- [description]
5,440
def _params_extend(params, _ignore_name=False, **kwargs): for key in kwargs: if not key.startswith('_'): params[key] = kwargs[key] if _ignore_name: params.pop('name', None) if 'firstname' in params: params['name'] = params.pop('firstname') elif 'visible_name' in params: params['name'] = params.pop('visible_name') return params
Extends the params dictionary by values from keyword arguments. .. versionadded:: 2016.3.0 :param params: Dictionary with parameters for zabbix API. :param _ignore_name: Salt State module is passing first line as 'name' parameter. If API uses optional parameter 'name' (for ex. host_create, user_create method), please use 'visible_name' or 'firstname' instead of 'name' to not mess these values. :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Extended params dictionary with parameters.
5,441
def _generate(self, message): raw_params = {"INPUT_TEXT" : message.encode('utf-8'), "INPUT_TYPE" : self.input_type, "OUTPUT_TYPE" : self.output_type, "LOCALE" : self._locale, "AUDIO" : self.audio, "VOICE" : self._voice, } params = urlencode(raw_params) headers = {} logging.debug('%s' % repr(raw_params)) conn = httplib.HTTPConnection(self._host, self._port) conn.request("POST", "/process", params, headers) response = conn.getresponse() if response.status != 200: logging.error(response.getheaders()) raise Exception ("{0}: {1}".format(response.status, response.reason)) return response.read()
Given a message in message, return a response in the appropriate format.
5,442
def get_provider(): global _provider if _provider is None: if sys.platform.startswith('linux'): from .bluez_dbus.provider import BluezProvider _provider = BluezProvider() elif sys.platform == 'darwin': from .corebluetooth.provider import CoreBluetoothProvider _provider = CoreBluetoothProvider() else: raise RuntimeError('Unsupported platform: {0}'.format(sys.platform)) return _provider
Return an instance of the BLE provider for the current platform.
5,443
def add_file_normal(f, targetdir, generator,script, source): basename = os.path.basename(f) if targetdir != ".": relativepath = os.path.join(targetdir, basename) else: relativepath = basename relpath = os.path.relpath(f, os.getcwd()) filetype = if script: filetype = if generator: filetype = update = OrderedDict([ (, filetype), (, generator), (, relativepath), (, ""), (, source), (, f), (, relpath) ]) update = annotate_record(update) return (basename, update)
Add a normal file including its source
5,444
def files(self, *, bundle: str=None, tags: List[str]=None, version: int=None, path: str=None) -> models.File: query = self.File.query if bundle: query = (query.join(self.File.version, self.Version.bundle) .filter(self.Bundle.name == bundle)) if tags: query = ( query.join(self.File.tags) .filter(self.Tag.name.in_(tags)) .group_by(models.File.id) .having(func.count(models.Tag.name) == len(tags)) ) if version: query = query.join(self.File.version).filter(self.Version.id == version) if path: query = query.filter_by(path=path) return query
Fetch files from the store.
5,445
def any_unique(keys, axis=semantics.axis_default): index = as_index(keys, axis) return np.any(index.count == 1)
returns true if any of the keys is unique
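The same semantics with plain numpy for 1-D keys (the helper above goes through an index object; this is just the check it performs):

    import numpy as np

    keys = np.array([1, 2, 2, 3, 3])
    _, counts = np.unique(keys, return_counts=True)
    assert np.any(counts == 1)   # True: the key 1 occurs exactly once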
5,446
def json_changepass(self): post_data = self.get_post_data() check_usr_status = MUser.check_user(self.userinfo.uid, post_data[]) if check_usr_status == 1: user_create_status = self.__check_valid_pass(post_data) if not user_create_status[]: return json.dump(user_create_status, self) form_pass = SumFormPass(self.request.arguments) if form_pass.validate(): MUser.update_pass(self.userinfo.uid, post_data[]) return json.dump(user_create_status, self) return json.dump(user_create_status, self) return False
The first char of 'code' stands for the different field. '1' for user_name '2' for user_email '3' for user_pass '4' for user_role The second char of 'code' stands for different status. '1' for invalid '2' for already exists.
5,447
def dataframe(self): frames = [] for game in self.__iter__(): df = game.dataframe if df is not None: frames.append(df) if frames == []: return None return pd.concat(frames)
Returns a pandas DataFrame where each row is a representation of the Game class. Rows are indexed by the boxscore string.
5,448
def get_devices(device_type: DeviceType) -> Iterator[str]: for device in BASE_PATH.iterdir(): with open(str(Path(device, 'type'))) as type_file: if type_file.readline().strip() == device_type.value: yield device.name
Gets names of power devices of the specified type. :param DeviceType device_type: the type of the devices to retrieve :return: the device names :rtype: Iterator[str]
5,449
def expand_requirement(request, paths=None): if "*" not in request: return request from rez.vendor.version.version import VersionRange from rez.vendor.version.requirement import Requirement from rez.packages_ import get_latest_package from uuid import uuid4 wildcard_map = {} expanded_versions = {} request_ = request while "**" in request_: uid = "_%s_" % uuid4().hex request_ = request_.replace("**", uid, 1) wildcard_map[uid] = "**" while "*" in request_: uid = "_%s_" % uuid4().hex request_ = request_.replace("*", uid, 1) wildcard_map[uid] = "*" req = Requirement(request_, invalid_bound_error=False) def expand_version(version): rank = len(version) wildcard_found = False while version and str(version[-1]) in wildcard_map: token = wildcard_map[str(version[-1])] version = version.trim(len(version) - 1) if token == "**": if wildcard_found: return None else: wildcard_found = True rank = 0 break wildcard_found = True if not wildcard_found: return None range_ = VersionRange(str(version)) package = get_latest_package(name=req.name, range_=range_, paths=paths) if package is None: return version if rank: return package.version.trim(rank) else: return package.version def visit_version(version): for v, expanded_v in expanded_versions.iteritems(): if version == v.next(): return expanded_v.next() version_ = expand_version(version) if version_ is None: return None expanded_versions[version] = version_ return version_ if req.range_ is not None: req.range_.visit_versions(visit_version) result = str(req) expanded_req = Requirement(result) return str(expanded_req)
Expands a requirement string like 'python-2.*', 'foo-2.*+<*', etc. Wildcards are expanded to the latest version that matches. There is also a special wildcard '**' that will expand to the full version, but it cannot be used in combination with '*'. Wildcards MUST placehold a whole version token, not partial - while 'foo-2.*' is valid, 'foo-2.v*' is not. Wildcards MUST appear at the end of version numbers - while 'foo-1.*.*' is valid, 'foo-1.*.0' is not. It is possible that an expansion will result in an invalid request string (such as 'foo-2+<2'). The appropriate exception will be raised if this happens. Examples: >>> print expand_requirement('python-2.*') python-2.7 >>> print expand_requirement('python==2.**') python==2.7.12 >>> print expand_requirement('python<**') python<3.0.5 Args: request (str): Request to expand, eg 'python-2.*' paths (list of str, optional): paths to search for package families, defaults to `config.packages_path`. Returns: str: Expanded request string.
5,450
def IntegerAbs(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: return Integer(context.jvm_view().IntegerAbsVertex, label, cast_to_integer_vertex(input_vertex))
Takes the absolute value of a vertex :param input_vertex: the vertex
5,451
def add_filter(self, component, filter_group="pyxley-filter"): if getattr(component, "name") != "Filter": raise Exception("Component is not an instance of Filter") if filter_group not in self.filters: self.filters[filter_group] = [] self.filters[filter_group].append(component)
Add a filter to the layout.
5,452
def GetTaskPendingMerge(self, current_task): next_task = self._tasks_pending_merge.PeekTask() if not next_task: return None if current_task and next_task.merge_priority > current_task.merge_priority: return None with self._lock: next_task = self._tasks_pending_merge.PopTask() self._tasks_merging[next_task.identifier] = next_task return next_task
Retrieves the first task that is pending merge or has a higher priority. This function will check if there is a task with a higher merge priority than the current_task being merged. If so, that task with the higher priority is returned. Args: current_task (Task): current task being merged or None if no such task. Returns: Task: the next task to merge or None if there is no task pending merge or with a higher priority.
5,453
def style_similarity(page1, page2): classes_page1 = get_classes(page1) classes_page2 = get_classes(page2) return jaccard_similarity(classes_page1, classes_page2)
Computes CSS style Similarity between two DOM trees A = classes(Document_1) B = classes(Document_2) style_similarity = |A & B| / (|A| + |B| - |A & B|) :param page1: html of the page1 :param page2: html of the page2 :return: Number between 0 and 1. If the number is next to 1 the page are really similar.
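A minimal sketch of the Jaccard step on two pre-extracted class sets (get_classes is assumed to return the set of CSS class names in a page):

    def jaccard_similarity(a, b):
        a, b = set(a), set(b)
        if not a and not b:
            return 1.0  # convention chosen here: two class-less pages count as identical
        intersection = len(a & b)
        return intersection / (len(a) + len(b) - intersection)

    assert jaccard_similarity({'nav', 'hero'}, {'nav', 'footer'}) == 1 / 3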
5,454
def _normalize_str(self, string): if string: if not isinstance(string, str): string = str(string, 'utf-8', 'replace') return unicodedata.normalize('NFKD', string).encode( 'ASCII', 'ignore').decode() return ''
Remove special characters and strip spaces
5,455
def _finalized_internal(self, context, pipeline_key, root_pipeline_key, caller_output, aborted): result_status = _PipelineRecord.RUN if aborted: result_status = _PipelineRecord.ABORTED self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, result_status) logging.debug('Finalizing %s(*%s, **%s)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) try: self.finalized() except NotImplementedError: pass
Used by the Pipeline evaluator to finalize this Pipeline.
5,456
def is_valid_sound_tuple(sound_tuple, final_form=True): sound_tuple = SoundTuple._make([s.lower() for s in sound_tuple]) if not sound_tuple.vowel: result = True elif final_form: result = \ has_valid_consonants(sound_tuple) and \ has_valid_vowel(sound_tuple) and \ has_valid_accent(sound_tuple) else: result = \ has_valid_consonants(sound_tuple) and \ has_valid_vowel_non_final(sound_tuple) return result
Check if a character combination complies with Vietnamese phonology. The basic idea is that if one can pronounce a sound_tuple then it's valid. Sound tuples containing consonants exclusively (almost always abbreviations) are also valid. Input: sound_tuple - a SoundTuple final_form - whether the tuple represents a complete word Output: True if the tuple seems to be Vietnamese, False otherwise.
5,457
def _get_max_size(parts, size=1): max_group_size = 0 for part in parts: if isinstance(part, list): group_size = 0 for input_group in part: group_size += 1 if group_size > max_group_size: max_group_size = group_size magic_size = _get_magic_size(parts) return max_group_size * magic_size
Given a list of parts, find the maximum number of commands contained in it.
5,458
def get_new_oids(self): table = self.lconfig.get('table') _oid = self.lconfig.get('_oid') if is_array(_oid): _oid = _oid[0] last_id = self.container.get_last_field(field='_oid') ids = [] if last_id: try: last_id = float(last_id) where = "%s.%s > %s" % (table, _oid, last_id) except (TypeError, ValueError): where = "%s.%s > '%s'" % (table, _oid, last_id) ids = self.sql_get_oids(where) return ids
Returns a list of unique oids that have not been extracted yet. Essentially, a diff of distinct oids in the source database compared to cube.
5,459
def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'): source_string = open(source, 'r').read() return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=group_marker_closing)
Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i'
5,460
def get_most_recent_release(self, group, artifact, remote=False): url = self._base_url + '/api/search/latestVersion' params = {'g': group, 'a': artifact, 'repos': self._repo, 'remote': int(remote)} self._logger.debug("Using latest version API at %s - params %s", url, params) response = self._session.get(url, params=params) response.raise_for_status() return response.text.strip()
Get the version number of the most recent release (non-integration version) of a particular group and artifact combination. :param str group: Group of the artifact to get the version of :param str artifact: Name of the artifact to get the version of :param bool remote: Should remote repositories be searched to find the latest version? Note this can make the request much slower. Default is false. :return: Version number of the most recent release :rtype: str :raises requests.exceptions.HTTPError: For any non-success HTTP responses from the Artifactory API.
5,461
def StaticAdd(cls, collection_urn, rdf_value, timestamp=None, suffix=None, mutation_pool=None): if not isinstance(rdf_value, cls.RDF_TYPE): raise ValueError("This collection only accepts values of type %s." % cls.RDF_TYPE.__name__) if mutation_pool is None: raise ValueError("Mutation pool can't be none.") if timestamp is None: timestamp = rdfvalue.RDFDatetime.Now() if isinstance(timestamp, rdfvalue.RDFDatetime): timestamp = timestamp.AsMicrosecondsSinceEpoch() if not rdf_value.age: rdf_value.age = rdfvalue.RDFDatetime.Now() if not isinstance(collection_urn, rdfvalue.RDFURN): collection_urn = rdfvalue.RDFURN(collection_urn) _, timestamp, suffix = mutation_pool.CollectionAddItem( collection_urn, rdf_value, timestamp, suffix=suffix) return timestamp, suffix
Adds an rdf value to a collection. Adds an rdf value to a collection. Does not require that the collection be open. NOTE: The caller is responsible for ensuring that the collection exists and is of the correct type. Args: collection_urn: The urn of the collection to add to. rdf_value: The rdf value to add to the collection. timestamp: The timestamp (in microseconds) to store the rdf value at. Defaults to the current time. suffix: A 'fractional timestamp' suffix to reduce the chance of collisions. Defaults to a random number. mutation_pool: A MutationPool object to write to. Returns: The pair (timestamp, suffix) which identifies the value within the collection. Raises: ValueError: rdf_value has unexpected type.
5,462
def list_since(self, message_id, limit=None): return self.list(since_id=message_id, limit=limit)
Return a page of group messages created since a message. This is used to fetch the most recent messages after another. There may exist messages between the one given and the ones returned. Use :func:`list_after` to retrieve newer messages without skipping any. :param str message_id: the ID of a message :param int limit: maximum number of messages per page :return: group messages :rtype: :class:`~groupy.pagers.MessageList`
5,463
def get_rejection_reasons(self, keyword=None): keys = ['selected', 'other'] if keyword is None: return sum(map(self.get_rejection_reasons, keys), []) if keyword not in keys: return [] rejection_reasons = self.context.getRejectionReasons() rejection_reasons = rejection_reasons and rejection_reasons[0] or {} if keyword == 'other': return rejection_reasons.get(keyword, '') and [rejection_reasons.get(keyword, '')] or [] return rejection_reasons.get(keyword, [])
Returns a list with the rejection reasons as strings :param keyword: set of rejection reasons to be retrieved. Possible values are: - 'selected': Get, amongst the set of predefined reasons, the ones selected - 'other': Get the user free-typed reason for rejection - None: Get all rejection reasons :return: list of rejection reasons as strings or an empty list
5,464
def wait_on_receipt(self): with self.receipt_condition: while not self.received: self.receipt_condition.wait() self.received = False
Wait until we receive a message receipt.
5,465
def set_extra_info(self, username, extra_info): url = self._get_extra_info_url(username) make_request(url, method='PUT', body=extra_info, timeout=self.timeout)
Set extra info for the given user. Raise a ServerError if an error occurs in the request process. @param username The username for the user to update. @param info The extra info as a JSON encoded string, or as a Python dictionary like object.
5,466
def subarc_between_points(self, p_from=None, p_to=None): a_from = self.point_as_angle(p_from) if p_from is not None else None a_to = self.point_as_angle(p_to) if p_to is not None else None return self.subarc(a_from, a_to)
Given two points on the arc, extract a sub-arc between those points. No check is made to verify the points are actually on the arc. It is basically a wrapper around subarc(point_as_angle(p_from), point_as_angle(p_to)). Either p_from or p_to may be None to denote first or last arc endpoints. >>> a = Arc((0, 0), 1, 0, 90, True) >>> a.subarc_between_points((1, 0), (np.cos(np.pi/4), np.sin(np.pi/4))) Arc([0.000, 0.000], 1.000, 0.000, 45.000, True, degrees=45.000) >>> a.subarc_between_points(None, None) Arc([0.000, 0.000], 1.000, 0.000, 90.000, True, degrees=90.000) >>> a.subarc_between_points((np.cos(np.pi/4), np.sin(np.pi/4))) Arc([0.000, 0.000], 1.000, 45.000, 90.000, True, degrees=45.000)
5,467
def sorted_bases(bases): ret = [] for base in bases: lst = _bases(base) if not ret: ret = lst elif not any(b in ret for b in lst): ret += lst else: buf = [] for b in lst: if b in ret: if buf: ret = graft(ret, buf, ret.index(b)) buf = [] else: buf.append(b) if buf: ret += buf return ret
If a class subclasses each class in bases (in that order), then this function returns the would-be python mro for the created class, minus <object>.
5,468
def read_h5ad(filename, backed: Optional[str] = None, chunk_size: int = 6000): if isinstance(backed, bool): backed = 'r' if backed else None warnings.warn( "In a future version, read_h5ad will no longer explicitly support " "boolean arguments. Specify the read mode, or leave `backed=None`.", DeprecationWarning, ) if backed: return AnnData(filename=filename, filemode=backed) else: constructor_args = _read_args_from_h5ad(filename=filename, chunk_size=chunk_size) X = constructor_args[0] dtype = None if X is not None: dtype = X.dtype.name return AnnData(*_read_args_from_h5ad(filename=filename, chunk_size=chunk_size), dtype=dtype)
Read ``.h5ad``-formatted hdf5 file. Parameters ---------- filename File name of data file. backed : {``None``, ``'r'``, ``'r+'``} If ``'r'``, load :class:`~anndata.AnnData` in ``backed`` mode instead of fully loading it into memory (`memory` mode). If you want to modify backed attributes of the AnnData object, you need to choose ``'r+'``. chunk_size Used only when loading sparse dataset that is stored as dense. Loading iterates through chunks of the dataset of this row size until it reads the whole dataset. Higher size means higher memory consumption and higher loading speed.
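Typical calls (the file name is hypothetical):

    adata = read_h5ad('pbmc.h5ad')                     # fully load into memory
    adata_backed = read_h5ad('pbmc.h5ad', backed='r')  # lazy, read-only backed mode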
5,469
def no_login_required(func): @functools.wraps(func) def decorated_view(*args, **kwargs): return func(*args, **kwargs) return decorated_view
Dummy decorator. @login_required will inspect the method to look for this decorator. Use this decorator when you do not want to require login in a "@login_required" class/method :param func: :return:
5,470
def verify(self, secret_key): verification_input = NotificationMessage.SERVICE_NAME verification_input += NotificationMessage.OPERATION_NAME verification_input += self.timestamp h = hmac.new(key=secret_key, digestmod=sha) h.update(verification_input) signature_calc = base64.b64encode(h.digest()) return self.signature == signature_calc
Verifies the authenticity of a notification message. TODO: This is doing a form of authentication and this functionality should really be merged with the pluggable authentication mechanism at some point.
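The same verification written against the modern stdlib with a constant-time comparison; a sketch under the field layout used above, not the library's code:

    import base64
    import hashlib
    import hmac

    def verify_signature(secret_key: bytes, service: str, operation: str,
                         timestamp: str, signature: str) -> bool:
        msg = (service + operation + timestamp).encode()
        digest = hmac.new(secret_key, msg, hashlib.sha1).digest()
        return hmac.compare_digest(base64.b64encode(digest).decode(), signature)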
5,471
def manage_file(name, sfn, ret, source, source_sum, user, group, mode, attrs, saltenv, backup, makedirs=False, template=None, show_changes=True, contents=None, dir_mode=None, follow_symlinks=True, skip_verify=False, keep_mode=False, encoding=None, encoding_errors=, seuser=None, serole=None, setype=None, serange=None, **kwargs): strictstrict*{}{hash_type: , : <md5sum>}755 name = os.path.expanduser(name) if not ret: ret = {: name, : {}, : , : True} if source_sum and ( in source_sum): source_sum[] = source_sum[].lower() if source: if not sfn: sfn = __salt__[](source, saltenv) if not sfn: return _error( ret, {0}\.format(source)) htype = source_sum.get(, __opts__[]) source_sum = { : htype, : get_hash(sfn, form=htype) } if keep_mode: if _urlparse(source).scheme in (, , ): try: mode = __salt__[](source, saltenv=saltenv, octal=True) except Exception as exc: log.warning(, sfn, exc) if os.path.isfile(name) or os.path.islink(name): if os.path.islink(name) and follow_symlinks: real_name = os.path.realpath(name) else: real_name = name if source and not (not follow_symlinks and os.path.islink(real_name)): name_sum = get_hash(real_name, source_sum.get(, __opts__[])) else: name_sum = None if source and (name_sum is None or source_sum.get(, __opts__[]) != name_sum): if not sfn: sfn = __salt__[](source, saltenv) if not sfn: return _error( ret, {0}\.format(source)) if not skip_verify \ and _urlparse(source).scheme != : dl_sum = get_hash(sfn, source_sum[]) if dl_sum != source_sum[]: ret[] = ( source_hash\ source_hash_name\.format( source_sum[], source, source_sum[], dl_sum ) ) ret[] = False return ret if __salt__[](): ret[][] = elif not show_changes: ret[][] = else: try: ret[][] = get_diff( real_name, sfn, show_filenames=False) except CommandExecutionError as exc: ret[][] = exc.strerror try: salt.utils.files.copyfile(sfn, real_name, __salt__[](backup), __opts__[]) except IOError as io_error: __clean_tmp(sfn) return _error( ret, .format(io_error)) if contents is not None: tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.platform.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.files.fopen(tmp, ) as tmp_: if encoding: log.debug(, encoding) tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors)) else: tmp_.write(salt.utils.stringutils.to_bytes(contents)) try: differences = get_diff( real_name, tmp, show_filenames=False, show_changes=show_changes, template=True) except CommandExecutionError as exc: ret.setdefault(, []).append( .format(exc.strerror) ) differences = if differences: ret[][] = differences try: salt.utils.files.copyfile(tmp, real_name, __salt__[](backup), __opts__[]) except IOError as io_error: __clean_tmp(tmp) return _error( ret, .format(io_error)) __clean_tmp(tmp) if os.path.islink(name) and not follow_symlinks: if not sfn: sfn = __salt__[](source, saltenv) if not sfn: return _error( ret, {0}\.format(source)) if not skip_verify and _urlparse(source).scheme != : dl_sum = get_hash(sfn, source_sum[]) if dl_sum != source_sum[]: ret[] = ( .format( source_sum[], name, source_sum[], dl_sum ) ) ret[] = False return ret try: salt.utils.files.copyfile(sfn, name, __salt__[](backup), __opts__[]) except IOError as io_error: __clean_tmp(sfn) return _error( ret, .format(io_error)) ret[][] = \ if salt.utils.platform.is_windows(): ret = check_perms( path=name, ret=ret, owner=kwargs.get(), grant_perms=kwargs.get(), deny_perms=kwargs.get(), inheritance=kwargs.get(, True), reset=kwargs.get(, 
False)) else: ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks, seuser=seuser, serole=serole, setype=setype, serange=serange) if ret[]: ret[] = .format( salt.utils.data.decode(name) ) elif not ret[] and ret[]: ret[] = .format( salt.utils.data.decode(name) ) if sfn: __clean_tmp(sfn) return ret else: contain_dir = os.path.dirname(name) def _set_mode_and_make_dirs(name, dir_mode, mode, user, group): if salt.utils.platform.is_windows(): drive, _ = os.path.splitdrive(name) if drive and not os.path.exists(drive): __clean_tmp(sfn) return _error(ret, .format(drive)) if dir_mode is None and mode is not None: if sfn: __clean_tmp(sfn) return ret
Checks the destination against what was retrieved with get_managed and makes the appropriate modifications (if necessary). name location to place the file sfn location of cached file on the minion This is the path to the file stored on the minion. This file is placed on the minion using cp.cache_file. If the hash sum of that file matches the source_sum, we do not transfer the file to the minion again. This file is then grabbed and if it has template set, it renders the file to be placed into the correct place on the system using salt.files.utils.copyfile() ret The initial state return data structure. Pass in ``None`` to use the default structure. source file reference on the master source_sum sum hash for source user user owner group group owner backup backup_mode attrs attributes to be set on file: '' means remove all of them .. versionadded:: 2018.3.0 makedirs make directories if they do not exist template format of templating show_changes Include diff in state return contents: contents to be placed in the file dir_mode mode for directories created with makedirs skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 keep_mode : False If ``True``, and the ``source`` is a file from the Salt fileserver (or a local file on the minion), the mode of the destination file will be set to the mode of the source file. .. note:: keep_mode does not work with salt-ssh. As a consequence of how the files are transferred to the minion, and the inability to connect back to the master with salt-ssh, salt is unable to stat the file as it exists on the fileserver and thus cannot mirror the mode on the salt-ssh minion encoding If specified, then the specified encoding will be used. Otherwise, the file will be encoded using the system locale (usually UTF-8). See https://docs.python.org/3/library/codecs.html#standard-encodings for the list of available encodings. .. versionadded:: 2017.7.0 encoding_errors : 'strict' Default is ```'strict'```. See https://docs.python.org/2/library/codecs.html#codec-base-classes for the error handling schemes. .. versionadded:: 2017.7.0 seuser selinux user attribute .. versionadded:: Neon serole selinux role attribute .. versionadded:: Neon setype selinux type attribute .. versionadded:: Neon serange selinux range attribute .. versionadded:: Neon CLI Example: .. code-block:: bash salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base '' .. versionchanged:: 2014.7.0 ``follow_symlinks`` option added
5,472
def _read_loop_polling(self): while self.state == 'connected': self.logger.info('Sending polling GET request to ' + self.base_url) r = self._send_request( 'GET', self.base_url + self._get_url_timestamp()) if r is None: self.logger.warning( 'Connection refused by the server, aborting') self.queue.put(None) break if r.status_code != 200: self.logger.warning('Unexpected status code %s in server response, aborting', r.status_code) self.queue.put(None) break try: p = payload.Payload(encoded_payload=r.content) except ValueError: self.logger.warning( 'Unexpected packet from server, aborting') self.queue.put(None) break for pkt in p.packets: self._receive_packet(pkt) self.logger.info('Waiting for write loop task to end') self.write_loop_task.join() self.logger.info('Waiting for ping loop task to end') self.ping_loop_event.set() self.ping_loop_task.join() if self.state == 'connected': self._trigger_event('disconnect', run_async=False) try: connected_clients.remove(self) except ValueError: pass self._reset() self.logger.info('Exiting read loop task')
Read packets by polling the Engine.IO server.
5,473
def maybe_stream(s): if isinstance(s, Stream): return s if s is None: stream = InMemStream() stream.close() return stream return s
Ensure that the given argument is a stream.
5,474
async def set_speaker_settings(self, target: str, value: str): params = {"settings": [{"target": target, "value": value}]} return await self.services["audio"]["setSpeakerSettings"](params)
Set speaker settings.
5,475
def load(cls, file_path): data = helper.read_json(file_path) return ConciseCV.from_dict(data)
Load the object from a JSON file (saved with :py:func:`ConciseCV.save`) Returns: ConciseCV: Loaded ConciseCV object.
5,476
def xyinterp(x,y,xval): if len(x) != len(y): raise ValueError("Input arrays must be equal lengths") if xval < x[0]: raise ValueError("Value %f < min(x) %f: Extrapolation unsupported"%(xval,x[0])) if xval > x[-1]: raise ValueError("Value > max(x): Extrapolation unsupported") if x.argsort().all() != N.arange(len(x)).all(): raise ValueError("Input array x must be sorted") hi = x.searchsorted(xval) lo = hi - 1 try: seg = (float(xval)-x[lo]) / (x[hi] - x[lo]) except ZeroDivisionError: seg = 0.0 yval = y[lo] + seg*(y[hi] - y[lo]) return yval
:Purpose: Interpolates y based on the given xval. x and y are a pair of independent/dependent variable arrays that must be the same length. The x array must also be sorted. xval is a user-specified value. This routine looks up xval in the x array and uses that information to properly interpolate the value in the y array. Notes ===== Use the searchsorted method on the X array to determine the bin in which xval falls; then use that information to compute the corresponding y value. See Also ======== numpy Parameters ========== x: 1D numpy array independent variable array: MUST BE SORTED y: 1D numpy array dependent variable array xval: float the x value at which you want to know the value of y Returns ======= y: float the value of y corresponding to xval Raises ====== ValueError: If arrays are unequal length; or x array is unsorted; or if xval falls outside the bounds of x (extrapolation is unsupported :version: 0.1 last modified 2006-07-06
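A quick check that the lookup agrees with numpy's own interpolation (the arrays are illustrative):

    import numpy as N

    x = N.array([0.0, 1.0, 2.0])
    y = N.array([0.0, 10.0, 20.0])
    assert xyinterp(x, y, 1.5) == 15.0
    assert xyinterp(x, y, 1.5) == N.interp(1.5, x, y)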
5,477
def AFF4AddChild(self, subject, child, extra_attributes=None): precondition.AssertType(child, Text) attributes = { DataStore.AFF4_INDEX_DIR_TEMPLATE % child: [DataStore.EMPTY_DATA_PLACEHOLDER] } if extra_attributes: attributes.update(extra_attributes) self.MultiSet(subject, attributes)
Adds a child to the specified parent.
5,478
def render_table(data, headers=None): builder = HtmlBuilder() builder._render_objects(data, headers, datatype='dict') return builder._to_html()
Return a dictionary list formatted as an HTML table. Args: data: a list of dictionaries, one per row. headers: the keys in the dictionary to use as table columns, in order.
5,479
def distance(pos0, pos1): r0, pa0 = pos0 ra0 = r0*np.sin(pa0*np.pi/180) dec0 = r0*np.cos(pa0*np.pi/180) r1, pa1 = pos1 ra1 = r1*np.sin(pa1*np.pi/180) dec1 = r1*np.cos(pa1*np.pi/180) dra = (ra1 - ra0) ddec = (dec1 - dec0) return np.sqrt(dra**2 + ddec**2)
distance between two positions defined by (separation, PA)
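A worked call: with both positions at the same position angle, the distance reduces to the difference of the separations:

    import numpy as np

    d = distance((1.0, 0.0), (2.0, 0.0))
    assert np.isclose(d, 1.0)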
5,480
def _get_api_version(self): url = "{base_url}/api/server_info".format(base_url=self._base_url()) server_info = self._make_request(url=url, method="get") return server_info["latest_api_version"]
Fetches the most recent API version Returns: str
5,481
def expiration_time(self): logging_forgotten_time = configuration.behavior.login_forgotten_seconds if logging_forgotten_time <= 0: return None now = timezone.now() delta = now - self.modified time_remaining = logging_forgotten_time - delta.seconds return time_remaining
Returns the time until this access attempt is forgotten.
5,482
def _reroot(self):
    rerooter = Rerooter()
    self.tree = rerooter.reroot_by_tree(self.reference_tree, self.tree)
Run the re-rooting algorithm in the Rerooter class.
5,483
def make_a_copy(self, location=None):
    import shutil
    destination = backup_name(location)
    shutil.copyfile(location, destination)
Creates a backup of the file specified in the location. The backup filename appends a .bak.N suffix, where N is a number not yet used in the backup directory.

TODO: This function should be moved to another file, maybe XShell

:param location: the location of the file to be backed up
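The backup_name helper is not shown in this snippet; a minimal sketch matching the docstring's .bak.N naming could look like this (hypothetical implementation):

import os

def backup_name(location):
    """First unused '<location>.bak.N' name (assumed behavior)."""
    n = 0
    while os.path.exists("{}.bak.{}".format(location, n)):
        n += 1
    return "{}.bak.{}".format(location, n)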
5,484
def real_out_dtype(self):
    if self.__real_out_dtype is None:
        # The message literal was lost in extraction; this wording is assumed.
        raise AttributeError(
            'no real variant of output dtype {} defined'
            ''.format(dtype_repr(self.scalar_out_dtype)))
    else:
        return self.__real_out_dtype
The real dtype corresponding to this space's `out_dtype`.
5,485
def get_scenario_data(scenario_id, **kwargs):
    # Key and value literals below were lost in extraction; 'user_id',
    # 'metadata', and the 'Y' hidden flag are assumed from context.
    user_id = kwargs.get('user_id')
    scenario_data = db.DBSession.query(Dataset).filter(
        Dataset.id == ResourceScenario.dataset_id,
        ResourceScenario.scenario_id == scenario_id).options(
            joinedload_all('metadata')).distinct().all()
    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            except:
                sd.value = None
                sd.metadata = []
    db.DBSession.expunge_all()
    log.info("Retrieved %s datasets", len(scenario_data))
    return scenario_data
Get all the datasets from the scenario with the specified ID

@returns a list of dictionaries
5,486
def start(name=None, id=None, bootpath=None, disk=None, disks=None,
          local_iface=False, memory=None, nics=0, switch=None):
    # String literals below were lost in extraction. The vmctl flags
    # (-i, -b, -m, -n, -L, -d) follow the vmctl(8) manual and the docstring;
    # the final cmd.run_all call is an assumed reconstruction of the elided
    # tail of this function.
    ret = {'changes': False, 'console': None}
    cmd = ['vmctl', 'start']

    if not (name or id):
        raise SaltInvocationError('Must provide either "name" or "id"')
    elif name:
        cmd.append(name)
    else:
        cmd.append(id)
        name = _id_to_name(id)

    if nics > 0:
        cmd.append('-i {}'.format(nics))
    if bootpath:
        cmd.extend(['-b', bootpath])
    if memory:
        cmd.append('-m {}'.format(memory))
    if switch:
        cmd.append('-n {}'.format(switch))
    if local_iface:
        cmd.append('-L')

    if disk and disks:
        raise SaltInvocationError('Provide either "disk" or "disks", not both')
    if disk:
        cmd.extend(['-d', disk])
    if disks:
        # The original passed a generator of lists to extend(), which would
        # nest the arguments; a flat per-disk extend is what vmctl expects.
        for x in disks:
            cmd.extend(['-d', x])

    result = __salt__['cmd.run_all'](cmd, output_loglevel='trace',
                                     python_shell=False)
    if result['retcode'] == 0:
        ret['changes'] = True
    return ret
Starts a VM defined by the specified parameters. When both a name and id are provided, the id is ignored. name: Name of the defined VM. id: VM id. bootpath: Path to a kernel or BIOS image to load. disk: Path to a single disk to use. disks: List of multiple disks to use. local_iface: Whether to add a local network interface. See "LOCAL INTERFACES" in the vmctl(8) manual page for more information. memory: Memory size of the VM specified in megabytes. switch: Add a network interface that is attached to the specified virtual switch on the host. CLI Example: .. code-block:: bash salt '*' vmctl.start 2 # start VM with id 2 salt '*' vmctl.start name=web1 bootpath='/bsd.rd' nics=2 memory=512M disk='/disk.img'
5,487
def _redshift(distance, **kwargs):
    cosmology = get_cosmology(**kwargs)
    return z_at_value(cosmology.luminosity_distance, distance, units.Mpc)
r"""Uses astropy to get redshift from the given luminosity distance. Parameters ---------- distance : float The luminosity distance, in Mpc. \**kwargs : All other keyword args are passed to :py:func:`get_cosmology` to select a cosmology. If none provided, will use :py:attr:`DEFAULT_COSMOLOGY`. Returns ------- float : The redshift corresponding to the given luminosity distance.
5,488
def get_wd_entity(self):
    # Literal keys and values were lost in extraction; the wbgetentities
    # parameters below are assumed from the MediaWiki API conventions.
    params = {
        'action': 'wbgetentities',
        'sites': 'enwiki',
        'ids': self.wd_item_id,
        'format': 'json'
    }
    headers = {
        'User-Agent': self.user_agent
    }
    json_data = self.mediawiki_api_call("GET", self.mediawiki_api_url,
                                        params=params, headers=headers)
    return self.parse_wd_json(wd_json=json_data['entities'][self.wd_item_id])
Retrieve a WD item in JSON representation from Wikidata

:rtype: dict
:return: python complex dictionary representation of a json
5,489
def to_scanner(self, x, y, z):
    if self.transform is None:
        raise ValueError("No transform set for MRSData object {}".format(self))
    # Append 1 for homogeneous coordinates, apply the affine, drop the 1.
    transformed_point = self.transform * numpy.matrix([x, y, z, 1]).T
    return numpy.squeeze(numpy.asarray(transformed_point))[0:3]
Converts a 3d position in MRSData space to the scanner reference frame

:param x: x coordinate in MRSData space
:param y: y coordinate in MRSData space
:param z: z coordinate in MRSData space
:return: the corresponding (x, y, z) position in scanner coordinates
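The homogeneous-coordinate arithmetic, shown standalone with a hand-built affine (scale by 2, shift x by 10):

import numpy

transform = numpy.matrix([[2.0, 0.0, 0.0, 10.0],
                          [0.0, 2.0, 0.0, 0.0],
                          [0.0, 0.0, 2.0, 0.0],
                          [0.0, 0.0, 0.0, 1.0]])
point = transform * numpy.matrix([1.0, 2.0, 3.0, 1.0]).T
print(numpy.squeeze(numpy.asarray(point))[0:3])  # -> [12.  4.  6.]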
5,490
def wrap_prompts_class(Klass): try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
Wrap IPython's Prompt class

This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations.
5,491
def generate_mesh( geo_object, verbose=True, dim=3, prune_vertices=True, prune_z_0=False, remove_faces=False, gmsh_path=None, extra_gmsh_arguments=None, geo_filename=None, mesh_file_type="msh", ): if extra_gmsh_arguments is None: extra_gmsh_arguments = [] if mesh_file_type == "mesh": extra_gmsh_arguments += ["-string", "Mesh.SaveElementTagType=2;"] preserve_geo = geo_filename is not None if geo_filename is None: with tempfile.NamedTemporaryFile(suffix=".geo") as f: geo_filename = f.name with open(geo_filename, "w") as f: f.write(geo_object.get_code()) filename_suffix = "msh" if mesh_file_type[:3] == "msh" else mesh_file_type with tempfile.NamedTemporaryFile(suffix="." + filename_suffix) as handle: msh_filename = handle.name gmsh_executable = gmsh_path if gmsh_path is not None else _get_gmsh_exe() args = [ "-{}".format(dim), geo_filename, "-format", mesh_file_type, "-bin", "-o", msh_filename, ] + extra_gmsh_arguments p = subprocess.Popen( [gmsh_executable] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) if verbose: while True: line = p.stdout.readline() if not line: break print(line.decode("utf-8"), end="") p.communicate() assert p.returncode == 0, "Gmsh exited with error (return code {}).".format( p.returncode ) mesh = meshio.read(msh_filename) if remove_faces: two_d_cells = set(["triangle", "quad"]) three_d_cells = set( ["tetra", "hexahedron", "wedge", "pyramid", "penta_prism", "hexa_prism"] ) if any(k in mesh.cells for k in three_d_cells): keep_keys = three_d_cells.intersection(mesh.cells.keys()) elif any(k in mesh.cells for k in two_d_cells): keep_keys = two_d_cells.intersection(mesh.cells.keys()) else: keep_keys = mesh.cells.keys() mesh.cells = {key: mesh.cells[key] for key in keep_keys} mesh.cell_data = {key: mesh.cell_data[key] for key in keep_keys} if prune_vertices: ncells = numpy.concatenate([numpy.concatenate(c) for c in mesh.cells.values()]) uvertices, uidx = numpy.unique(ncells, return_inverse=True) k = 0 for key in mesh.cells.keys(): n = numpy.prod(mesh.cells[key].shape) mesh.cells[key] = uidx[k : k + n].reshape(mesh.cells[key].shape) k += n mesh.points = mesh.points[uvertices] for key in mesh.point_data: mesh.point_data[key] = mesh.point_data[key][uvertices] os.remove(msh_filename) if preserve_geo: print("\ngeo file: {}".format(geo_filename)) else: os.remove(geo_filename) if ( prune_z_0 and mesh.points.shape[1] == 3 and numpy.all(numpy.abs(mesh.points[:, 2]) < 1.0e-13) ): mesh.points = mesh.points[:, :2] return mesh
Return a meshio.Mesh, storing the mesh points, cells, and data, generated by Gmsh from the `geo_object`, written to a temporary file, and reread by `meshio`.

Gmsh's native "msh" format is ill-suited to fast I/O. This can greatly reduce the performance of pygmsh. As alternatives, try `mesh_file_type=`:

- `"vtk"`, though Gmsh doesn't write the physical tags to VTK <https://gitlab.onelab.info/gmsh/gmsh/issues/389>, or
- `"mesh"`, though this only supports a few basic elements - "line", "triangle", "quad", "tetra", "hexahedron" - and doesn't preserve the `$PhysicalNames`, just the `int` tags.
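A usage sketch, requiring a Gmsh executable on the PATH and written against the older pygmsh API this helper belongs to (built_in.Geometry / add_polygon; newer pygmsh releases renamed these):

import pygmsh

geom = pygmsh.built_in.Geometry()
geom.add_polygon([
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
    [1.0, 1.0, 0.0],
], lcar=0.2)
mesh = generate_mesh(geom, dim=2, prune_z_0=True, verbose=False)
print(mesh.points.shape, list(mesh.cells.keys()))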
5,492
def get_parser(parser):
    # The description text was lost in extraction and is not recoverable;
    # the "..." placeholder marks the elision.
    parser.description = textwrap.dedent("...".strip())
    parser.add_argument("locale", nargs="+", help="a locale to segment")
Configures the given argument parser.

Args:
    parser: the parser to configure
5,493
def createSparseCNNModel(self): model = nn.Sequential( nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels[0], kernel_size=self.kernel_size[0], stride=self.stride[0], padding=self.padding[0]), nn.MaxPool2d(kernel_size=2), KWinners2d(n=self.cnn_output_len[0], k=self.cnn_k[0], channels=self.out_channels[0], kInferenceFactor=self.k_inference_factor, boostStrength=self.boost_strength, boostStrengthFactor=self.boost_strength_factor), nn.Conv2d(in_channels=self.out_channels[0], out_channels=self.out_channels[1], kernel_size=self.kernel_size[1], stride=self.stride[1], padding=self.padding[1]), nn.MaxPool2d(kernel_size=2), KWinners2d(n=self.cnn_output_len[1], k=self.cnn_k[1], channels=self.out_channels[1], kInferenceFactor=self.k_inference_factor, boostStrength=self.boost_strength, boostStrengthFactor=self.boost_strength_factor), Flatten(), SparseWeights( nn.Linear(self.cnn_output_len[1], self.n), self.weight_sparsity), KWinners(n=self.n, k=self.k, kInferenceFactor=self.k_inference_factor, boostStrength=self.boost_strength, boostStrengthFactor=self.boost_strength_factor), nn.Linear(self.n, self.output_size), nn.LogSoftmax(dim=1) ) model.to(self.device) if torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) return model
Create a sparse network composed of two CNN / MaxPool layers followed by a sparse linear layer, using k-winner activation between the layers
5,494
def update_rule(self, name, id_env, contents, blocks_id, id_rule):
    # Literal keys and the URL were lost in extraction; the values below
    # are assumed from the parameter names and NetworkAPI conventions.
    url = 'rule/update/'

    map_dict = dict()
    map_dict['name'] = name
    map_dict['id_env'] = id_env
    map_dict['contents'] = contents
    map_dict['blocks_id'] = blocks_id
    map_dict['id_rule'] = id_rule

    try:
        code, xml = self.submit({'map': map_dict}, 'PUT', url)
    except Exception as e:
        raise e

    return self.response(code, xml)
Save an environment rule

:param name: Name of the rule
:param id_env: Environment id
:param contents: Lists of contents in order. Ex: ['content one', 'content two', ...]
:param blocks_id: Lists of block ids, or '0' where the content is custom. Ex: ['0', '5', '0', ...]
:param id_rule: Rule id

:return: None

:raise AmbienteNaoExisteError: Environment not registered.
:raise InvalidValueError: Invalid parameter.
:raise UserNotAuthorizedError: Permission denied.
:raise DataBaseError: NetworkAPI failed to access the database.
:raise XMLError: NetworkAPI failed to read the request XML or to generate the response XML.
5,495
def to_xml(self):
    for n, v in {"name": self.name, "quantity": self.quantity,
                 "unit_price": self.unit_price}.items():
        if is_empty_or_none(v):
            # '%s' placeholder restored; it was lost in extraction.
            raise LineError("%s attribute cannot be empty or None." % n)

    doc = Document()
    root = doc.createElement("line")
    super(Line, self).to_xml(root)
    self._create_text_node(root, "date", self.date)
    self._create_text_node(root, "name", self.name, True)
    self._create_text_node(root, "description", self.description, True)
    self._create_text_node(root, "quantity", self.quantity)
    self._create_text_node(root, "unitPrice", self.unit_price)
    self._create_text_node(root, "unit", self.unit)
    self._create_text_node(root, "gin", self.gin)
    self._create_text_node(root, "gtin", self.gtin)
    self._create_text_node(root, "sscc", self.sscc)

    if len(self.__discounts):
        discounts = root.ownerDocument.createElement("discounts")
        root.appendChild(discounts)
        for discount in self.__discounts:
            if not issubclass(discount.__class__, Discount):
                # Message text assumed; the literal was lost in extraction.
                raise LineError("%s is not a subclass of %s."
                                % (discount.__class__.__name__,
                                   Discount.__name__))
            discounts.appendChild(discount.to_xml())

    if len(self.__taxes):
        taxes = root.ownerDocument.createElement("taxes")
        root.appendChild(taxes)
        for tax in self.__taxes:
            if not issubclass(tax.__class__, Tax):
                raise LineError("%s is not a subclass of %s."
                                % (tax.__class__.__name__, Tax.__name__))
            taxes.appendChild(tax.to_xml())

    return root
Returns a DOM representation of the line. @return: Element
5,496
def process_request(self):
    self.response = self.request_handler.process_request(
        self.method, self.request_data)
Process the call and set response_data.
5,497
def company_add_user(self, email, name, password, receiver, admin):
    # Literal keys were lost in extraction; the endpoint name and payload
    # keys below are assumed from the parameter names, not confirmed.
    method, url = get_URL('company_add_user')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'email': email,
        'name': name,
        'password': password,
        'receiver': receiver,
        'admin': admin
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code == 200:
        return True
    hellraiser(res)
Add a user to the company account. :param email: :param name: :param password: Pass without storing in plain text :param receiver: Can user receive files :param admin: :type email: ``str`` or ``unicode`` :type name: ``str`` or ``unicode`` :type password: ``str`` or ``unicode`` :type receiver: ``bool`` :type admin: ``bool`` :rtype: ``bool``
5,498
def get_hours_for_week(self, week_start=None):
    week_start = week_start if week_start else self.week_start
    week_end = week_start + relativedelta(days=7)
    return ProjectHours.objects.filter(
        week_start__gte=week_start, week_start__lt=week_end)
Gets all ProjectHours entries in the 7-day period beginning on week_start.
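The 7-day window arithmetic behind the queryset filter, shown standalone:

from datetime import date
from dateutil.relativedelta import relativedelta

week_start = date(2024, 1, 1)
week_end = week_start + relativedelta(days=7)
# Entries match when week_start <= entry.week_start < week_end:
print(week_start, week_end)  # 2024-01-01 2024-01-08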
5,499
def _dendropy_to_dataframe(tree, add_node_labels=True, use_uids=True):
    # Column-name literals were lost in extraction; they are restored here
    # from the data[...].append(...) call order below. The 'leaf'/'node'/
    # 'root' type values are likewise assumed from context.
    tree.max_distance_from_root()
    idx = []  # unused in this snippet
    data = {'type': [], 'id': [], 'parent': [],
            'length': [], 'label': [], 'distance': []}
    if use_uids:
        data['uid'] = []

    if add_node_labels:
        for i, node in enumerate(tree.internal_nodes()):
            node.label = str(i)

    for node in tree.nodes():
        if node.is_leaf():
            type_ = 'leaf'
            label = str(node.taxon.label).replace(' ', '_')
        elif node.is_internal():
            type_ = 'node'
            label = str(node.label)

        id_ = label
        parent_node = node.parent_node
        length = node.edge_length
        distance = node.distance_from_root()

        if parent_node is None and length is None:
            parent_label = None
            parent_node = None
            length = 0
            distance = 0
            type_ = 'root'
        elif parent_node.is_internal():
            parent_label = str(parent_node.label)
        else:
            raise Exception("Subtree is not attached to tree?")

        data['type'].append(type_)
        data['id'].append(id_)
        data['parent'].append(parent_label)
        data['length'].append(length)
        data['label'].append(label)
        data['distance'].append(distance)
        if use_uids:
            data['uid'].append(get_random_id(10))

    df = pandas.DataFrame(data)
    return df
Convert Dendropy tree to Pandas dataframe.
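A round-trip sketch, assuming the helper above plus its pandas and get_random_id dependencies are importable:

import dendropy

tree = dendropy.Tree.get(data="((A:1.0,B:2.0):1.0,C:3.0);", schema="newick")
df = _dendropy_to_dataframe(tree)
print(df[["type", "label", "parent", "length"]])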