[Dataset column summary: "Unnamed: 0" (int64, 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)]
5,600
def _build_robots_txt_checker(cls, session: AppSession): if session.args.robots: robots_txt_pool = session.factory.new() robots_txt_checker = session.factory.new( , web_client=session.factory[], robots_txt_pool=robots_txt_pool ) return robots_txt_checker
Build robots.txt checker.
5,601
def get_schedule_by_regid_and_term(regid, term, non_time_schedule_instructors=True, per_section_prefetch_callback=None, transcriptable_course="", **kwargs): if "include_instructor_not_on_time_schedule" in kwargs: include = kwargs["include_instructor_not_on_time_schedule"] non_time_schedule_instructors = include params = [ (, regid), ] if transcriptable_course != "": params.append(("transcriptable_course", transcriptable_course,)) params.extend([ (, term.quarter), (, ), (, term.year), ]) url = "{}?{}".format(registration_res_url_prefix, urlencode(params)) return _json_to_schedule(get_resource(url), term, regid, non_time_schedule_instructors, per_section_prefetch_callback)
Returns a uw_sws.models.ClassSchedule object for the regid and term passed in.
5,602
def near_sphere(self, x, y, max_distance=None):
    # The geo-operator key literals were stripped from this entry; '$nearSphere'
    # and '$maxDistance' below are the standard MongoDB spellings and are assumed.
    expr = {self: {'$nearSphere': [x, y]}}
    if max_distance is not None:
        expr[self]['$maxDistance'] = max_distance
    return QueryExpression(expr)
Return documents near the given point using sphere distances
5,603
def get_factors(self, node=None):
    if node:
        if node not in self.nodes():
            # the original error-message literal was not preserved in this entry
            raise ValueError()
        node_factors = []
        for factor in self.factors:
            if node in factor.scope():
                node_factors.append(factor)
        return node_factors
    else:
        return self.factors
Returns all the factors containing the node. If node is not specified, returns all the factors that have been added till now to the graph. Parameters ---------- node: any hashable python object (optional) The node whose factors we want. If node is not specified, all factors added so far are returned. Examples -------- >>> from pgmpy.models import MarkovModel >>> from pgmpy.factors.discrete import DiscreteFactor >>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')]) >>> factor1 = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2], ... values=np.random.rand(4)) >>> factor2 = DiscreteFactor(['Bob', 'Charles'], cardinality=[2, 3], ... values=np.ones(6)) >>> student.add_factors(factor1,factor2) >>> student.get_factors() [<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>, <DiscreteFactor representing phi(Bob:2, Charles:3) at 0x7f8a0e9bf5f8>] >>> student.get_factors('Alice') [<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>]
5,604
def _TerminateProcessByPid(self, pid):
    self._RaiseIfNotRegistered(pid)
    process = self._processes_per_pid[pid]
    self._TerminateProcess(process)
    self._StopMonitoringProcess(process)
Terminate a process that's monitored by the engine. Args: pid (int): process identifier (PID). Raises: KeyError: if the process is not registered with and monitored by the engine.
5,605
def chain_input_files(self):
    ret_list = []
    for key, val in self.file_dict.items():
        if val & FileFlags.in_ch_mask == FileFlags.input_mask:
            ret_list.append(key)
    return ret_list
Return a list of the input files needed by this chain. For `Link` sub-classes this will return only those files that were not created by any internal `Link`
5,606
def load_sound(self, loc, title, group):
    self.sounds.setdefault(group, {})
    self.sounds[group][title] = Sound(loc, self)
Used internally when loading sounds. You should probably use load_objects().
5,607
def issues(self, from_date=DEFAULT_DATETIME, offset=None, max_issues=MAX_ISSUES): resource = self.RISSUES + self.CJSON ts = datetime_to_utc(from_date) ts = ts.strftime("%Y-%m-%dT%H:%M:%SZ") params = { self.PSTATUS_ID: , self.PSORT: self.PUPDATED_ON, self.PUPDATED_ON: + ts, self.PLIMIT: max_issues } if offset is not None: params[self.POFFSET] = offset response = self._call(resource, params) return response
Get the information of a list of issues. :param from_date: retrieve issues that were updated from that date; dates are converted to UTC :param offset: starting position for the search :param max_issues: maximum number of issues to return per query
5,608
def can_update_repositories(self): url_path = construct_url(, bank_id=self._catalog_idstr) return self._get_request(url_path)[][]
Tests if this user can update ``Repositories``. A return of true does not guarantee successful authorization. A return of false indicates that it is known updating a ``Repository`` will result in a ``PermissionDenied``. This is intended as a hint to an application that may not wish to offer update operations to unauthorized users. :return: ``false`` if ``Repository`` modification is not authorized, ``true`` otherwise :rtype: ``boolean`` *compliance: mandatory -- This method must be implemented.*
5,609
def identity(ctx, variant_id): if not variant_id: LOG.warning("Please provide a variant id") ctx.abort() adapter = ctx.obj[] version = ctx.obj[] LOG.info("Search variants {0}".format(adapter)) result = adapter.get_clusters(variant_id) if result.count() == 0: LOG.info("No hits for variant %s", variant_id) return for res in result: click.echo(res)
Check how well SVs are working in the database
5,610
def match_var(self, tokens, item):
    setvar, = tokens
    if setvar != wildcard:
        if setvar in self.names:
            self.add_check(self.names[setvar] + " == " + item)
        else:
            self.add_def(setvar + " = " + item)
            self.names[setvar] = item
Matches a variable.
5,611
def _context_source_file_url(path_or_url): if path_or_url.startswith(): return path_or_url if path_or_url.startswith(): return "file://" + path_or_url return "file://" + os.path.join(os.path.realpath(os.getcwd()), path_or_url)
Returns a URL for a remote or local context CSV file
5,612
def is_free_chunk(self, chk):
    cs = self.get_chunk_status(chk)
    if cs & 0x1 != 0:
        return True
    return False
Check whether the chunk is free or not
5,613
def until_some(*args, **kwargs):
    # The kwargs.pop() key literals were stripped from this entry; 'done_at_least'
    # and 'timeout' are assumed from the parameter names given in the docstring.
    done_at_least = kwargs.pop('done_at_least', None)
    timeout = kwargs.pop('timeout', None)
    if done_at_least is None:
        done_at_least = len(args) + len(kwargs)
    wait_iterator = tornado.gen.WaitIterator(*args, **kwargs)
    maybe_timeout = future_timeout_manager(timeout)
    results = []
    while not wait_iterator.done():
        result = yield maybe_timeout(wait_iterator.next())
        results.append((wait_iterator.current_index, result))
        if len(results) >= done_at_least:
            break
    raise tornado.gen.Return(results)
Return a future that resolves when some of the passed futures resolve. The futures can be passed as either a sequence of *args* or a dict of *kwargs* (but not both). Some additional keyword arguments are supported, as described below. Once a specified number of underlying futures have resolved, the returned future resolves as well, or a timeout could be raised if specified. Parameters ---------- done_at_least : None or int Number of futures that need to resolve before this resolves or None to wait for all (default None) timeout : None or float Timeout in seconds, or None for no timeout (the default) Returns ------- This command returns a tornado Future that resolves with a list of (index, value) tuples containing the results of all futures that resolved, with corresponding indices (numbers for *args* futures or keys for *kwargs* futures). Raises ------ :class:`tornado.gen.TimeoutError` If operation times out before the requisite number of futures resolve
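A minimal usage sketch, assuming `until_some` is importable from this module and a tornado IOLoop is available; the futures and delays are purely illustrative:
import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def demo():
    # three futures resolving after different delays
    f1, f2, f3 = tornado.gen.sleep(0.1), tornado.gen.sleep(0.2), tornado.gen.sleep(5.0)
    # resolve as soon as any two are done, or raise TimeoutError after 1 second
    results = yield until_some(f1, f2, f3, done_at_least=2, timeout=1.0)
    print(results)  # e.g. [(0, None), (1, None)]

tornado.ioloop.IOLoop.current().run_sync(demo)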
5,614
def description_of(file, name=): u = UniversalDetector() for line in file: u.feed(line) u.close() result = u.result if result[]: return % (name, result[], result[]) else: return % name
Return a string describing the probable encoding of a file.
5,615
def restore(s, t):
    t = (c for c in t)
    return ''.join(next(t) if not is_blacksquare(c) else c for c in s)
s is the source string, it can contain '.' t is the target, it's smaller than s by the number of '.'s in s Each char in s is replaced by the corresponding char in t, jumping over '.'s in s. >>> restore('ABC.DEF', 'XYZABC') 'XYZ.ABC'
5,616
def event(self, event):
    if event.type() in (QEvent.Shortcut, QEvent.ShortcutOverride):
        return True
    else:
        return super(ShortcutEditor, self).event(event)
Qt method override.
5,617
def selector(C, style): clas = C.classname(style.name) if style.type == : outlineLvl = int((style.properties.get() or {}).get() or 8) + 1 if outlineLvl < 9: tag = % outlineLvl else: tag = elif style.type == : tag = elif style.type == : tag = elif style.type == : tag = return "%s.%s" % (tag, clas)
return the selector for the given stylemap style
5,618
def get_queryset(self, **kwargs):
    queryset = self.derive_queryset(**kwargs)
    return self.order_queryset(queryset)
Gets our queryset. This takes care of filtering if there are any fields to filter by.
5,619
def in_order(self) -> Iterator["BSP"]:
    if self.children:
        yield from self.children[0].in_order()
        yield self
        yield from self.children[1].in_order()
    else:
        yield self
Iterate over this BSP's hierarchy in order. .. versionadded:: 8.3
5,620
def make_assignment(instr, queue, stack):
    value = make_expr(stack)
    targets = []
    while isinstance(instr, instrs.DUP_TOP):
        targets.append(make_assign_target(queue.popleft(), queue, stack))
        instr = queue.popleft()
    targets.append(make_assign_target(instr, queue, stack))
    return ast.Assign(targets=targets, value=value)
Make an ast.Assign node.
5,621
def recursive_update(d, u):
    for k, v in u.iteritems():
        if isinstance(v, collections.Mapping):
            r = recursive_update(d.get(k, {}), v)
            d[k] = r
        else:
            d[k] = u[k]
    return d
Dict recursive update. Based on Alex Martelli's code on stackoverflow http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top :param d: dict to update :param u: dict with new data :return: the updated dict `d`
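A minimal usage sketch (Python 2, since the implementation above relies on `dict.iteritems` and `collections.Mapping`); the dictionaries are illustrative:
base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
new = {'db': {'port': 6432}, 'debug': True}
recursive_update(base, new)
# base is now {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}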
5,622
def get_fault(self, reply): reply = self.replyfilter(reply) sax = Parser() faultroot = sax.parse(string=reply) soapenv = faultroot.getChild() soapbody = soapenv.getChild() fault = soapbody.getChild() unmarshaller = self.unmarshaller(False) p = unmarshaller.process(fault) if self.options().faults: raise WebFault(p, faultroot) return (faultroot, p.detail)
Extract the fault from the specified soap reply. If I{faults} is True, an exception is raised. Otherwise, the I{unmarshalled} fault L{Object} is returned. This method is called when the server raises a I{web fault}. @param reply: A soap reply message. @type reply: str @return: A fault object. @rtype: tuple ( L{Element}, L{Object} )
5,623
def batch_message_from_parts(cls, messages): middle = b.join(messages) if not middle: raise ProtocolError.empty_batch() return b.join([b, middle, b])
Convert messages, one per batch item, into a batch message. At least one message must be passed.
5,624
def _choices(self):
    pairs = []
    for key, value in self.choices.items():
        pairs.append(str(value) + "=" + str(key))
    return GPTaskSpec.manifest_escape(";".join(pairs))
Generate a string of choices as key/value pairs :return: string
5,625
def nl_msg_dump(msg, ofd=_LOGGER.debug): hdr = nlmsg_hdr(msg) ofd() ofd(, hdr.SIZEOF) print_hdr(ofd, msg) if hdr.nlmsg_type == libnl.linux_private.netlink.NLMSG_ERROR: dump_error_msg(msg, ofd) elif nlmsg_len(hdr) > 0: print_msg(msg, ofd, hdr) ofd()
Dump message in human readable format to callable. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L970 Positional arguments: msg -- message to print (nl_msg class instance). Keyword arguments: ofd -- function to call with arguments similar to `logging.debug`.
5,626
def from_cone(cls, center, radius=3*u.arcmin, magnitudelimit=None, **kw): center = parse_center(center) criteria = {} if magnitudelimit is not None: criteria[cls.defaultfilter + ] = .format(magnitudelimit) v = Vizier(columns=cls.columns, column_filters=criteria) v.ROW_LIMIT = -1 print(.format(cls.name, center, radius, magnitudelimit)) table = v.query_region(coordinates=center, radius=radius, catalog=cls.catalog)[0] c = cls(cls.standardize_table(table)) c.standardized.meta[] = cls.catalog c.standardized.meta[] = center c.standardized.meta[] = radius c.standardized.meta[] = magnitudelimit return c
Create a Constellation from a cone search of the sky, characterized by a positional center and a radius from it. Parameters ---------- center : SkyCoord object The center around which the query will be made. radius : float, with units of angle The angular radius for the query. magnitudelimit : float The maximum magnitude to include in the download. (This is explicitly thinking UV/optical/IR, would need to change to flux to be able to include other wavelengths.)
5,627
def __solve(self, lvl, x, b, cycle): A = self.levels[lvl].A self.levels[lvl].presmoother(A, x, b) residual = b - A * x coarse_b = self.levels[lvl].R * residual coarse_x = np.zeros_like(coarse_b) if lvl == len(self.levels) - 2: coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b) else: if cycle == : self.__solve(lvl + 1, coarse_x, coarse_b, ) elif cycle == : self.__solve(lvl + 1, coarse_x, coarse_b, cycle) self.__solve(lvl + 1, coarse_x, coarse_b, cycle) elif cycle == : self.__solve(lvl + 1, coarse_x, coarse_b, cycle) self.__solve(lvl + 1, coarse_x, coarse_b, ) elif cycle == "AMLI": nAMLI = 2 Ac = self.levels[lvl + 1].A p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype) beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype) for k in range(nAMLI): p[k, :] = 1 self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape), coarse_b, cycle) for j in range(k): beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) /\ np.inner(p[j, :].conj(), Ac * p[j, :]) p[k, :] -= beta[k, j] * p[j, :] Ap = Ac * p[k, :] alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) /\ np.inner(p[k, :].conj(), Ap) coarse_x += alpha * p[k, :].reshape(coarse_x.shape) coarse_b -= alpha * Ap.reshape(coarse_b.shape) else: raise TypeError( % cycle) x += self.levels[lvl].P * coarse_x self.levels[lvl].postsmoother(A, x, b)
Multigrid cycling. Parameters ---------- lvl : int Solve problem on level `lvl` x : numpy array Initial guess `x` and return correction b : numpy array Right-hand side for Ax=b cycle : {'V','W','F','AMLI'} Recursively called cycling function. Defines the cycling used: cycle = 'V', V-cycle cycle = 'W', W-cycle cycle = 'F', F-cycle cycle = 'AMLI', AMLI-cycle
5,628
def in_period(period, dt=None): if dt is None: dt = datetime.now() for sp in sub_periods: if _is_in_sub_period(sp, dt): return True return False
Determines if a datetime is within a certain time period. If the time is omitted the current time will be used. in_period returns True if the datetime is within the time period, False if not. If the expression is malformed a TimePeriod.InvalidFormat exception will be raised. (Note that this differs from Time::Period, which returns -1 if the expression is invalid). The format for the time period is like Perl's Time::Period module, which is documented in some detail here: http://search.cpan.org/~pryan/Period-1.20/Period.pm Here's the quick and dirty version. Each period is composed of one or more sub-periods separated by a comma. A datetime must match at least one of the sub-periods to be considered in that time period. Each sub-period is composed of one or more tests, like so: scale {value} scale {a-b} scale {a b c} The datetime must pass each test for a sub-period for the sub-period to be considered true. For example: Match Mondays wd {mon} Match Monday mornings wd {mon} hr {9-16} Match Monday morning or Friday afternoon wd {mon} hr {0-12}, wd {fri} hr {0-12} Valid scales are: year month week yday mday wday hour minute second Those can be substituted with their corresponding code: yr mo wk yd md wd hr min sec
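A short usage sketch of the expressions described above (the date is illustrative; 2024-01-01 is a Monday):
from datetime import datetime

dt = datetime(2024, 1, 1, 10, 30)                        # Monday, 10:30
in_period('wd {mon} hr {9-16}', dt)                      # True
in_period('wd {mon} hr {0-12}, wd {fri} hr {0-12}', dt)  # True (first sub-period matches)
in_period('wd {sat sun}', dt)                            # False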
5,629
def column_width(tokens):
    get_len = tools.display_len if PY3 else len
    lens = sorted(map(get_len, tokens or [])) or [0]
    width = lens[-1]
    if width >= 18:
        most = lens[int(len(lens) * 0.9)]
        if most < width + 6:
            return most
    return width
Return a suitable column width to display one or more strings.
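A couple of illustrative calls, assuming the module-level helpers (`tools`, `PY3`) referenced above are available:
column_width(['id', 'name', 'created_at'])   # -> 10 (length of the longest token)
column_width([])                              # -> 0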
5,630
def duplicate( self, insert_sheet_index=None, new_sheet_id=None, new_sheet_name=None ): return self.spreadsheet.duplicate_sheet( self.id, insert_sheet_index, new_sheet_id, new_sheet_name )
Duplicate the sheet. :param int insert_sheet_index: (optional) The zero-based index where the new sheet should be inserted. The index of all sheets after this are incremented. :param int new_sheet_id: (optional) The ID of the new sheet. If not set, an ID is chosen. If set, the ID must not conflict with any existing sheet ID. If set, it must be non-negative. :param str new_sheet_name: (optional) The name of the new sheet. If empty, a new name is chosen for you. :returns: a newly created :class:`<gspread.models.Worksheet>`. .. versionadded:: 3.1.0
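A minimal usage sketch, assuming `ws` is an existing gspread Worksheet; the index and name are illustrative:
backup = ws.duplicate(insert_sheet_index=1, new_sheet_name='Backup')
print(backup.title)   # 'Backup'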
5,631
def create_float(self, value: float) -> Float: self.append((4, value)) return self.get(self.raw_count - 1)
Creates a new :class:`ConstantFloat`, adding it to the pool and returning it. :param value: The value of the new float.
5,632
def create_url(self): headers = self.headers headers[] = str(self.file_size) headers[] = .join(self.encode_metadata()) resp = requests.post(self.client.url, headers=headers) url = resp.headers.get("location") if url is None: msg = .format(resp.status_code) raise TusCommunicationError(msg, resp.status_code, resp.content) return urljoin(self.client.url, url)
Return upload url. Makes request to tus server to create a new upload url for the required file upload.
5,633
async def on_raw_cap_ls(self, params): to_request = set() for capab in params[0].split(): capab, value = self._capability_normalize(capab) if capab in self._capabilities: continue attr = + pydle.protocol.identifierify(capab) + supported = (await getattr(self, attr)(value)) if hasattr(self, attr) else False if supported: if isinstance(supported, str): to_request.add(capab + CAPABILITY_VALUE_DIVIDER + supported) else: to_request.add(capab) else: self._capabilities[capab] = False if to_request: self._capabilities_requested.update(x.split(CAPABILITY_VALUE_DIVIDER, 1)[0] for x in to_request) await self.rawmsg(, , .join(to_request)) else: await self.rawmsg(, )
Update capability mapping. Request capabilities.
5,634
def find_connection(self): if self.connection is not None: return self.connection for m in self.mpstate.mav_master: if in m.messages: if m.messages[].type == mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER: return m return None
find an antenna tracker connection if possible
5,635
def _explicit_close(napalm_device): if salt.utils.napalm.not_always_alive(__opts__): try: napalm_device[].close() except Exception as err: log.error() log.error(err) log.error()
Will explicitly close the config session with the network device, when running in a not-always-alive proxy minion or regular minion. This helper must be used in configuration-related functions, as the session is preserved and not closed before making any changes.
5,636
def map_wrap(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper
Wrap standard function to easily pass into 'map' processing.
5,637
def fold(data, prefix=, delimeter=): if not isinstance(delimeter, (tuple, list)): delimeter = (delimeter, ) def deep(data): if len(data) == 1 and len(data[0][0]) < 2: if data[0][0]: return {data[0][0][0]: data[0][1]} return data[0][1] collect = {} for key, group in groupby(data, lambda kv: kv[0][0]): nest_data = [(k[1:], v) for k, v in group] collect[key] = deep(nest_data) is_num = all(k.isdigit() for k in collect.keys()) if is_num: return [i[1] for i in sorted(collect.items())] return collect data_ = [ (split(key, delimeter), value) for key, value in sorted(data.items()) ] result = deep(data_) return result[prefix] if prefix else result
>>> _dd(fold({'a__a': 4})) "{'a': {'a': 4}}" >>> _dd(fold({'a__a': 4, 'a__b': 5})) "{'a': {'a': 4, 'b': 5}}" >>> _dd(fold({'a__1': 2, 'a__0': 1, 'a__2': 3})) "{'a': [1, 2, 3]}" >>> _dd(fold({'form__a__b': 5, 'form__a__a': 4}, 'form')) "{'a': {'a': 4, 'b': 5}}" >>> _dd(fold({'form__a__b': 5, 'form__a__a__0': 4, 'form__a__a__1': 7}, 'form')) "{'a': {'a': [4, 7], 'b': 5}}" >>> repr(fold({'form__1__b': 5, 'form__0__a__0': 4, 'form__0__a__1': 7}, 'form')) "[{'a': [4, 7]}, {'b': 5}]"
5,638
def derive_fields(self):
    fields = []
    if self.fields:
        fields.append(self.fields)
    return fields
Default implementation
5,639
def foreign(self, value, context=None): if self.separator is None: separator = else: separator = self.separator.strip() if self.strip and hasattr(self.separator, ) else self.separator value = self._clean(value) try: value = separator.join(value) except Exception as e: raise Concern("{0} caught, failed to convert to string: {1}", e.__class__.__name__, str(e)) return super().foreign(value)
Construct a string-like representation for an iterable of string-like objects.
5,640
def Pitzer(T, Tc, omega):
    Tr = T/Tc
    return R*Tc * (7.08*(1. - Tr)**0.354 + 10.95*omega*(1. - Tr)**0.456)
r'''Calculates enthalpy of vaporization at arbitrary temperatures using a fit by [2]_ to the work of Pitzer [1]_; requires a chemical's critical temperature and acentric factor. The enthalpy of vaporization is given by: .. math:: \frac{\Delta_{vap} H}{RT_c}=7.08(1-T_r)^{0.354}+10.95\omega(1-T_r)^{0.456} Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] omega : float Acentric factor [-] Returns ------- Hvap : float Enthalpy of vaporization, [J/mol] Notes ----- This equation is listed in [3]_, page 2-487 as method #2 for estimating Hvap. This cites [2]_. The recommended range is 0.6 to 1 Tr. Users should expect up to 5% error. T must be under Tc, or an exception is raised. The original article has been reviewed and found to have a set of tabulated values which could be used instead of the fit function to provide additional accuracy. Examples -------- Example as in [3]_, p2-487; exp: 37.51 kJ/mol >>> Pitzer(452, 645.6, 0.35017) 36696.736640106414 References ---------- .. [1] Pitzer, Kenneth S. "The Volumetric and Thermodynamic Properties of Fluids. I. Theoretical Basis and Virial Coefficients." Journal of the American Chemical Society 77, no. 13 (July 1, 1955): 3427-33. doi:10.1021/ja01618a001 .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition. New York: McGraw-Hill Professional, 2000. .. [3] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook, Eighth Edition. McGraw-Hill Professional, 2007.
5,641
def download_listing(self, file: Optional[IO], duration_timeout: Optional[float]=None) -> \ ListingResponse: if self._session_state != SessionState.directory_request_sent: raise RuntimeError() self._session_state = SessionState.file_request_sent yield from self.download(file=file, rewind=False, duration_timeout=duration_timeout) try: if self._response.body.tell() == 0: listings = () elif self._listing_type == : self._response.body.seek(0) machine_listings = wpull.protocol.ftp.util.parse_machine_listing( self._response.body.read().decode(, errors=), convert=True, strict=False ) listings = list( wpull.protocol.ftp.util.machine_listings_to_file_entries( machine_listings )) else: self._response.body.seek(0) file = io.TextIOWrapper(self._response.body, encoding=, errors=) listing_parser = ListingParser(file=file) listings = list(listing_parser.parse_input()) _logger.debug(, listing_parser.type) file.detach() except (ListingError, ValueError) as error: raise ProtocolError(*error.args) from error self._response.files = listings self._response.body.seek(0) self._session_state = SessionState.response_received return self._response
Read file listings. Args: file: A file object or asyncio stream. duration_timeout: Maximum time in seconds within which the entire file must be read. Returns: A Response populated with the file listings. Be sure to call :meth:`start_file_listing` first. Coroutine.
5,642
def meta(self):
    if not self.__meta_received:
        # the original error-message literal was not preserved in this entry
        raise RuntimeError()
    if isinstance(self.raw.value, dict):
        return self.raw.value
    return {}
Get metadata from the query itself. This is guaranteed to only return a Python dictionary. Note that if the query failed, the metadata might not be in JSON format, in which case there may be additional, non-JSON data which can be retrieved using the following :: raw_meta = req.raw.value :return: A dictionary containing the query metadata
5,643
def unmasked(self, depth=0.01): return 1 - (np.hstack(self._O2) + np.hstack(self._O3) / depth) / np.hstack(self._O1)
Return the unmasked overfitting metric for a given transit depth.
5,644
async def async_init(self) -> None: if not self._client_established: await self.request( , .format(self.client_uuid), data={ : DEFAULT_APP_ID, : DEFAULT_APP_VERSION, : self._locale }) self._client_established = True resp = await self.request( , .format(self.client_uuid), data={ : self._email, : self._password }) if not self.user_uuid: self.user_uuid = resp[][][] self._session_expiry = resp[][] self.tiles = Tile(self.request, self.user_uuid)
Create a Tile session.
5,645
def _check_data_port_id(self, data_port): valid, message = super(ContainerState, self)._check_data_port_id(data_port) if not valid: return False, message for scoped_variable_id, scoped_variable in self.scoped_variables.items(): if data_port.data_port_id == scoped_variable_id and data_port is not scoped_variable: return False, "data port id already existing in state" return True, message
Checks the validity of a data port id. Checks whether the id of the given data port is already used by another data port (input, output, scoped vars) within the state. :param rafcon.core.data_port.DataPort data_port: The data port to be checked :return bool validity, str message: validity is True when the data port is valid, False otherwise. message gives more information, especially if the data port is not valid
5,646
def doc(self, export=): rows = [] title = .format(self.__class__.__name__) table = Tab(export=export, title=title) for opt in sorted(self.config_descr): if hasattr(self, opt): c1 = opt c2 = self.config_descr[opt] c3 = self.__dict__.get(opt, ) c4 = self.get_alt(opt) rows.append([c1, c2, c3, c4]) else: print(. format(self.__class__.__name__, opt)) table.add_rows(rows, header=False) table.header([, , , ]) return table.draw()
Dump help document for setting classes
5,647
def _get_default_value_to_cache(self, xblock): try: return self.from_json(xblock._field_data.default(xblock, self.name)) except KeyError: if self._default is UNIQUE_ID: return self._check_or_enforce_type(self._calculate_unique_id(xblock)) else: return self.default
Perform special logic to provide a field's default value for caching.
5,648
def housekeeping(self, **kwargs): path = % self.get_id() self.manager.gitlab.http_post(path, **kwargs)
Start the housekeeping task. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabHousekeepingError: If the server failed to perform the request
5,649
def setEmergencyDecel(self, typeID, decel): self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_EMERGENCY_DECEL, typeID, decel)
setEmergencyDecel(string, double) -> None Sets the maximal physically possible deceleration in m/s^2 of vehicles of this type.
5,650
def order_quote(self, quote_id, extra): container = self.generate_order_template(quote_id, extra) return self.client.call(, , container, id=quote_id)
Places an order using a quote :: extras = { 'hardware': {'hostname': 'test', 'domain': 'testing.com'}, 'quantity': 2 } manager = ordering.OrderingManager(env.client) result = manager.order_quote(12345, extras) :param int quote_id: ID for the target quote :param dictionary extra: Overrides for the defaults of SoftLayer_Container_Product_Order :param int quantity: Quantity to override default
5,651
def _serialize_value(self, value): if isinstance(value, (list, tuple, set)): return [self._serialize_value(v) for v in value] elif isinstance(value, dict): return dict([(k, self._serialize_value(v)) for k, v in value.items()]) elif isinstance(value, ModelBase): return value._serialize() elif isinstance(value, datetime.date): return value.isoformat() else: return value
Called by :py:meth:`._serialize` to serialise an individual value.
5,652
def generate_config_file(): shutil.copy(os.path.join(os.path.dirname(__file__), ), os.path.join(os.getcwd(), ))
Generate a config file for a ProTECT run on hg19. :return: None
5,653
def update(data, id, medium, credentials): _op(data, id, medium, tokens.Operations.UPDATE, credentials)
Updates the [medium] with the given id and data on the user's [medium]List. :param data The data for the [medium] to update. :param id The id of the data to update. :param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA). :raise ValueError For bad arguments.
5,654
def head(self, url, **kwargs):
    # The literals below were stripped from this entry; 'allow_redirects' and
    # 'HEAD' are assumed from the standard requests.Session.head() implementation.
    kwargs.setdefault('allow_redirects', False)
    return self.request('HEAD', url, **kwargs)
r"""Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response
5,655
def append_panel(panels, size_x, size_y, max_col=12): bottom_lines = bottoms(panels) shape = find_shape(bottom_lines, max_col) lines = longest_lines(shape) line = find_place(lines, size_x) if not line: return panel = { : line[], : line[], : size_x, : size_y, } panels.append(panel) return panel
Appends a panel to the list of panels. Finds the highest place at the left for the new panel. :param panels: :param size_x: :param size_y: :param max_col: :return: a new panel or None if it is not possible to place a panel with such size_x
5,656
def apply_to_field_if_exists(effect, field_name, fn, default):
    value = getattr(effect, field_name, None)
    if value is None:
        return default
    else:
        return fn(value)
Apply function to specified field of effect if it is not None, otherwise return default.
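A small illustrative sketch; the `Effect` class, field names and values here are hypothetical:
class Effect(object):
    transcript_id = 'ENST00000357654'
    protein_length = None

effect = Effect()
apply_to_field_if_exists(effect, 'transcript_id', str.upper, default='')  # 'ENST00000357654'
apply_to_field_if_exists(effect, 'protein_length', int, default=0)        # 0 (field is None)
apply_to_field_if_exists(effect, 'missing_field', int, default=0)         # 0 (field absent)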
5,657
def list_drafts(self): target_url = self.client.get_url(, , ) return base.Query(self, target_url)
A filterable list views of layers, returning the draft version of each layer. If the most recent version of a layer or table has been published already, it won’t be returned here.
5,658
def dlogpdf_link_dr(self, inv_link_f, y, Y_metadata=None): c = np.zeros_like(y) if Y_metadata is not None and in Y_metadata.keys(): c = Y_metadata[] link_f = inv_link_f y_link_f = y/link_f log_y_link_f = np.log(y) - np.log(link_f) y_link_f_r = y_link_f**self.r censored = c*(-y_link_f_r*log_y_link_f/(1 + y_link_f_r)) uncensored = (1-c)*(1./self.r + np.log(y) - np.log(link_f) - (2*y_link_f_r*log_y_link_f) / (1 + y_link_f_r)) dlogpdf_dr = censored + uncensored return dlogpdf_dr
Gradient of the log-likelihood function at y given f, w.r.t shape parameter .. math:: :param inv_link_f: latent variables link(f) :type inv_link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: includes censoring information in dictionary key 'censored' :returns: derivative of likelihood evaluated at points f w.r.t variance parameter :rtype: float
5,659
def workspace_from_url(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None, download=False, baseurl=None): if dst_dir and not dst_dir.startswith(): dst_dir = abspath(dst_dir) if mets_url is None: if baseurl is None: raise Exception("Must pass mets_url and/or baseurl to workspace_from_url") else: mets_url = % (baseurl, mets_basename if mets_basename else ) if baseurl is None: baseurl = mets_url.rsplit(, 1)[0] log.debug("workspace_from_url\nmets_url=\nbaseurl=\ndst_dir=", mets_url, baseurl, dst_dir) if not in mets_url: mets_url = % abspath(mets_url) if dst_dir is None: if mets_url.startswith(): dst_dir = dirname(mets_url[len():]) else: dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX) log.debug("Creating workspace for METS @ <%s>", dst_dir, mets_url) if mets_basename is None: mets_basename = mets_url \ .rsplit(, 1)[-1] \ .split()[0] \ .split()[0] dst_mets = join(dst_dir, mets_basename) log.debug("Copying mets url to ", mets_url, dst_mets) if + dst_mets == mets_url: log.debug("Target and source mets are identical") else: if exists(dst_mets) and not clobber_mets: raise Exception("File already exists but clobber_mets is false" % dst_mets) else: self.download_to_directory(dst_dir, mets_url, basename=mets_basename) workspace = Workspace(self, dst_dir, mets_basename=mets_basename, baseurl=baseurl) if download: for f in workspace.mets.find_files(): workspace.download_file(f) return workspace
Create a workspace from a METS by URL. Sets the mets.xml file Arguments: mets_url (string): Source mets URL dst_dir (string, None): Target directory for the workspace clobber_mets (boolean, False): Whether to overwrite existing mets.xml. By default existing mets.xml will raise an exception. download (boolean, False): Whether to download all the files baseurl (string, None): Base URL for resolving relative file locations Returns: Workspace
5,660
def add_backends(self, *backends):
    for backend in backends:
        full = self._expand_host(backend)
        self.backends[full] = 0
        self.task_counter[full] = 0
See the documentation for __init__() to see an explanation of the *backends argument.
5,661
def FindUnspentCoins(self, from_addr=None, use_standard=False, watch_only_val=0): ret = [] for coin in self.GetCoins(): if coin.State & CoinState.Confirmed > 0 and \ coin.State & CoinState.Spent == 0 and \ coin.State & CoinState.Locked == 0 and \ coin.State & CoinState.Frozen == 0 and \ coin.State & CoinState.WatchOnly == watch_only_val: do_exclude = False if self._vin_exclude: for to_exclude in self._vin_exclude: if coin.Reference.PrevIndex == to_exclude.PrevIndex and \ coin.Reference.PrevHash == to_exclude.PrevHash: do_exclude = True if do_exclude: continue if from_addr is not None: if coin.Output.ScriptHash == from_addr: ret.append(coin) elif use_standard: contract = self._contracts[coin.Output.ScriptHash.ToBytes()] if contract.IsStandard: ret.append(coin) else: ret.append(coin) return ret
Finds unspent coin objects in the wallet. Args: from_addr (UInt160): a bytearray (len 20) representing an address. use_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ). watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses. Returns: list: a list of ``neo.Wallet.Coins`` in the wallet that are not spent.
5,662
def write_packed(self, outfile, rows): self.write_preamble(outfile) if self.compression is not None: compressor = zlib.compressobj(self.compression) else: compressor = zlib.compressobj() data.append(0) data.extend(row) if len(data) > self.chunk_limit: compressed = compressor.compress(bytes(data)) if len(compressed): write_chunk(outfile, b, compressed) data = bytearray() compressed = compressor.compress(bytes(data)) flushed = compressor.flush() if len(compressed) or len(flushed): write_chunk(outfile, b, compressed + flushed) write_chunk(outfile, b) return i + 1
Write PNG file to `outfile`. `rows` should be an iterator that yields each packed row; a packed row being a sequence of packed bytes. The rows have a filter byte prefixed and are then compressed into one or more IDAT chunks. They are not processed any further, so if bitdepth is other than 1, 2, 4, 8, 16, the pixel values should have been scaled before passing them to this method. This method does work for interlaced images but it is best avoided. For interlaced images, the rows should be presented in the order that they appear in the file.
5,663
def get_dummy_thread(nsamples, **kwargs): seed = kwargs.pop(, False) ndim = kwargs.pop(, 2) logl_start = kwargs.pop(, -np.inf) logl_range = kwargs.pop(, 1) if kwargs: raise TypeError(.format(kwargs)) if seed is not False: np.random.seed(seed) thread = {: np.sort(np.random.random(nsamples)) * logl_range, : np.full(nsamples, 1.), : np.random.random((nsamples, ndim)), : np.zeros(nsamples).astype(int)} if logl_start != -np.inf: thread[] += logl_start thread[] = np.asarray([[logl_start, thread[][-1]]]) return thread
Generate dummy data for a single nested sampling thread. Log-likelihood values of points are generated from a uniform distribution in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is not -np.inf). Theta values of each point are each generated from a uniform distribution in (0, 1). Parameters ---------- nsamples: int Number of samples in thread. ndim: int, optional Number of dimensions. seed: int, optional If not False, the seed is set with np.random.seed(seed). logl_start: float, optional logl at which thread starts. logl_range: float, optional Scale factor applied to logl values.
5,664
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None):
    data = from_zip.read(from_name)
    if to_name is None:
        to_name = from_name
    to_zip.writestr(to_name, data)
Read a file from a ZipFile and write it to a new ZipFile.
5,665
def get_accessibility_packs(self): packs = [] for node in list(self.nodes.values()): node[] = for node_id, node in self.nodes.items(): if node[] == : packs.append(self.dfs_get_all_childs(node_id)) for node in list(self.nodes.values()): del node[] return packs
Get accessibility packs of the graph: elements within one pack are related to each other; between packs there is no relation at all. TODO: Make it work for directional graphs too, because for now an edge must be father->son AND son->father :return: packs of nodes :rtype: list
5,666
def validate(cnpj_number): _cnpj = compat.clear_punctuation(cnpj_number) if (len(_cnpj) != 14 or len(set(_cnpj)) == 1): return False first_part = _cnpj[:12] second_part = _cnpj[:13] first_digit = _cnpj[12] second_digit = _cnpj[13] if (first_digit == calc.calculate_first_digit(first_part) and second_digit == calc.calculate_second_digit(second_part)): return True return False
This function validates a CNPJ number. This function uses calculation package to calculate both digits and then validates the number. :param cnpj_number: a CNPJ number to be validated. Only numbers. :type cnpj_number: string :return: Bool -- True for a valid number, False otherwise.
5,667
def _expand_target(self): target = self.opts[] if isinstance(target, list): return hostname = self.opts[].split()[-1] needs_expansion = not in hostname and \ salt.utils.network.is_reachable_host(hostname) and \ salt.utils.network.is_ip(hostname) if needs_expansion: hostname = salt.utils.network.ip_to_host(hostname) if hostname is None: return self._get_roster() for roster_filename in self.__parsed_rosters: roster_data = self.__parsed_rosters[roster_filename] if not isinstance(roster_data, bool): for host_id in roster_data: if hostname in [host_id, roster_data.get()]: if hostname != self.opts[]: self.opts[] = hostname self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False return
Figures out if the target is a reachable host without wildcards, expands if any. :return:
5,668
def get_scaled_cutout_wdht_view(shp, x1, y1, x2, y2, new_wd, new_ht): x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) new_wd, new_ht = int(new_wd), int(new_ht) old_wd = x2 - x1 + 1 old_ht = y2 - y1 + 1 max_x, max_y = shp[1] - 1, shp[0] - 1 if (new_wd != old_wd) or (new_ht != old_ht): yi = np.mgrid[0:new_ht].reshape(-1, 1) xi = np.mgrid[0:new_wd].reshape(1, -1) iscale_x = float(old_wd) / float(new_wd) iscale_y = float(old_ht) / float(new_ht) xi = (x1 + xi * iscale_x).clip(0, max_x).astype(np.int, copy=False) yi = (y1 + yi * iscale_y).clip(0, max_y).astype(np.int, copy=False) wd, ht = xi.shape[1], yi.shape[0] xi_max, yi_max = xi[0, -1], yi[-1, 0] assert xi_max <= max_x, ValueError("X index (%d) exceeds shape bounds (%d)" % (xi_max, max_x)) assert yi_max <= max_y, ValueError("Y index (%d) exceeds shape bounds (%d)" % (yi_max, max_y)) view = np.s_[yi, xi] else: wd, ht = old_wd, old_ht view = np.s_[y1:y2 + 1, x1:x2 + 1] old_wd, old_ht = max(old_wd, 1), max(old_ht, 1) scale_x = float(wd) / old_wd scale_y = float(ht) / old_ht return (view, (scale_x, scale_y))
Like get_scaled_cutout_wdht, but returns the view/slice to extract from an image instead of the extraction itself.
5,669
def insertData(self, offset: int, string: str) -> None: self._insert_data(offset, string)
Insert ``string`` at offset on this node.
5,670
def __parse_domain_to_employer_line(self, raw_domain, raw_org): d = re.match(self.DOMAIN_REGEX, raw_domain, re.UNICODE) if not d: cause = "invalid domain format: " % raw_domain raise InvalidFormatError(cause=cause) dom = d.group().strip() o = re.match(self.ORGANIZATION_REGEX, raw_org, re.UNICODE) if not o: cause = "invalid organization format: " % raw_org raise InvalidFormatError(cause=cause) org = o.group().strip() org = self.__encode(org) dom = self.__encode(dom) return org, dom
Parse domain to employer lines
5,671
def _ensure_started(self): if not self.started: async_handlers = [startup_handler for startup_handler in self.startup_handlers if introspect.is_coroutine(startup_handler)] if async_handlers: loop = asyncio.get_event_loop() loop.run_until_complete(asyncio.gather(*[handler(self) for handler in async_handlers], loop=loop)) for startup_handler in self.startup_handlers: if not startup_handler in async_handlers: startup_handler(self)
Marks the API as started and runs all startup handlers
5,672
def _seconds_as_string(seconds):
    # The unit-suffix and format-string literals were stripped from this entry;
    # 's'/'m'/'h'/'d', '%d%s' and the ' ' separator below are assumed from the
    # docstring example ('1d 4h 47m 41s').
    TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
    unit_strings = []
    cur = max(int(seconds), 1)
    for suffix, size in TIME_UNITS:
        if size is not None:
            cur, rest = divmod(cur, size)
        else:
            rest = cur
        if rest > 0:
            unit_strings.insert(0, '%d%s' % (rest, suffix))
    return ' '.join(unit_strings)
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
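Illustrative values, assuming the reconstruction of the literals above:
_seconds_as_string(41)       # '41s'
_seconds_as_string(104861)   # '1d 5h 7m 41s'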
5,673
def detect_client_auth_request(server_handshake_bytes): for record_type, _, record_data in parse_tls_records(server_handshake_bytes): if record_type != b: continue for message_type, message_data in parse_handshake_messages(record_data): if message_type == b: return True return False
Determines if a CertificateRequest message is sent from the server asking the client for a certificate :param server_handshake_bytes: A byte string of the handshake data received from the server :return: A boolean - if a client certificate request was found
5,674
def fitTo_t(what: Union[RtlSignal, Value], where_t: HdlType, extend: bool=True, shrink: bool=True): whatWidth = what._dtype.bit_length() toWidth = where_t.bit_length() if toWidth == whatWidth: return what elif toWidth < whatWidth: if not shrink: raise BitWidthErr() return what[toWidth:] else: if not extend: raise BitWidthErr() w = toWidth - whatWidth if what._dtype.signed: msb = what[whatWidth - 1] ext = reduce(lambda a, b: a._concat(b), [msb for _ in range(w)]) else: ext = vec(0, w) return ext._concat(what)
Slice signal "what" to fit in "where" or arithmetically (for signed by MSB / unsigned, vector with 0) extend "what" to same width as "where" little-endian impl.
5,675
def guess_filename(obj):
    # The literals below were stripped from this entry; 'name', '<' and '>' are
    # assumed (skip pseudo-names such as '<stdin>').
    name = getattr(obj, 'name', None)
    if name and name[0] != '<' and name[-1] != '>':
        return os.path.basename(name)
Tries to guess the filename of the given object.
5,676
def output(self): output_params = dict( self._raw["output"], grid=self.output_pyramid.grid, pixelbuffer=self.output_pyramid.pixelbuffer, metatiling=self.output_pyramid.metatiling ) if "path" in output_params: output_params.update( path=absolute_path(path=output_params["path"], base_dir=self.config_dir) ) if "format" not in output_params: raise MapcheteConfigError("output format not specified") if output_params["format"] not in available_output_formats(): raise MapcheteConfigError( "format %s not available in %s" % ( output_params["format"], str(available_output_formats()) ) ) writer = load_output_writer(output_params) try: writer.is_valid_with_config(output_params) except Exception as e: logger.exception(e) raise MapcheteConfigError( "driver %s not compatible with configuration: %s" % ( writer.METADATA["driver_name"], e ) ) return writer
Output object of driver.
5,677
def classes_(self):
    if self.__classes is None:
        try:
            return self.estimator.classes_
        except AttributeError:
            return None
    return self.__classes
Proxy property to smartly access the classes from the estimator or stored locally on the score visualizer for visualization.
5,678
def parseCmdline(rh): rh.printSysLog("Enter cmdVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: msg = msgs.msg[][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg[][0]) rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " + rh.results[]) return rh.results[] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() if rh.subfunction not in subfuncHandler: subList = .join(sorted(subfuncHandler.keys())) msg = msgs.msg[][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg[][0]) if rh.results[] == 0: rh.argPos = 3 generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " + str(rh.results[])) return rh.results[]
Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error
5,679
def Extract_Checkpoints(self): if self.page is None: raise Exception("The HTML data was not fetched due to some reasons") if in self.page: raise ValueError() soup = BeautifulSoup(self.page,) current_status = soup.find(,id=).text.strip() if current_status == : self.status = elif current_status == : self.status = else: self.status = rows = soup.findAll(,{:}) rows += soup.findAll(,{:}) for row in rows: location = row.find(,{:}).string.strip() date_time = row.find(,{:}).string.strip() status = row.find(,{:}).string.strip() location = self.remove_non_ascii(location) date_time_format = "%d-%b-%Y %H:%M" date_time = parse(self.remove_non_ascii(date_time)) status = self.remove_non_ascii(status) self.tracking_data.append({:status,:date_time,:location}) self.tracking_data = sorted(self.tracking_data, key=lambda k: k[])
Extract the checkpoints and store in self.tracking_data
5,680
def predict_sequence(self, X, A, pi, inference=): obsll = self.predict_proba(X) T, S = obsll.shape alpha = np.zeros((T, S)) alpha[0, :] = pi for t in range(1, T): alpha[t, :] = np.dot(alpha[t-1, :], A) for s in range(S): alpha[t, s] *= obsll[t, s] alpha[t, :] = alpha[t, :]/sum(alpha[t, :]) if inference == : return alpha else: beta = np.zeros((T, S)) gamma = np.zeros((T, S)) beta[T-1, :] = np.ones(S) for t in range(T-2, -1, -1): for i in range(S): for j in range(S): beta[t, i] += A[i, j]*obsll[t+1, j]*beta[t+1, j] beta[t, :] = beta[t, :]/sum(beta[t, :]) for t in range(T): gamma[t, :] = alpha[t, :]*beta[t, :] gamma[t, :] = gamma[t, :]/sum(gamma[t, :]) return gamma
Calculate class probabilities for a sequence of data. Parameters ---------- X : array Test data, of dimension N times d (rows are time frames, columns are data dimensions) A : class transition matrix, where A[i,j] contains p(y_t=j|y_{t-1}=i) pi : vector of initial class probabilities inference : can be 'smoothing' or 'filtering'. Returns: ------- y_prob : array An array of dimension N times n_inlier_classes+1, containing the probabilities of each row of X being one of the inlier classes, or the outlier class (last column).
5,681
def to_period(self, freq=None): from pandas.core.arrays import PeriodArray if self.tz is not None: warnings.warn("Converting to PeriodArray/Index representation " "will drop timezone information.", UserWarning) if freq is None: freq = self.freqstr or self.inferred_freq if freq is None: raise ValueError("You must pass a freq argument as " "current index has none.") freq = get_period_alias(freq) return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
Cast to PeriodArray/Index at a particular frequency. Converts DatetimeArray/Index to PeriodArray/Index. Parameters ---------- freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. Returns ------- PeriodArray/Index Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. Examples -------- >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]', freq='M') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]', freq='D')
5,682
def _get_methods_that_calculate_outputs(inputs, outputs, methods): t calculate something we already have, and only contains equations that might help calculate the outputs from the inputs. re not already returning it if args not in output_dict.keys(): needed = [] for arg in args: if arg in inputs: pass elif arg in outputs: pass elif arg in intermediates: if arg not in outputs: needed.append(arg) else: break else: output_dict[args] = func if len(needed) > 0: outputs.extend(needed) keep_going = True if len(output_dict) > 0: return_methods[output] = output_dict return return_methods
Given iterables of input variable names, output variable names, and a methods dictionary, returns the subset of the methods dictionary that can be calculated, doesn't calculate something we already have, and only contains equations that might help calculate the outputs from the inputs.
5,683
def in_virtual_env():
    import sys
    # The hasattr attribute-name literals were stripped from this entry;
    # 'real_prefix' and 'base_prefix' are the standard virtualenv/venv markers
    # and are assumed here.
    has_venv = False
    if hasattr(sys, 'real_prefix'):
        has_venv = True
    elif hasattr(sys, 'base_prefix'):
        has_venv = sys.base_prefix != sys.prefix
    return has_venv
returns True if you are running inside a python virtual environment. (DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV) sys.prefix gives the location of the virtualenv Notes: It seems IPython does not respect virtual environments properly. TODO: find a solution http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages References: http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv CommandLine: python -m utool.util_sysreq in_virtual_env Example: >>> # DISABLE_DOCTEST >>> from utool.util_sysreq import * # NOQA >>> import utool as ut >>> result = in_virtual_env() >>> print(result)
5,684
def definition(self, suffix = "", local=False, ctype=None, optionals=True, customdim=None, modifiers=None): kind = "({})".format(self.kind) if self.kind is not None else "" cleanmods = [m for m in self.modifiers if m != "" and m != " " and not (local and ("intent" in m or m == "optional")) and not (not optionals and m == "optional")] if modifiers is not None: cleanmods.extend(modifiers) if len(cleanmods) > 0: mods = ", " + ", ".join(cleanmods) + " " else: mods = " " if customdim is not None: dimension = "({})".format(customdim) else: dimension = "({})".format(self.dimension) if self.dimension is not None else "" if self.default is None: default = "" else: if ">" in self.default: default = " ={}".format(self.default) if self.default is not None else "" else: default = " = {}".format(self.default) if self.default is not None else "" name = "{}{}".format(self.name, suffix) stype = self.dtype if ctype is None else ctype return "{}{}{}:: {}{}{}".format(stype, kind, mods, name, dimension, default)
Returns the fortran code string that would define this value element. :arg suffix: an optional suffix to append to the name of the variable. Useful for re-using definitions with new names. :arg local: when True, the parameter definition is re-cast as a local variable definition that has the "intent" and "optional" modifiers removed. :arg ctype: if a ctype should be used as the data type of the variable instead of the original type, specify that string here. :arg optionals: removes the "optional" modifier from the definition before generating it. :arg customdim: if the dimension string needs to be changed, specify the new one here. :arg modifiers: specify an additional list of modifiers to add to the variable definition.
5,685
def update_filenames(self): self.sky_file = os.path.abspath(os.path.join(os.path.join(self.input_path, ), + self.sky_state + + str( self.sky_zenith) + + str( self.sky_azimuth) + + str( self.num_bands) + + self.ds_code))
Does nothing currently. May not need this method
5,686
def headercontent(self, method): content = [] wsse = self.options().wsse if wsse is not None: content.append(wsse.xml()) headers = self.options().soapheaders if not isinstance(headers, (tuple, list, dict)): headers = (headers,) elif not headers: return content pts = self.headpart_types(method) if isinstance(headers, (tuple, list)): n = 0 for header in headers: if isinstance(header, Element): content.append(deepcopy(header)) continue if len(pts) == n: break h = self.mkheader(method, pts[n], header) ns = pts[n][1].namespace("ns0") h.setPrefix(ns[0], ns[1]) content.append(h) n += 1 else: for pt in pts: header = headers.get(pt[0]) if header is None: continue h = self.mkheader(method, pt, header) ns = pt[1].namespace("ns0") h.setPrefix(ns[0], ns[1]) content.append(h) return content
Get the content for the SOAP I{Header} node. @param method: A service method. @type method: I{service.Method} @return: The XML content for the <body/>. @rtype: [L{Element},...]
5,687
def get_data_filename(filename): global _data_map if _data_map is None: _data_map = {} for root, dirs, files in os.walk(specdir): for fname in files: _data_map[fname] = os.path.join(root, fname) if filename not in _data_map: raise KeyError(filename + + specdir) return _data_map[filename]
Map filename to its actual path. Parameters ---------- filename : str Filename to search. Returns ------- path : str Full path to the file in data directory.
5,688
def get_user(uid, channel=14, **kwargs):
    # The dictionary key literals were stripped from this entry; the names below
    # are assumed from the "Return Data" section of the docstring.
    name = get_user_name(uid, **kwargs)
    access = get_user_access(uid, channel, **kwargs)
    data = {'name': name, 'uid': uid, 'channel': channel, 'access': access['access']}
    return data
Get user from uid and access on channel :param uid: user number [1:16] :param channel: number [1:7] :param kwargs: - api_host=127.0.0.1 - api_user=admin - api_pass=example - api_port=623 - api_kg=None Return Data .. code-block:: none name: (str) uid: (int) channel: (int) access: - callback (bool) - link_auth (bool) - ipmi_msg (bool) - privilege_level: (str)[callback, user, operator, administrator, proprietary, no_access] CLI Examples: .. code-block:: bash salt-call ipmi.get_user uid=2
5,689
def from_dict(cls, ctx): ctx = Context(ctx) s = cls() ContextFlags = ctx[] s.ContextFlags = ContextFlags for key in cls._others: if key != : setattr(s, key, ctx[key]) else: w = ctx[key] v = (M128A * len(w))() i = 0 for x in w: y = M128A() y.High = x >> 64 y.Low = x - (x >> 64) v[i] = y i += 1 setattr(s, key, v) if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL: for key in cls._control: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER: for key in cls._integer: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS: for key in cls._segments: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS: for key in cls._debug: setattr(s, key, ctx[key]) if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS: xmm = s.FltSave.xmm for key in cls._mmx: y = M128A() y.High = x >> 64 y.Low = x - (x >> 64) setattr(xmm, key, y) return s
Instance a new structure from a Python native type.
5,690
def _page(q, chunk=1000):
    offset = 0
    while True:
        r = False
        for elem in q.limit(chunk).offset(offset):
            r = True
            yield elem
        offset += chunk
        if not r:
            break
Quick utility to page a query, 1000 items at a time. We need this so we don't OOM (out of memory) ourselves loading the world.
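A minimal usage sketch, assuming a SQLAlchemy-style query object exposing .limit()/.offset(); the model, session and handler are hypothetical:
q = session.query(User).order_by(User.id)   # ordering keeps the pages stable
for user in _page(q, chunk=500):
    process(user)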
5,691
def _process_marked_candidate_indexes(candidate, markers):
    match = RE_SIGNATURE_CANDIDATE.match(markers[::-1])
    return candidate[-match.end():] if match else []
Run regexes against candidate's marked indexes to strip signature candidate. >>> _process_marked_candidate_indexes([9, 12, 14, 15, 17], 'clddc') [15, 17]
5,692
def extract_connected_components(graph, connectivity_type, node_to_id): nx_graph = nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph()) if connectivity_type == "weak": largest_connected_component_list = nxalgcom.weakly_connected_component_subgraphs(nx_graph) elif connectivity_type == "strong": largest_connected_component_list = nxalgcom.strongly_connected_component_subgraphs(nx_graph) else: print("Invalid connectivity type input.") raise RuntimeError try: largest_connected_component = max(largest_connected_component_list, key=len) except ValueError: print("Error: Empty graph.") raise RuntimeError old_node_list = largest_connected_component.nodes() node_to_node = dict(zip(np.arange(len(old_node_list)), old_node_list)) largest_connected_component = nx.to_scipy_sparse_matrix(largest_connected_component, dtype=np.float64, format="csr") new_node_to_id = {k: node_to_id[v] for k, v in node_to_node.items()} return largest_connected_component, new_node_to_id, old_node_list
Extract the largest connected component from a graph. Inputs: - graph: An adjacency matrix in scipy sparse matrix format. - connectivity_type: A string that can be either: "strong" or "weak". - node_to_id: A map from graph node id to Twitter id, in python dictionary format. Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format. - new_node_to_id: A map from graph node id to Twitter id, in python dictionary format. - old_node_list: List of nodes from the possibly disconnected original graph. Raises: - RuntimeError: If the input graph is empty.
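A small runnable illustration of the same idea using scipy's csgraph routines instead of networkx (sidestepping networkx API differences across versions); the toy adjacency matrix and the id map are made up: .. code-block:: python
import numpy as np
import scipy.sparse as sp
from scipy.sparse.csgraph import connected_components

# Two weak components: {0, 1, 2} and {3, 4}.
graph = sp.csr_matrix(np.array([[0, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1],
                                [0, 0, 0, 0, 0]], dtype=np.float64))
node_to_id = {i: 1000 + i for i in range(5)}  # fake Twitter ids

n_comp, labels = connected_components(graph, directed=True, connection="weak")
largest_label = np.bincount(labels).argmax()
keep = np.flatnonzero(labels == largest_label)

# Restrict the adjacency matrix to the largest component and remap the ids.
largest = graph[keep, :][:, keep]
new_node_to_id = {new: node_to_id[old] for new, old in enumerate(keep)}
print(largest.shape, new_node_to_id)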
5,693
def __chopStringDict(self, data): r = {} d = data.split("\t") for item in d: item_parts = item.split("::") if len(item_parts) == 2: (name, value) = item_parts else: name = item_parts[0] value = item_parts[1] name = self.__filter(name) r[name] = value if "hostperfdata" in r: r["type"] = "hostcheck" r["perfdata"] = r["hostperfdata"] r["checkcommand"] = re.search("(.*?)!\(?.*", r["hostcheckcommand"]).group(1) r["name"] = "hostcheck" else: r["type"] = "servicecheck" r["perfdata"] = r["serviceperfdata"] r["checkcommand"] = re.search("((.*)(?=\!)|(.*))", r["servicecheckcommand"]).group(1) r["name"] = self.__filter(r["servicedesc"]) r["hostname"] = self.replacePeriod(self.__filter(r["hostname"])) return r
Returns a dictionary of the provided raw service/host check string.
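The delimiters in the repaired code above (tab between items, "::" between name and value) follow the usual Nagios perfdata template convention; they are an assumption, since the string literals were lost from the source. A standalone sketch of the same parsing step on a made-up sample: .. code-block:: python
import re

def chop_string(data):
    # Parse "NAME::VALUE<TAB>NAME::VALUE..." into a dict with lower-cased keys.
    result = {}
    for item in data.split("\t"):
        name, _, value = item.partition("::")
        result[name.lower()] = value
    return result

sample = "HOSTNAME::web01\tSERVICEDESC::load\tSERVICECHECKCOMMAND::check_load!5"
parsed = chop_string(sample)
# Strip check arguments after the '!', mirroring the original's regex.
parsed["checkcommand"] = re.search(r"((.*)(?=\!)|(.*))",
                                   parsed["servicecheckcommand"]).group(1)
print(parsed)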
5,694
def load_name(self, name): if name in self.globals_: return self.globals_[name] b = self.globals_['__builtins__'] if isinstance(b, dict): return b[name] else: return getattr(b, name)
Implementation of the LOAD_NAME operation
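The lookup order implemented above (module globals first, then __builtins__, which may be either a dict or a module) can be exercised with a tiny standalone version: .. code-block:: python
import builtins

def load_name(globals_, name):
    if name in globals_:
        return globals_[name]
    b = globals_["__builtins__"]
    # __builtins__ can be a plain dict or the builtins module.
    if isinstance(b, dict):
        return b[name]
    return getattr(b, name)

env = {"x": 42, "__builtins__": builtins}
print(load_name(env, "x"))    # found in globals
print(load_name(env, "len"))  # falls through to builtins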
5,695
def view_pool(arg, opts, shell_opts): res = Pool.list({ 'name': arg }) if len(res) == 0: print("No pool with name '%s' found." % arg) return p = res[0] vrf_rt = None vrf_name = None if p.vrf: vrf_rt = p.vrf.rt vrf_name = p.vrf.name print("-- Pool ") print(" %-26s : %d" % ("ID", p.id)) print(" %-26s : %s" % ("Name", p.name)) print(" %-26s : %s" % ("Description", p.description)) print(" %-26s : %s" % ("Default type", p.default_type)) print(" %-26s : %s / %s" % ("Implied VRF RT / name", vrf_rt, vrf_name)) print(" %-26s : %s / %s" % ("Preflen (v4/v6)", str(p.ipv4_default_prefix_length), str(p.ipv6_default_prefix_length))) print("-- Extra Attributes") if p.avps is not None: for key in sorted(p.avps, key=lambda s: s.lower()): print(" %-26s : %s" % (key, p.avps[key])) print("-- Tags") for tag_name in sorted(p.tags, key=lambda s: s.lower()): print(" %s" % tag_name) print("-- Statistics") if p.member_prefixes_v4 == 0: print(" IPv4 prefixes Used / Free : N/A (No IPv4 member prefixes)") elif p.ipv4_default_prefix_length is None: print(" IPv4 prefixes Used / Free : N/A (IPv4 default prefix length is not set)") else: if p.total_prefixes_v4 == 0: used_percent_v4 = 0 else: used_percent_v4 = (float(p.used_prefixes_v4)/p.total_prefixes_v4)*100 print(" %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 prefixes Used / Free", p.used_prefixes_v4, p.free_prefixes_v4, used_percent_v4, p.total_prefixes_v4)) if p.member_prefixes_v6 == 0: print(" IPv6 prefixes Used / Free : N/A (No IPv6 member prefixes)") elif p.ipv6_default_prefix_length is None: print(" IPv6 prefixes Used / Free : N/A (IPv6 default prefix length is not set)") else: if p.total_prefixes_v6 == 0: used_percent_v6 = 0 else: used_percent_v6 = (float(p.used_prefixes_v6)/p.total_prefixes_v6)*100 print(" %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 prefixes Used / Free", p.used_prefixes_v6, p.free_prefixes_v6, used_percent_v6, p.total_prefixes_v6)) if p.member_prefixes_v4 == 0: print(" IPv4 addresses Used / Free : N/A (No IPv4 member prefixes)") elif p.ipv4_default_prefix_length is None: print(" IPv4 addresses Used / Free : N/A (IPv4 default prefix length is not set)") else: if p.total_addresses_v4 == 0: used_percent_v4 = 0 else: used_percent_v4 = (float(p.used_addresses_v4)/p.total_addresses_v4)*100 print(" %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free", p.used_addresses_v4, p.free_addresses_v4, used_percent_v4, p.total_addresses_v4)) if p.member_prefixes_v6 == 0: print(" IPv6 addresses Used / Free : N/A (No IPv6 member prefixes)") elif p.ipv6_default_prefix_length is None: print(" IPv6 addresses Used / Free : N/A (IPv6 default prefix length is not set)") else: if p.total_addresses_v6 == 0: used_percent_v6 = 0 else: used_percent_v6 = (float(p.used_addresses_v6)/p.total_addresses_v6)*100 print(" %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free", p.used_addresses_v6, p.free_addresses_v6, used_percent_v6, p.total_addresses_v6)) print("\n-- Prefixes in pool - v4: %d v6: %d" % (p.member_prefixes_v4, p.member_prefixes_v6)) res = Prefix.list({ 'pool_id': p.id}) for pref in res: print(" %s" % pref.display_prefix)
View a single pool
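The four used/free blocks above repeat the same guarded percentage computation; a small hypothetical helper shows that arithmetic in isolation: .. code-block:: python
def used_percent(used, total):
    # Guard against empty pools, exactly as the inline branches above do.
    if total == 0:
        return 0.0
    return (float(used) / total) * 100

print("%.2f%%" % used_percent(12, 256))  # 4.69%
print("%.2f%%" % used_percent(0, 0))     # 0.00%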
5,696
def _cull(self): right_now = time.time() cull_from = -1 for index in range(len(self._call_times)): if right_now - self._call_times[index].time >= 1.0: cull_from = index self._outstanding_calls -= self._call_times[index].num_calls else: break if cull_from > -1: self._call_times = self._call_times[cull_from + 1:]
Remove calls more than 1 second old from the queue.
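A compact, self-contained variant of the same sliding one-second window, using a deque of (timestamp, count) pairs; the class and attribute names are illustrative: .. code-block:: python
import time
from collections import deque

class CallWindow:
    def __init__(self):
        self._calls = deque()   # (timestamp, num_calls) pairs
        self.outstanding = 0
    def record(self, num_calls=1):
        self._calls.append((time.time(), num_calls))
        self.outstanding += num_calls
    def cull(self):
        # Drop entries older than one second, as _cull() does above.
        now = time.time()
        while self._calls and now - self._calls[0][0] >= 1.0:
            _, n = self._calls.popleft()
            self.outstanding -= n

w = CallWindow()
w.record(3)
w.cull()
print(w.outstanding)  # 3, since the call is still inside the window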
5,697
def _nest_variable(v, check_records=False): if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and v.get("type", {}).get("type") == "array"): return v else: v = copy.deepcopy(v) v["type"] = {"type": "array", "items": v["type"]} return v
Nest a variable when moving from scattered back to consolidated. check_records -- avoid re-nesting a record input if it comes from a previous step and is already nested, in which case it does not need to be re-arrayed.
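A quick illustration of the nesting transform with a made-up CWL-style variable (the is_cwl_record check from the original is omitted here): .. code-block:: python
import copy

def nest_variable(v):
    # Wrap the variable's type in an array type, as _nest_variable does
    # when it decides the input needs nesting.
    v = copy.deepcopy(v)
    v["type"] = {"type": "array", "items": v["type"]}
    return v

var = {"id": "align/bam", "type": "File"}
print(nest_variable(var))
# {'id': 'align/bam', 'type': {'type': 'array', 'items': 'File'}}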
5,698
def xvJacobianFreqs(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs): out= actionAngleTorus_c.actionAngleTorus_jacobian_c(\ self._pot, jr,jphi,jz, angler,anglephi,anglez, tol=kwargs.get('tol',self._tol), dJ=kwargs.get('dJ',self._dJ)) if out[11] != 0: warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i" % out[11]) if not kwargs.get('nosym',False): out[7][:]= 0.5*(out[7]+out[7].T) return (numpy.array(out[:6]).T,out[6],out[7], out[8],out[9],out[10],out[11])
NAME: xvJacobianFreqs PURPOSE: return [R,vR,vT,z,vz,phi], the Jacobian d [R,vR,vT,z,vz,phi] / d (J,angle), the Hessian dO/dJ, and frequencies Omega corresponding to a torus at multiple sets of angles INPUT: jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) angler - radial angle (array [N]) anglephi - azimuthal angle (array [N]) anglez - vertical angle (array [N]) tol= (object-wide value) goal for |dJ|/|J| along the torus dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian) nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors) OUTPUT: ([R,vR,vT,z,vz,phi], [N,6] array d[R,vR,vT,z,vz,phi]/d[J,angle], --> (N,6,6) array dO/dJ, --> (3,3) array Omegar,Omegaphi,Omegaz, [N] arrays Autofit error message) HISTORY: 2016-07-19 - Written - Bovy (UofT)
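The nosym option controls the explicit symmetrization of the Hessian dO/dJ; the operation itself is just the averaged transpose, shown here on a made-up 3x3 array: .. code-block:: python
import numpy as np

hess = np.array([[1.0, 0.2, 0.0],
                 [0.1, 2.0, 0.3],
                 [0.0, 0.4, 3.0]])
# Same symmetrization as out[7][:] = 0.5*(out[7] + out[7].T) above.
hess_sym = 0.5 * (hess + hess.T)
print(np.allclose(hess_sym, hess_sym.T))  # True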
5,699
def default_metric_definitions(cls, toolkit): if toolkit is RLToolkit.COACH: return [ {'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'} ] elif toolkit is RLToolkit.RAY: float_regex = "[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?" return [ {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: (%s)' % float_regex}, {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: (%s)' % float_regex} ]
Provides default metric definitions based on provided toolkit. Args: toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training. Returns: list: metric definitions
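SageMaker metric definitions are plain Name/Regex pairs matched against the training log stream; the Name and Regex values restored in the code above are recalled from the SDK and should be treated as approximate. A quick way to sanity-check such a definition against a sample log line (the log line is made up): .. code-block:: python
import re

float_regex = r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?"
definition = {"Name": "episode_reward_mean",
              "Regex": "episode_reward_mean: (%s)" % float_regex}

sample_line = "episode_reward_mean: 187.4"  # made-up Ray log line
match = re.search(definition["Regex"], sample_line)
print(definition["Name"], match.group(1) if match else None)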