def serveUpcoming(self, request): """Upcoming events list view.""" myurl = self.get_url(request) today = timezone.localdate() monthlyUrl = myurl + self.reverse_subpage('serveMonth', args=[today.year, today.month]) weekNum = gregorian_to_week_date(today)[1] weeklyUrl = myurl + self.reverse_subpage('serveWeek', args=[today.year, weekNum]) listUrl = myurl + self.reverse_subpage('servePast') upcomingEvents = self._getUpcomingEvents(request) paginator = Paginator(upcomingEvents, self.EventsPerPage) try: eventsPage = paginator.page(request.GET.get('page')) except PageNotAnInteger: eventsPage = paginator.page(1) except EmptyPage: eventsPage = paginator.page(paginator.num_pages) # TODO Consider changing to a TemplateResponse # https://stackoverflow.com/questions/38838601 return render(request, "joyous/calendar_list_upcoming.html", {'self': self, 'page': self, 'version': __version__, 'today': today, 'weeklyUrl': weeklyUrl, 'monthlyUrl': monthlyUrl, 'listUrl': listUrl, 'events': eventsPage})
Upcoming events list view.
def consumer(self, fn): """Consumer decorator :param fn: coroutine consumer function Example: >>> api = StreamingAPI('my_service_key') >>> stream = api.get_stream() >>> @stream.consumer >>> @asyncio.coroutine >>> def handle_event(payload): >>> print(payload) """ if self._consumer_fn is not None: raise ValueError('Consumer function is already defined for this ' 'Stream instance') if not any([asyncio.iscoroutine(fn), asyncio.iscoroutinefunction(fn)]): raise ValueError('Consumer function must be a coroutine') self._consumer_fn = fn
Consumer decorator :param fn: coroutine consumer function Example: >>> api = StreamingAPI('my_service_key') >>> stream = api.get_stream() >>> @stream.consumer >>> @asyncio.coroutine >>> def handle_event(payload): >>> print(payload)
def new_mapping(self, lineup, station, channel, channelMinor, validFrom, validTo, onAirFrom, onAirTo): """Callback run for each new mapping within a lineup""" if self.__v_mapping: # [Mapping: FL09567:X, 11097, 45, None, 2010-06-29 00:00:00.00, None, None, None] print("[Mapping: %s, %s, %s, %s, %s, %s, %s, %s]" % (lineup, station, channel, channelMinor, validFrom, validTo, onAirFrom, onAirTo))
Callback run for each new mapping within a lineup
def register_token(self, *args, **kwargs): """ Register token Accepts: - token_name [string] - contract_address [hex string] - blockchain [string] token's blockchain (QTUMTEST, ETH) Returns dictionary with following fields: - success [Bool] """ check_sig = kwargs.pop('check_sig', False) # check_sig was undefined in the original snippet; treated here as an optional keyword argument client = HTTPClient(self.withdraw_server_address + self.withdraw_endpoint) if check_sig: return client.request('register_token', self.signature_validator.sign(kwargs)) else: return client.request('register_token', kwargs)
Register token Accepts: - token_name [string] - contract_address [hex string] - blockchain [string] token's blockchain (QTUMTEST, ETH) Returns dictionary with following fields: - success [Bool]
def safe_remove_file(filename, app): """ Removes a given resource file from builder resources. Needed mostly during tests, if multiple sphinx-builds are started. During these tests js/css files are not cleaned, so a css_file from run A is still registered in run B. :param filename: filename to remove :param app: app object :return: None """ data_file = filename static_data_file = os.path.join("_static", data_file) if data_file.split(".")[-1] == "js": if hasattr(app.builder, "script_files") and static_data_file in app.builder.script_files: app.builder.script_files.remove(static_data_file) elif data_file.split(".")[-1] == "css": if hasattr(app.builder, "css_files") and static_data_file in app.builder.css_files: app.builder.css_files.remove(static_data_file)
Removes a given resource file from builder resources. Needed mostly during tests, if multiple sphinx-builds are started. During these tests js/css files are not cleaned, so a css_file from run A is still registered in run B. :param filename: filename to remove :param app: app object :return: None
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False): """Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ parser = _Parser(allow_unknown_extension, allow_field_number) return parser.ParseLines(lines, message)
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
async def read(cls, node): """Get list of `Bcache`'s for `node`.""" if isinstance(node, str): system_id = node elif isinstance(node, Node): system_id = node.system_id else: raise TypeError( "node must be a Node or str, not %s" % type(node).__name__) data = await cls._handler.read(system_id=system_id) return cls( cls._object( item, local_data={"node_system_id": system_id}) for item in data)
Get list of `Bcache`'s for `node`.
def get_dev_vlans(auth, url, devid=None, devip=None): """Function takes input of devid to issue a RESTful call to HP IMC :param auth: requests auth object # usually auth.creds from the pyhpeimc.auth class :param url: base url of IMC RS interface # usually auth.url from the pyhpeimc.auth class :param devid: str requires devId as the only input parameter :param devip: str of ipv4 address of the target device :return: list of dictionaries where each element of the list represents one vlan on the target device :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.vlanm import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> vlans = get_dev_vlans(auth.creds, auth.url, devid='350') >>> assert type(vlans) is list >>> assert 'vlanId' in vlans[0] """ if devip is not None: devid = get_dev_details(devip, auth, url)['id'] get_dev_vlans_url = "/imcrs/vlan?devId=" + str(devid) + "&start=0&size=5000&total=false" f_url = url + get_dev_vlans_url response = requests.get(f_url, auth=auth, headers=HEADERS) try: if response.status_code == 200: dev_vlans = (json.loads(response.text)) return dev_vlans['vlan'] elif response.status_code == 409: return {'vlan': 'no vlans'} except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + ' get_dev_vlans: An error has occurred'
Function takes input of devid to issue a RESTful call to HP IMC :param auth: requests auth object # usually auth.creds from the pyhpeimc.auth class :param url: base url of IMC RS interface # usually auth.url from the pyhpeimc.auth class :param devid: str requires devId as the only input parameter :param devip: str of ipv4 address of the target device :return: list of dictionaries where each element of the list represents one vlan on the target device :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.vlanm import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> vlans = get_dev_vlans(auth.creds, auth.url, devid='350') >>> assert type(vlans) is list >>> assert 'vlanId' in vlans[0]
def check_docker_access(self): """ Creates a :class:`DockerClient <docker.client.DockerClient>` for the instance and checks the connection. :raise BuildError: If docker isn't accessible by the current user. """ try: if self.client is None: self.client = docker.from_env() self.client.ping() # check that docker is running and user is permitted to access it except ConnectionError as e: logger.exception(e) raise BuildError("Docker is not running or the current user doesn't have permissions to access docker.")
Creates a :class:`DockerClient <docker.client.DockerClient>` for the instance and checks the connection. :raise BuildError: If docker isn't accessible by the current user.
def _register_endpoints(self, providers): """ See super class satosa.frontends.base.FrontendModule#register_endpoints :type providers: list[str] :rtype list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] | list[(str, (satosa.context.Context) -> satosa.response.Response)] :param providers: A list with backend names :return: A list of url and endpoint function pairs """ url_map = [] for endp_category in self.endpoints: for binding, endp in self.endpoints[endp_category].items(): valid_providers = "|^".join(providers) parsed_endp = urlparse(endp) url_map.append((r"(^%s)/\S+/%s" % (valid_providers, parsed_endp.path), functools.partial(self.handle_authn_request, binding_in=binding))) return url_map
See super class satosa.frontends.base.FrontendModule#register_endpoints :type providers: list[str] :rtype list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] | list[(str, (satosa.context.Context) -> satosa.response.Response)] :param providers: A list with backend names :return: A list of url and endpoint function pairs
def p_joinx(self,t): # todo: support join types http://www.postgresql.org/docs/9.4/static/queries-table-expressions.html#QUERIES-JOIN """joinx : fromtable jointype fromtable | fromtable jointype fromtable kw_on expression | fromtable jointype fromtable kw_using '(' namelist ')' """ if len(t)==4: t[0] = JoinX(t[1],t[3],None,t[2]) elif len(t)==6: t[0] = JoinX(t[1],t[3],t[5],t[2]) else: raise NotImplementedError('todo: join .. using')
joinx : fromtable jointype fromtable | fromtable jointype fromtable kw_on expression | fromtable jointype fromtable kw_using '(' namelist ')'
def prob(self, comparison_vectors, return_type=None): """Compute the probabilities for each record pair. For each pair of records, estimate the probability of being a match. Parameters ---------- comparison_vectors : pandas.DataFrame The dataframe with comparison vectors. return_type : str Deprecated. (default 'series') Returns ------- pandas.Series or numpy.ndarray The probability of being a match for each record pair. """ if return_type is not None: warnings.warn("The argument 'return_type' is removed. " "Default value is now 'series'.", VisibleDeprecationWarning, stacklevel=2) logging.info("Classification - compute probabilities") prob_match = self._prob_match(comparison_vectors.values) return pandas.Series(prob_match, index=comparison_vectors.index)
Compute the probabilities for each record pair. For each pair of records, estimate the probability of being a match. Parameters ---------- comparison_vectors : pandas.DataFrame The dataframe with comparison vectors. return_type : str Deprecated. (default 'series') Returns ------- pandas.Series or numpy.ndarray The probability of being a match for each record pair.
def del_layer(self, layer_num): """ Delete mesh layer """ del self.layer_stack[layer_num] # Adjust current layer if needed if layer_num < self.current_layer(): self.set_current_layer(self.current_layer() - 1) return None
Delete mesh layer
def create(cls, statement_format, date_start, date_end, monetary_account_id=None, regional_format=None, custom_headers=None): """ :type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_STATEMENT_FORMAT: statement_format, cls.FIELD_DATE_START: date_start, cls.FIELD_DATE_END: date_end, cls.FIELD_REGIONAL_FORMAT: regional_format } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id)) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
:type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
def fetch(self, url): """ Get the feed content using 'requests' """ try: r = requests.get(url, timeout=self.timeout) except requests.exceptions.Timeout: if not self.safe: raise else: return None # Raise 404/500 error if any if r and not self.safe: r.raise_for_status() return r.text
Get the feed content using 'requests'
def doParseXMLData( self ): """This function parses the XML output of FileMaker.""" parser = xml2obj.Xml2Obj() # Handle an invalid document coming from FMServer if self.data[-6:] == '</COL>': self.data += '</ROW></RESULTSET></FMPXMLRESULT>' xobj = parser.ParseString( self.data ) try: el = xobj.getElements( 'ERRORCODE') if el: self.errorcode = int( el[0].getData() ) else: self.errorcode = int( xobj.getElements('error')[0].getAttribute('code') ) except Exception: FMErrorByNum( 954 ) if self.errorcode != 0: FMErrorByNum( self.errorcode ) return xobj
This function parses the XML output of FileMaker.
def getTaskInfos(self): """ .. note:: Experimental Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage, ordered by partition ID. .. versionadded:: 2.4.0 """ if self._port is None or self._secret is None: raise Exception("Not supported to call getTaskInfos() before initialize " + "BarrierTaskContext.") else: addresses = self._localProperties.get("addresses", "") return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
.. note:: Experimental Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage, ordered by partition ID. .. versionadded:: 2.4.0
def spawn(self, actor, aid=None, **params): '''Spawn a new actor from ``actor``. ''' aid = aid or create_aid() future = actor.send('arbiter', 'spawn', aid=aid, **params) return actor_proxy_future(aid, future)
Spawn a new actor from ``actor``.
def from_str(cls, s): """Construct an import object from a string.""" ast_obj = ast.parse(s).body[0] if not isinstance(ast_obj, cls._expected_ast_type): raise AssertionError( 'Expected ast of type {!r} but got {!r}'.format( cls._expected_ast_type, ast_obj ) ) return cls(ast_obj)
Construct an import object from a string.
def lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks): """Lower CoerceType blocks into "INSTANCEOF" Filter blocks. Intended for folded IR blocks.""" new_folded_ir_blocks = [] for block in folded_ir_blocks: if isinstance(block, CoerceType): new_block = convert_coerce_type_to_instanceof_filter(block) else: new_block = block new_folded_ir_blocks.append(new_block) return new_folded_ir_blocks
Lower CoerceType blocks into "INSTANCEOF" Filter blocks. Intended for folded IR blocks.
def is_builtin_name(name): """For example, __foo__ or __bar__.""" if name.startswith('__') and name.endswith('__'): return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None return False
For example, __foo__ or __bar__.
def serialized_task(self, task: Task) -> Tuple[str, str]: """ Returns the name of the task definition file and its contents. """ return f"{task.hash}.json", task.json
Returns the name of the task definition file and its contents.
def price(usr, item, searches=2, method="AVERAGE", deduct=0): """ Searches the shop wizard for the given item and determines its price with the given method Searches the shop wizard x times (x being the number given in searches) for the given item and collects the lowest price from each result. Uses the given pricing method to determine and return the price of the item. Below is information on each pricing method available: ShopWizard.AVERAGE -- Average of the lowest prices ShopWizard.LOWDEDUCT -- Deducts x (x = deduct) from the lowest price ShopWizard.AVGDEDUCT -- Deducts x (x = deduct) from the average of the lowest prices ShopWizard.LOW -- Returns the lowest price ShopWizard.RETLOW -- Returns a (price, owner, id) tuple for the lowest price found Parameters: usr (User) -- User to search with item (str, Item) -- Item to search for searches (int) -- Number of times to search for the item method (str) -- Pricing method deduct (int) -- Amount to deduct from the price (if applicable) Returns int or tuple -- The item price (a (price, owner, id) tuple with RETLOW), or False if the item appears to be unbuyable """ if method not in ShopWizard.methods: raise invalidMethod() if isinstance(item, Item): item = item.name prices = [] dets = {} for x in range(0, searches): results = ShopWizard.search(usr, item) # Set to -1 if not found if not results: prices.append(-1) continue prices.append(int(results[0].price)) dets[str(results[0].price)] = (results[0].owner, results[0].id) time.sleep(ShopWizard.waitTime) # Determine if the item was unbuyable (every search failed) if sum(prices) == len(prices) * -1: return False prices = list(filter(lambda x: x != -1, prices)) if method == ShopWizard.RETLOW: price = sorted(prices)[0] return (price, dets[str(price)][0], dets[str(price)][1]) return ShopWizard.__determinePrice(prices, method, deduct)
Searches the shop wizard for the given item and determines its price with the given method Searches the shop wizard x times (x being the number given in searches) for the given item and collects the lowest price from each result. Uses the given pricing method to determine and return the price of the item. Below is information on each pricing method available: ShopWizard.AVERAGE -- Average of the lowest prices ShopWizard.LOWDEDUCT -- Deducts x (x = deduct) from the lowest price ShopWizard.AVGDEDUCT -- Deducts x (x = deduct) from the average of the lowest prices ShopWizard.LOW -- Returns the lowest price ShopWizard.RETLOW -- Returns a (price, owner, id) tuple for the lowest price found Parameters: usr (User) -- User to search with item (str, Item) -- Item to search for searches (int) -- Number of times to search for the item method (str) -- Pricing method deduct (int) -- Amount to deduct from the price (if applicable) Returns int or tuple -- The item price (a (price, owner, id) tuple with RETLOW), or False if the item appears to be unbuyable
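A hedged usage sketch for the pricing helper above; the User constructor arguments and the item name are illustrative assumptions, not taken from the source.

# Illustrative only: credentials and item name are assumptions.
usr = User('username', 'password')
avg_price = ShopWizard.price(usr, 'Mau Codestone', searches=3,
                             method=ShopWizard.AVERAGE)
if avg_price is False:
    print('Item appears to be unbuyable (every search came back empty)')
else:
    print('Estimated price:', avg_price)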
def calculate_width_and_height(url_parts, options): '''Appends width and height information to url''' width = options.get('width', 0) has_width = width height = options.get('height', 0) has_height = height flip = options.get('flip', False) flop = options.get('flop', False) if flip: width = width * -1 if flop: height = height * -1 if not has_width and not has_height: if flip: width = "-0" if flop: height = "-0" if width or height: url_parts.append('%sx%s' % (width, height))
Appends width and height information to url
def SLOAD(self, offset): """Load word from storage""" storage_address = self.address self._publish('will_evm_read_storage', storage_address, offset) value = self.world.get_storage_data(storage_address, offset) self._publish('did_evm_read_storage', storage_address, offset, value) return value
Load word from storage
def expect_keyword(lexer: Lexer, value: str) -> Token: """Expect the next token to be a given keyword. If the next token is a given keyword, return that token after advancing the lexer. Otherwise, do not change the parser state and throw an error. """ token = lexer.token if token.kind == TokenKind.NAME and token.value == value: lexer.advance() return token raise GraphQLSyntaxError( lexer.source, token.start, f"Expected {value!r}, found {token.desc}" )
Expect the next token to be a given keyword. If the next token is a given keyword, return that token after advancing the lexer. Otherwise, do not change the parser state and throw an error.
def start_instance(self, instance): """ Starts a single instance. :param str instance: A Yamcs instance name. """ params = {'state': 'running'} url = '/instances/{}'.format(instance) self.patch_proto(url, params=params)
Starts a single instance. :param str instance: A Yamcs instance name.
def main(port=4118, parentpid=None): """Main entry point. Parse command line options and start up a server.""" if "LDTP_DEBUG" in os.environ: _ldtp_debug = True else: _ldtp_debug = False _ldtp_debug_file = os.environ.get('LDTP_DEBUG_FILE', None) if _ldtp_debug and parentpid: print("Parent PID: {}".format(int(parentpid))) if _ldtp_debug_file: with open(_ldtp_debug_file, "a") as fp: fp.write("Parent PID: {}".format(int(parentpid))) server = LDTPServer(('', port), allow_none=True, logRequests=_ldtp_debug, requestHandler=RequestHandler) server.register_introspection_functions() server.register_multicall_functions() ldtp_inst = core.Core() server.register_instance(ldtp_inst) if parentpid: thread.start_new_thread(notifyclient, (parentpid,)) try: server.serve_forever() except KeyboardInterrupt: pass except Exception: if _ldtp_debug: print(traceback.format_exc()) if _ldtp_debug_file: with open(_ldtp_debug_file, "a") as fp: fp.write(traceback.format_exc())
Main entry point. Parse command line options and start up a server.
def get_user_by_key(app, key): """ An SQLAlchemy User getting function. Get a user by public key. :param str key: the public key the user belongs to """ user = ses.query(um.User).join(um.UserKey).filter(um.UserKey.key==key).first() return user
An SQLAlchemy User getting function. Get a user by public key. :param str key: the public key the user belongs to
def step_size(self, t0, t1=None): ''' Return the time in seconds for each step. Requires that we know a time relative to which we should calculate to account for variable length intervals (e.g. February) ''' tb0 = self.to_bucket( t0 ) if t1: tb1 = self.to_bucket( t1, steps=1 ) # NOTE: "end" of second bucket else: tb1 = self.to_bucket( t0, steps=1 ) # Calculate the difference in days, then multiply by simple scalar days = (self.from_bucket(tb1, native=True) - self.from_bucket(tb0, native=True)).days return days * SIMPLE_TIMES['d']
Return the time in seconds for each step. Requires that we know a time relative to which we should calculate to account for variable length intervals (e.g. February)
def findXScreens(self): qapp = QtCore.QCoreApplication.instance() if not qapp: # QApplication has not been started return screens = qapp.screens() """ let's find out which screens are virtual screen, siblings: One big virtual desktop: A [A, B, C] B [A, B, C] C [A, B, C] A & B in one xscreen, C in another: A [A, B] B [A, B] C [C] """ virtual_screens = set() for screen in screens: # if screen has been deemed as "virtual", don't check its siblings if (screen not in virtual_screens): siblings = screen.virtualSiblings() # remove the current screen under scrutiny from the siblings # list virtual_screens.update(set(siblings).difference(set([screen]))) # .. the ones left over are virtual # print("GPUHandler: findXScreens: virtual screens",virtual_screens) true_screens = list(set(screens) - virtual_screens) # sort'em for screen in true_screens: self.true_screens.insert(screens.index(screen), screen) print("GPUHandler: findXScreens: true screens:", self.true_screens)
let's find out which screens are virtual screen, siblings: One big virtual desktop: A [A, B, C] B [A, B, C] C [A, B, C] A & B in one xscreen, C in another: A [A, B] B [A, B] C [C]
def do_edit(self, line): """edit FILE Copies the file locally, launches an editor to edit the file. When the editor exits, if the file was modified then it's copied back. You can specify the editor used with the --editor command line option when you start rshell, or by using the VISUAL or EDITOR environment variable. If none of those are set, then vi will be used. """ if len(line) == 0: print_err("Must provide a filename") return filename = resolve_path(line) dev, dev_filename = get_dev_and_path(filename) mode = auto(get_mode, filename) if mode_exists(mode) and mode_isdir(mode): print_err("Unable to edit directory '{}'".format(filename)) return if dev is None: # File is local os.system("{} '{}'".format(EDITOR, filename)) else: # File is remote with tempfile.TemporaryDirectory() as temp_dir: local_filename = os.path.join(temp_dir, os.path.basename(filename)) if mode_exists(mode): print('Retrieving {} ...'.format(filename)) cp(filename, local_filename) old_stat = get_stat(local_filename) os.system("{} '{}'".format(EDITOR, local_filename)) new_stat = get_stat(local_filename) if old_stat != new_stat: self.print('Updating {} ...'.format(filename)) cp(local_filename, filename)
edit FILE Copies the file locally, launches an editor to edit the file. When the editor exits, if the file was modified then it's copied back. You can specify the editor used with the --editor command line option when you start rshell, or by using the VISUAL or EDITOR environment variable. If none of those are set, then vi will be used.
def _escaped_token_to_subtoken_strings(self, escaped_token): """ Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings. """ # NOTE: This algorithm is greedy; it won't necessarily produce the "best" # list of subtokens. ret = [] start = 0 token_len = len(escaped_token) while start < token_len: for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1): subtoken = escaped_token[start:end] if subtoken in self._all_subtoken_strings: ret.append(subtoken) start = end break else: # Did not break # If there is no possible encoding of the escaped token then one of the # characters in the token is not in the alphabet. This should be # impossible and would be indicative of a bug. assert False, "Token substring not found in subtoken vocabulary." return ret
Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings.
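The greedy longest-match loop above is easy to study in isolation. Below is a minimal standalone sketch of the same algorithm, with a toy vocabulary standing in for self._all_subtoken_strings (the vocabulary contents are assumptions).

# Toy vocabulary; single characters guarantee every token can be encoded.
vocab = {'un', 'believ', 'able', 'u', 'n', 'b', 'e', 'l', 'i', 'v', 'a'}
max_subtoken_len = max(len(s) for s in vocab)

def greedy_subtokens(token):
    ret, start = [], 0
    while start < len(token):
        # Try the longest candidate first, shrinking until one is in the vocab.
        for end in range(min(len(token), start + max_subtoken_len), start, -1):
            if token[start:end] in vocab:
                ret.append(token[start:end])
                start = end
                break
        else:
            raise AssertionError('Token substring not found in subtoken vocabulary.')
    return ret

print(greedy_subtokens('unbelievable'))  # ['un', 'believ', 'able']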
def InitFromApiFlow(self, f, cron_job_id=None): """Shortcut method for easy legacy cron jobs support.""" if f.flow_id: self.run_id = f.flow_id elif f.urn: self.run_id = f.urn.Basename() self.started_at = f.started_at self.cron_job_id = cron_job_id flow_state_enum = api_plugins_flow.ApiFlow.State cron_enum = rdf_cronjobs.CronJobRun.CronJobRunStatus errors_map = { flow_state_enum.RUNNING: cron_enum.RUNNING, flow_state_enum.TERMINATED: cron_enum.FINISHED, flow_state_enum.ERROR: cron_enum.ERROR, flow_state_enum.CLIENT_CRASHED: cron_enum.ERROR } self.status = errors_map[f.state] if f.state != f.State.RUNNING: self.finished_at = f.last_active_at if f.context.kill_timestamp: self.status = self.Status.LIFETIME_EXCEEDED if f.context.HasField("status"): self.log_message = f.context.status if f.context.HasField("backtrace"): self.backtrace = f.context.backtrace return self
Shortcut method for easy legacy cron jobs support.
def get_approval_by_id(self, issue_id_or_key, approval_id): """ Get an approval for a given approval ID :param issue_id_or_key: str :param approval_id: str :return: """ url = 'rest/servicedeskapi/request/{0}/approval/{1}'.format(issue_id_or_key, approval_id) return self.get(url, headers=self.experimental_headers)
Get an approval for a given approval ID :param issue_id_or_key: str :param approval_id: str :return:
def _transition(self, duration, hue=None, brightness=None): """ Transition. :param duration: Time to transition. :param hue: Transition to this hue. :param brightness: Transition to this brightness. """ # Calculate brightness steps. b_steps = 0 if brightness is not None: b_steps = steps(self.brightness, brightness, self.command_set.brightness_steps) b_start = self.brightness # Calculate hue steps. h_steps = 0 if hue is not None: h_steps = steps(self.hue, hue, self.command_set.hue_steps) h_start = self.hue # Compute ideal step amount (at least one). total_steps = max(b_steps, h_steps, 1) total_commands = b_steps + h_steps # Calculate wait. wait = self._wait(duration, total_steps, total_commands) # Scale down steps if no wait time. if wait == 0: b_steps, h_steps = self._scale_steps(duration, total_commands, b_steps, h_steps) total_steps = max(b_steps, h_steps, 1) # Perform transition. for i in range(total_steps): # Brightness. if b_steps > 0 and i % math.ceil(total_steps/b_steps) == 0: self.brightness = util.transition(i, total_steps, b_start, brightness) # Hue. if h_steps > 0 and i % math.ceil(total_steps/h_steps) == 0: self.hue = util.transition(i, total_steps, h_start, hue) # Wait. time.sleep(wait)
Transition. :param duration: Time to transition. :param hue: Transition to this hue. :param brightness: Transition to this brightness.
def apply_T12(word): '''There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].''' WORD = word offset = 0 for vv in new_vv(WORD): seq = vv.group(1) if not is_diphthong(seq) and not is_long(seq): i = vv.start(1) + 1 + offset WORD = WORD[:i] + '.' + WORD[i:] offset += 1 RULE = ' T2' if word != WORD else '' return WORD, RULE
There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
def _type_priority(ifo, ftype, trend=None): """Prioritise the given GWF type based on its name or trend status. This is essentially an ad-hoc ordering function based on internal knowledge of how LIGO does GWF type naming. """ # if looking for a trend channel, prioritise the matching type for trendname, trend_regex in [ ('m-trend', MINUTE_TREND_TYPE), ('s-trend', SECOND_TREND_TYPE), ]: if trend == trendname and trend_regex.match(ftype): return 0, len(ftype) # otherwise rank this type according to priority for reg, prio in { HIGH_PRIORITY_TYPE: 1, re.compile(r'[A-Z]\d_C'): 6, LOW_PRIORITY_TYPE: 10, MINUTE_TREND_TYPE: 10, SECOND_TREND_TYPE: 10, }.items(): if reg.search(ftype): return prio, len(ftype) return 5, len(ftype)
Prioritise the given GWF type based on its name or trend status. This is essentially an ad-hoc ordering function based on internal knowledge of how LIGO does GWF type naming.
def app_to_context(self, context): """Return a context encoded tag.""" if self.tagClass != Tag.applicationTagClass: raise ValueError("application tag required") # application tagged boolean now has data if (self.tagNumber == Tag.booleanAppTag): return ContextTag(context, chr(self.tagLVT)) else: return ContextTag(context, self.tagData)
Return a context encoded tag.
def reserve_position(fp, fmt='I'): """ Reserves the current position for write. Use with `write_position`. :param fp: file-like object :param fmt: format of the reserved position :return: the position """ position = fp.tell() fp.seek(struct.calcsize(str('>' + fmt)), 1) return position
Reserves the current position for write. Use with `write_position`. :param fp: file-like object :param fmt: format of the reserved position :return: the position
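The docstring says to pair this with write_position, which is not included in this collection; the sketch below assumes a plausible counterpart that seeks back, patches the reserved bytes, and restores the cursor, then uses reserve_position from the entry above.

import io
import struct

def write_position(fp, position, value, fmt='I'):
    # Assumed counterpart to reserve_position; not taken from the source.
    current = fp.tell()
    fp.seek(position)
    fp.write(struct.pack(str('>' + fmt), value))
    fp.seek(current)

fp = io.BytesIO()
length_pos = reserve_position(fp)             # leave 4 bytes for a length prefix
payload = b'hello'
fp.write(payload)
write_position(fp, length_pos, len(payload))  # patch the reserved slot afterwards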
def save_token(self, user, token): """Save the token on the config file.""" self.config.set('auth', 'user', user) self.config.set('auth', 'token', token) # allow_no_value=True is used to keep the comments on the config file. new_config = ConfigParser(allow_no_value=True) # Parse the config file. If no config file was found, then create some # default sections on the config variable. new_config.read(self.config_file) self.check_sections(new_config) new_config.set('auth', 'user', user) new_config.set('auth', 'token', token) filename = os.path.expanduser(self.config_file) with open(filename, 'w') as out_file: os.chmod(filename, 0o0600) new_config.write(out_file)
Save the token on the config file.
def group(self): """ Returns the periodic table group of the element. """ z = self.Z if z == 1: return 1 if z == 2: return 18 if 3 <= z <= 18: if (z - 2) % 8 == 0: return 18 elif (z - 2) % 8 <= 2: return (z - 2) % 8 else: return 10 + (z - 2) % 8 if 19 <= z <= 54: if (z - 18) % 18 == 0: return 18 else: return (z - 18) % 18 if (z - 54) % 32 == 0: return 18 elif (z - 54) % 32 >= 18: return (z - 54) % 32 - 14 else: return (z - 54) % 32
Returns the periodic table group of the element.
def cosh(x): """ Hyperbolic cosine """ if isinstance(x, UncertainFunction): mcpts = np.cosh(x._mcpts) return UncertainFunction(mcpts) else: return np.cosh(x)
Hyperbolic cosine
def _get_precision_scale(self, number): """ :param number: :return: tuple(precision, scale, decimal_number) """ try: decimal_num = Decimal(number) except InvalidOperation: raise Invalid(self.msg or 'Value must be a number enclosed with string') return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
:param number: :return: tuple(precision, scale, decimal_number)
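A quick standard-library illustration of the tuple this helper derives from Decimal.as_tuple().

from decimal import Decimal

d = Decimal('12.34')
precision = len(d.as_tuple().digits)  # 4 significant digits
scale = -d.as_tuple().exponent        # 2 digits after the decimal point
print(precision, scale, d)            # 4 2 12.34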
def get_nearest_successors(self, type_measurement): """! @brief Find pair of nearest successors of the node in line with measurement type. @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest successors. @return (list) Pair of nearest successors represented by list. """ nearest_node1 = None nearest_node2 = None nearest_distance = float("Inf") for i in range(0, len(self.successors)): candidate1 = self.successors[i] for j in range(i + 1, len(self.successors)): candidate2 = self.successors[j] candidate_distance = candidate1.get_distance(candidate2, type_measurement) if candidate_distance < nearest_distance: nearest_distance = candidate_distance nearest_node1 = candidate1 nearest_node2 = candidate2 return [nearest_node1, nearest_node2]
! @brief Find pair of nearest successors of the node in line with measurement type. @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest successors. @return (list) Pair of nearest successors represented by list.
def _configure_using_fluent_definition(self): """ Configure the console command using a fluent definition. """ definition = Parser.parse(self.signature) self._config.set_name(definition["name"]) for name, flags, description, default in definition["arguments"]: self._config.add_argument(name, flags, description, default) for long_name, short_name, flags, description, default in definition["options"]: self._config.add_option(long_name, short_name, flags, description, default)
Configure the console command using a fluent definition.
def _listify(collection): """This is a workaround where Collections are no longer iterable when using JPype.""" new_list = [] for index in range(len(collection)): new_list.append(collection[index]) return new_list
This is a workaround where Collections are no longer iterable when using JPype.
def override_locale(self, locale: str = locales.EN, ) -> Generator['BaseDataProvider', None, None]: """Context manager which allows overriding current locale. Temporarily overrides current locale for locale-dependent providers. :param locale: Locale. :return: Provider with overridden locale. """ try: origin_locale = self.locale self._override_locale(locale) try: yield self finally: self._override_locale(origin_locale) except AttributeError: raise ValueError('«{}» is not locale-dependent'.format( self.__class__.__name__))
Context manager which allows overriding current locale. Temporarily overrides current locale for locale-dependent providers. :param locale: Locale. :return: Provider with overridden locale.
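A hedged usage sketch in the mimesis style the snippet suggests; the Person provider and the locale constants are assumptions, not shown in the source.

# Person and locales are assumed mimesis-style names.
person = Person(locales.EN)
with person.override_locale(locales.DE):
    print(person.full_name())  # generated under the German locale
print(person.full_name())      # the original English locale is restored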
def filter_params(self, src_mod): """ Remove params unneeded by source_model """ # point and area related params STRIKE_PARAMS[src_mod.num_np:] = [] DIP_PARAMS[src_mod.num_np:] = [] RAKE_PARAMS[src_mod.num_np:] = [] NPW_PARAMS[src_mod.num_np:] = [] HDEPTH_PARAMS[src_mod.num_hd:] = [] HDW_PARAMS[src_mod.num_hd:] = [] # planar rupture related params PLANES_STRIKES_PARAM[src_mod.num_p:] = [] PLANES_DIPS_PARAM[src_mod.num_p:] = [] # rate params RATE_PARAMS[src_mod.num_r:] = [] if src_mod.has_simple_fault_geometry is False: GEOMETRY_PARAMS.remove(('dip', 'dip', 'f')) if (src_mod.has_simple_fault_geometry is False and src_mod.has_complex_fault_geometry is False and src_mod.has_planar_geometry is False): BASE_PARAMS.remove(('rake', 'rake', 'f')) if (src_mod.has_simple_fault_geometry is False and src_mod.has_complex_fault_geometry is False and src_mod.has_area_source is False and src_mod.has_point_source is False): GEOMETRY_PARAMS[:] = [] if src_mod.has_mfd_incremental is False: MFD_PARAMS.remove(('binWidth', 'bin_width', 'f'))
Remove params unneeded by source_model
def _getMetadata(self, key): """_getMetadata(self, key) -> char *""" if self.isClosed: raise ValueError("operation illegal for closed doc") return _fitz.Document__getMetadata(self, key)
_getMetadata(self, key) -> char *
def _unicode(self): '''This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.''' return u'\n'.join ([ u''.join(c) for c in self.w ])
This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.
def getInferenceTypeFromLabel(cls, label): """ Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) for a metric spec generated by :meth:`getMetricLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`) """ infType, _, _= label.partition(cls._LABEL_SEPARATOR) if not InferenceType.validate(infType): return None return infType
Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) for a metric spec generated by :meth:`getMetricLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
def scale_back_batch(self, bboxes_in, scores_in): """ Do scale and transform from xywh to ltrb suppose input Nx4xnum_bbox Nxlabel_numxnum_bbox """ if bboxes_in.device == torch.device("cpu"): self.dboxes = self.dboxes.cpu() self.dboxes_xywh = self.dboxes_xywh.cpu() else: self.dboxes = self.dboxes.cuda() self.dboxes_xywh = self.dboxes_xywh.cuda() bboxes_in = bboxes_in.permute(0, 2, 1) scores_in = scores_in.permute(0, 2, 1) # print(bboxes_in.device, scores_in.device, self.dboxes_xywh.device) bboxes_in[:, :, :2] = self.scale_xy * bboxes_in[:, :, :2] bboxes_in[:, :, 2:] = self.scale_wh * bboxes_in[:, :, 2:] bboxes_in[:, :, :2] = bboxes_in[:, :, :2] * self.dboxes_xywh[:, :, 2:] + self.dboxes_xywh[:, :, :2] bboxes_in[:, :, 2:] = bboxes_in[:, :, 2:].exp() * self.dboxes_xywh[:, :, 2:] # Transform format to ltrb l, t, r, b = bboxes_in[:, :, 0] - 0.5 * bboxes_in[:, :, 2], \ bboxes_in[:, :, 1] - 0.5 * bboxes_in[:, :, 3], \ bboxes_in[:, :, 0] + 0.5 * bboxes_in[:, :, 2], \ bboxes_in[:, :, 1] + 0.5 * bboxes_in[:, :, 3] bboxes_in[:, :, 0] = l bboxes_in[:, :, 1] = t bboxes_in[:, :, 2] = r bboxes_in[:, :, 3] = b return bboxes_in, F.softmax(scores_in, dim=-1)
Do scale and transform from xywh to ltrb suppose input Nx4xnum_bbox Nxlabel_numxnum_bbox
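The xywh-to-ltrb step above is self-contained enough to demonstrate with plain numpy; the box values below are arbitrary assumptions.

import numpy as np

boxes_xywh = np.array([[0.5, 0.5, 0.2, 0.4]])  # cx, cy, w, h
l = boxes_xywh[:, 0] - 0.5 * boxes_xywh[:, 2]
t = boxes_xywh[:, 1] - 0.5 * boxes_xywh[:, 3]
r = boxes_xywh[:, 0] + 0.5 * boxes_xywh[:, 2]
b = boxes_xywh[:, 1] + 0.5 * boxes_xywh[:, 3]
print(np.stack([l, t, r, b], axis=1))  # [[0.4 0.3 0.6 0.7]]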
def predict(self, X): """Returns predictions of input test cases.""" return self.__cost(self.__unroll(self.__thetas), 0, np.matrix(X))
Returns predictions of input test cases.
def _get_image_size(self, image_path): """Return disk size in bytes""" command = 'du -b %s' % image_path (rc, output) = zvmutils.execute(command) if rc: msg = ("Error happened when executing command du -b with reason: %s" % output) LOG.error(msg) raise exception.SDKImageOperationError(rs=8) size = output.split()[0] return size
Return disk size in bytes
def retrieve_approver_email_list(self, domain, product_id): """Retrieve the list of allowed approver email addresses.""" response = self.request(E.retrieveApproverEmailListSslCertRequest( E.domain(domain), E.productId(product_id) )) return [str(i) for i in response.data.array[0].item]
Retrieve the list of allowed approver email addresses.
def standard_kinetics(target, quantity, prefactor, exponent): r"""Calculate the rate ``r = A*X**b`` for a standard kinetics source term, along with its linearisation terms ``S1 = dr/dX`` and ``S2 = r - X*(dr/dX)``.""" X = target[quantity] A = target[prefactor] b = target[exponent] r = A*(X**b) S1 = A*b*(X**(b - 1)) S2 = A*(1 - b)*(X**b) values = {'S1': S1, 'S2': S2, 'rate': r} return values
r"""
def update_geometry(self): """ Updates the Widget geometry. :return: Method success. :rtype: bool """ self.setGeometry(self.__editor.contentsRect().left(), self.__editor.contentsRect().top(), self.get_width(), self.__editor.contentsRect().height()) return True
Updates the Widget geometry. :return: Method success. :rtype: bool
def returner(ret): ''' Insert minion return data into the sqlite3 database ''' log.debug('sqlite3 returner <returner> called with data: %s', ret) conn = _get_conn(ret) cur = conn.cursor() sql = '''INSERT INTO salt_returns (fun, jid, id, fun_args, date, full_ret, success) VALUES (:fun, :jid, :id, :fun_args, :date, :full_ret, :success)''' cur.execute(sql, {'fun': ret['fun'], 'jid': ret['jid'], 'id': ret['id'], 'fun_args': six.text_type(ret['fun_args']) if ret.get('fun_args') else None, 'date': six.text_type(datetime.datetime.now()), 'full_ret': salt.utils.json.dumps(ret['return']), 'success': ret.get('success', '')}) _close_conn(conn)
Insert minion return data into the sqlite3 database
def make_call_positionals(stack_builders, count): """ Make the args entry for an ast.Call node. """ out = [make_expr(stack_builders) for _ in range(count)] out.reverse() return out
Make the args entry for an ast.Call node.
def lock_access(repository_path, callback): """ Synchronise access to the user file between processes, this specifies which user is allowed write access at the current time """ with open(cpjoin(repository_path, 'lock_file'), 'w') as fd: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) returned = callback() fcntl.flock(fd, fcntl.LOCK_UN) return returned except IOError: return fail(lock_fail_msg)
Synchronise access to the user file between processes, this specifies which user is allowed write access at the current time
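A sketch of how a caller might serialise writes through the flock above; the repository path and callback are hypothetical.

def update_user_file():
    # This body runs only while the exclusive lock on 'lock_file' is held.
    return True

result = lock_access('/srv/repo', update_user_file)  # hypothetical path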
def _load_from_file(self, filename): """Find filename in tar, and load it""" if filename in self.fdata: return self.fdata[filename] else: filepath = find_in_tarball(self.tarloc, filename) return read_from_tarball(self.tarloc, filepath)
Find filename in tar, and load it
def get_weekly_chart_dates(self): """Returns a list of From and To tuples for the available charts.""" doc = self._request(self.ws_prefix + ".getWeeklyChartList", True) seq = [] for node in doc.getElementsByTagName("chart"): seq.append((node.getAttribute("from"), node.getAttribute("to"))) return seq
Returns a list of From and To tuples for the available charts.
def off(self): """Send an OFF message to device group.""" off_command = ExtendedSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00, self._udata) off_command.set_checksum() self._send_method(off_command, self._off_message_received)
Send an OFF message to device group.
def bqsr_table(data): """Generate recalibration tables as inputs to BQSR. """ in_file = dd.get_align_bam(data) out_file = "%s-recal-table.txt" % utils.splitext_plus(in_file)[0] if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: assoc_files = dd.get_variation_resources(data) known = "-k %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else "" license = license_export(data) cores = dd.get_num_cores(data) ref_file = dd.get_ref_file(data) cmd = ("{license}sentieon driver -t {cores} -r {ref_file} " "-i {in_file} --algo QualCal {known} {tx_out_file}") do.run(cmd.format(**locals()), "Sentieon QualCal generate table") return out_file
Generate recalibration tables as inputs to BQSR.
def p_empty_statement(self, p): """empty_statement : SEMI""" p[0] = self.asttypes.EmptyStatement(p[1]) p[0].setpos(p)
empty_statement : SEMI
def write_json_document(title, body): """ `title` - Name of the file to write. `body` - Python datastructure representing the document. This method handles transforming the body into a proper json string, and then writing the file to disk. """ if not title.endswith('.json'): title += '.json' json_body = create_json_str(body) if os.path.exists(title): juicer.utils.Log.log_warn("Cart file '%s' already exists, overwriting with new data." % title) f = open(title, 'w') f.write(json_body) f.flush() f.close()
`title` - Name of the file to write. `body` - Python datastructure representing the document. This method handles transforming the body into a proper json string, and then writing the file to disk.
def securityHandler(self, value): """ sets the security handler """ if isinstance(value, BaseSecurityHandler): if isinstance(value, security.AGOLTokenSecurityHandler): self._securityHandler = value elif isinstance(value, security.OAuthSecurityHandler): self._securityHandler = value else: pass
sets the security handler
def Ax(self): """Compute a stack of skew-symmetric matrices which can be multiplied by 'b' to get the cross product. See: http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication """ # 0 -self.a3 self.a2 # self.a3 0 -self.a1 # -self.a2 self.a1 0 m = np.zeros((len(self.a1), 3, 3)) m[:, 0, 1] = -self.a3 m[:, 0, 2] = +self.a2 m[:, 1, 0] = +self.a3 m[:, 1, 2] = -self.a1 m[:, 2, 0] = -self.a2 m[:, 2, 1] = +self.a1 return m
Compute a stack of skew-symmetric matrices which can be multiplied by 'b' to get the cross product. See: http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
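A quick numerical check (a sketch, not from the source) that the stacked skew-symmetric matrices reproduce np.cross row by row.

import numpy as np

a = np.random.rand(5, 3)
b = np.random.rand(5, 3)
m = np.zeros((5, 3, 3))
m[:, 0, 1], m[:, 0, 2] = -a[:, 2], a[:, 1]
m[:, 1, 0], m[:, 1, 2] = a[:, 2], -a[:, 0]
m[:, 2, 0], m[:, 2, 1] = -a[:, 1], a[:, 0]
cross = np.einsum('nij,nj->ni', m, b)  # batched matrix-vector product
assert np.allclose(cross, np.cross(a, b))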
def get_comments(self) -> Iterator[PostComment]: r"""Iterate over all comments of the post. Each comment is represented by a PostComment namedtuple with fields text (string), created_at (datetime), id (int), owner (:class:`Profile`) and answers (:class:`~typing.Iterator`\ [:class:`PostCommentAnswer`]) if available. """ def _postcommentanswer(node): return PostCommentAnswer(id=int(node['id']), created_at_utc=datetime.utcfromtimestamp(node['created_at']), text=node['text'], owner=Profile(self._context, node['owner'])) def _postcommentanswers(node): if 'edge_threaded_comments' not in node: return answer_count = node['edge_threaded_comments']['count'] if answer_count == 0: # Avoid doing additional requests if there are no comment answers return answer_edges = node['edge_threaded_comments']['edges'] if answer_count == len(answer_edges): # If the answer's metadata already contains all comments, don't do GraphQL requests to obtain them yield from (_postcommentanswer(comment['node']) for comment in answer_edges) return yield from (_postcommentanswer(answer_node) for answer_node in self._context.graphql_node_list("51fdd02b67508306ad4484ff574a0b62", {'comment_id': node['id']}, 'https://www.instagram.com/p/' + self.shortcode + '/', lambda d: d['data']['comment']['edge_threaded_comments'])) def _postcomment(node): return PostComment(*_postcommentanswer(node), answers=_postcommentanswers(node)) if self.comments == 0: # Avoid doing additional requests if there are no comments return try: comment_edges = self._field('edge_media_to_parent_comment', 'edges') answers_count = sum([edge['node']['edge_threaded_comments']['count'] for edge in comment_edges]) threaded_comments_available = True except KeyError: comment_edges = self._field('edge_media_to_comment', 'edges') answers_count = 0 threaded_comments_available = False if self.comments == len(comment_edges) + answers_count: # If the Post's metadata already contains all parent comments, don't do GraphQL requests to obtain them yield from (_postcomment(comment['node']) for comment in comment_edges) return yield from (_postcomment(node) for node in self._context.graphql_node_list( "97b41c52301f77ce508f55e66d17620e" if threaded_comments_available else "f0986789a5c5d17c2400faebf16efd0d", {'shortcode': self.shortcode}, 'https://www.instagram.com/p/' + self.shortcode + '/', lambda d: d['data']['shortcode_media'][ 'edge_media_to_parent_comment' if threaded_comments_available else 'edge_media_to_comment'], self._rhx_gis))
r"""Iterate over all comments of the post. Each comment is represented by a PostComment namedtuple with fields text (string), created_at (datetime), id (int), owner (:class:`Profile`) and answers (:class:`~typing.Iterator`\ [:class:`PostCommentAnswer`]) if available.
def _note_reply_pending(self, option, state): """Record the status of requested Telnet options.""" if option not in self.telnet_opt_dict: self.telnet_opt_dict[option] = TelnetOption() self.telnet_opt_dict[option].reply_pending = state
Record the status of requested Telnet options.
def new(params, event_shape=(), dtype=None, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" with tf.compat.v1.name_scope(name, 'IndependentBernoulli', [params, event_shape]): params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( tf.convert_to_tensor( value=event_shape, name='event_shape', dtype_hint=tf.int32), tensor_name='event_shape') new_shape = tf.concat([ tf.shape(input=params)[:-1], event_shape, ], axis=0) dist = tfd.Independent( tfd.Bernoulli( logits=tf.reshape(params, new_shape), dtype=dtype or params.dtype.base_dtype, validate_args=validate_args), reinterpreted_batch_ndims=tf.size(input=event_shape), validate_args=validate_args) dist._logits = dist.distribution._logits # pylint: disable=protected-access dist._probs = dist.distribution._probs # pylint: disable=protected-access dist.logits = tfd.Bernoulli.logits dist.probs = tfd.Bernoulli.probs return dist
Create the distribution instance from a `params` vector.
def fan_speed(self, value): """Verifies the value is between 1 and 9 inclusively.""" if value not in range(1, 10): raise exceptions.RoasterValueError self._fan_speed.value = value
Verifies the value is between 1 and 9 inclusively.
def get_config(self): """ Currently only contains the "config" member, which is a string containing the config file as loaded by i3 most recently. :rtype: ConfigReply """ data = self.message(MessageType.GET_CONFIG, '') return json.loads(data, object_hook=ConfigReply)
Currently only contains the "config" member, which is a string containing the config file as loaded by i3 most recently. :rtype: ConfigReply
def execute_query(query, db=None, flags=None, use_sudo=False, **kwargs): """Execute remote psql query.""" flags = flags or u'' if db: flags = u"%s -d %s" % (flags, db) command = u'psql %s -c "%s"' % (flags, query) if use_sudo: sudo(command, user='postgres', **kwargs) else: run(command, **kwargs)
Execute remote psql query.
def get_serializer_class(self, view, method_func): """ Try to get the serializer class from view method. If view method don't have request serializer, fallback to serializer_class on view class """ if hasattr(method_func, 'request_serializer'): return getattr(method_func, 'request_serializer') if hasattr(view, 'serializer_class'): return getattr(view, 'serializer_class') if hasattr(view, 'get_serializer_class'): return getattr(view, 'get_serializer_class')() return None
Try to get the serializer class from view method. If view method don't have request serializer, fallback to serializer_class on view class
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'count') and self.count is not None: _dict['count'] = self.count if hasattr(self, 'relevance') and self.relevance is not None: _dict['relevance'] = self.relevance if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'emotion') and self.emotion is not None: _dict['emotion'] = self.emotion._to_dict() if hasattr(self, 'sentiment') and self.sentiment is not None: _dict['sentiment'] = self.sentiment._to_dict() return _dict
Return a json dictionary representing this model.
def get_basis(name, elements=None, version=None, fmt=None, uncontract_general=False, uncontract_spdf=False, uncontract_segmented=False, make_general=False, optimize_general=False, data_dir=None, header=True): '''Obtain a basis set This is the main function for getting basis set information. This function reads in all the basis data and returns it either as a string or as a python dictionary. Parameters ---------- name : str Name of the basis set. This is not case sensitive. elements : str or list List of elements that you want the basis set for. Elements can be specified by Z-number (int or str) or by symbol (str). If this argument is a str (ie, '1-3,7-10'), it is expanded into a list. Z numbers and symbols (case insensitive) can be used interchangeably (see :func:`bse.misc.expand_elements`) If an empty string or list is passed, or if None is passed (the default), all elements for which the basis set is defined are included. version : int or str Obtain a specific version of this basis set. By default, the latest version is returned. fmt: str The desired output format of the basis set. By default, basis set information is returned as a python dictionary. Otherwise, if a format is specified, a string is returned. Use :func:`bse.api.get_formats` to programmatically obtain the available formats. The `fmt` argument is not case sensitive. Available formats are * nwchem * gaussian94 * psi4 * gamess_us * turbomole * json uncontract_general : bool If True, remove general contractions by duplicating the set of primitive exponents with each vector of coefficients. Primitives with zero coefficient are removed, as are duplicate shells. uncontract_spdf : bool If True, remove general contractions with combined angular momentum (sp, spd, etc) by duplicating the set of primitive exponents with each vector of coefficients. Primitives with zero coefficient are removed, as are duplicate shells. uncontract_segmented : bool If True, remove segmented contractions by duplicating each primitive into new shells. Each coefficient is set to 1.0 make_general : bool If True, make the basis set as generally-contracted as possible. There will be one shell per angular momentum (for each element) optimize_general : bool Optimize by removing general contractions that contain uncontracted functions (see :func:`bse.manip.optimize_general`) data_dir : str Data directory with all the basis set information. By default, it is in the 'data' subdirectory of this project. Returns ------- str or dict The basis set in the desired format. If `fmt` is **None**, this will be a python dictionary. Otherwise, it will be a string. ''' data_dir = fix_data_dir(data_dir) bs_data = _get_basis_metadata(name, data_dir) # If version is not specified, use the latest if version is None: version = bs_data['latest_version'] else: version = str(version) # Version may be an int if version not in bs_data['versions']: raise KeyError("Version {} does not exist for basis {}".format(version, name)) # Compose the entire basis set (all elements) file_relpath = bs_data['versions'][version]['file_relpath'] basis_dict = compose.compose_table_basis(file_relpath, data_dir) # Set the name (from the global metadata) # Only the list of all names will be returned from compose_table_basis basis_dict['name'] = bs_data['display_name'] # Handle optional arguments if elements is not None: # Convert to purely a list of strings that represent integers elements = misc.expand_elements(elements, True) # Did the user pass an empty string or empty list? If so, include all elements if len(elements) != 0: bs_elements = basis_dict['elements'] # Are elements part of this basis set? for el in elements: if el not in bs_elements: elsym = lut.element_sym_from_Z(el) raise KeyError("Element {} (Z={}) not found in basis {} version {}".format( elsym, el, name, version)) # Set to only the elements we want basis_dict['elements'] = {k: v for k, v in bs_elements.items() if k in elements} # Note that from now on, the pipeline is going to modify basis_dict. That is ok, # since we are returned a unique instance from compose_table_basis needs_pruning = False if optimize_general: basis_dict = manip.optimize_general(basis_dict, False) needs_pruning = True # uncontract_segmented implies uncontract_general if uncontract_segmented: basis_dict = manip.uncontract_segmented(basis_dict, False) needs_pruning = True elif uncontract_general: basis_dict = manip.uncontract_general(basis_dict, False) needs_pruning = True if uncontract_spdf: basis_dict = manip.uncontract_spdf(basis_dict, 0, False) needs_pruning = True if make_general: basis_dict = manip.make_general(basis_dict, False) needs_pruning = True # Remove dead and duplicate shells if needs_pruning: basis_dict = manip.prune_basis(basis_dict, False) # If fmt is not specified, return as a python dict if fmt is None: return basis_dict if header: header_str = _header_string(basis_dict) else: header_str = None return converters.convert_basis(basis_dict, fmt, header_str)
Obtain a basis set This is the main function for getting basis set information. This function reads in all the basis data and returns it either as a string or as a python dictionary. Parameters ---------- name : str Name of the basis set. This is not case sensitive. elements : str or list List of elements that you want the basis set for. Elements can be specified by Z-number (int or str) or by symbol (str). If this argument is a str (ie, '1-3,7-10'), it is expanded into a list. Z numbers and symbols (case insensitive) can be used interchangeably (see :func:`bse.misc.expand_elements`) If an empty string or list is passed, or if None is passed (the default), all elements for which the basis set is defined are included. version : int or str Obtain a specific version of this basis set. By default, the latest version is returned. fmt: str The desired output format of the basis set. By default, basis set information is returned as a python dictionary. Otherwise, if a format is specified, a string is returned. Use :func:`bse.api.get_formats` to programmatically obtain the available formats. The `fmt` argument is not case sensitive. Available formats are * nwchem * gaussian94 * psi4 * gamess_us * turbomole * json uncontract_general : bool If True, remove general contractions by duplicating the set of primitive exponents with each vector of coefficients. Primitives with zero coefficient are removed, as are duplicate shells. uncontract_spdf : bool If True, remove general contractions with combined angular momentum (sp, spd, etc) by duplicating the set of primitive exponents with each vector of coefficients. Primitives with zero coefficient are removed, as are duplicate shells. uncontract_segmented : bool If True, remove segmented contractions by duplicating each primitive into new shells. Each coefficient is set to 1.0 make_general : bool If True, make the basis set as generally-contracted as possible. There will be one shell per angular momentum (for each element) optimize_general : bool Optimize by removing general contractions that contain uncontracted functions (see :func:`bse.manip.optimize_general`) data_dir : str Data directory with all the basis set information. By default, it is in the 'data' subdirectory of this project. Returns ------- str or dict The basis set in the desired format. If `fmt` is **None**, this will be a python dictionary. Otherwise, it will be a string.
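A minimal usage sketch for the function above. The import path and basis-set name are assumptions for illustration, not confirmed by the source.

# Illustrative only: import path and basis name are assumed.
from basis_set_exchange import get_basis

# Default behaviour: a python dict covering H and O
bs = get_basis('cc-pvdz', elements=[1, 8])
print(sorted(bs['elements']))  # element Z-numbers as strings, e.g. ['1', '8']

# Same basis rendered as an NWChem-format string, without the header comment
print(get_basis('cc-pvdz', elements='1,8', fmt='nwchem', header=False))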
def get_file_contents_text( filename: str = None, blob: bytes = None, config: TextProcessingConfig = _DEFAULT_CONFIG) -> str: """ Returns the string contents of a file, or of a BLOB. """ binary_contents = get_file_contents(filename=filename, blob=blob) # 1. Try the encoding the user specified if config.encoding: try: return binary_contents.decode(config.encoding) except ValueError: # of which UnicodeDecodeError is more specific # ... https://docs.python.org/3/library/codecs.html pass # 2. Try the system encoding sysdef = sys.getdefaultencoding() if sysdef != config.encoding: try: return binary_contents.decode(sysdef) except ValueError: pass # 3. Try the best guess from chardet # http://chardet.readthedocs.io/en/latest/usage.html if chardet: guess = chardet.detect(binary_contents) if guess['encoding']: return binary_contents.decode(guess['encoding']) raise ValueError("Unknown encoding ({})".format( "filename={}".format(repr(filename)) if filename else "blob"))
Returns the string contents of a file, or of a BLOB.
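A short sketch of the fallback order above (user-specified encoding, then the system default, then chardet's guess). The `TextProcessingConfig(encoding=...)` constructor call is an assumption; only the `encoding` attribute is visible in the source.

# Hypothetical usage; TextProcessingConfig(encoding=...) is assumed.
cfg = TextProcessingConfig(encoding='utf-8')
text = get_file_contents_text(filename='notes.txt', config=cfg)

# Raw bytes work too, e.g. data pulled from a database BLOB column
text = get_file_contents_text(blob=b'caf\xc3\xa9', config=cfg)  # -> 'café'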
def get_type(mime=None, ext=None):
    """
    Returns the file type instance searching by
    MIME type or file extension.

    Args:
        ext: file extension string. E.g: jpg, png, mp4, mp3
        mime: MIME string. E.g: image/jpeg, video/mpeg

    Returns:
        The matched file type instance. Otherwise None.
    """
    for kind in types:
        # Compare by value: `is` identity checks only succeed for interned
        # strings and silently fail otherwise.
        if kind.extension == ext or kind.mime == mime:
            return kind
    return None
Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None.
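Usage sketch for `get_type`, assuming the module-level `types` registry it iterates is populated with objects exposing `extension` and `mime` attributes (as the loop implies).

kind = get_type(ext='png')
if kind is not None:
    print(kind.mime)  # e.g. 'image/png'

# Lookup can also go the other way, by MIME string
kind = get_type(mime='video/mpeg')
print(kind.extension if kind else 'unknown')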
def publ(name, cfg): """ Create a Flask app and configure it for use with Publ """ config.setup(cfg) app = _PublApp(name, template_folder=config.template_folder, static_folder=config.static_folder, static_url_path=config.static_url_path) for route in [ '/', '/<path:category>/', '/<template>', '/<path:category>/<template>', ]: app.add_url_rule(route, 'category', rendering.render_category) for route in [ '/<int:entry_id>', '/<int:entry_id>-', '/<int:entry_id>-<slug_text>', '/<path:category>/<int:entry_id>', '/<path:category>/<int:entry_id>-', '/<path:category>/<int:entry_id>-<slug_text>', ]: app.add_url_rule(route, 'entry', rendering.render_entry) app.add_url_rule('/<path:path>.PUBL_PATHALIAS', 'path_alias', rendering.render_path_alias) app.add_url_rule('/_async/<path:filename>', 'async', image.get_async) app.add_url_rule('/_', 'chit', rendering.render_transparent_chit) app.add_url_rule('/_file/<path:filename>', 'asset', rendering.retrieve_asset) app.config['TRAP_HTTP_EXCEPTIONS'] = True app.register_error_handler( werkzeug.exceptions.HTTPException, rendering.render_exception) app.jinja_env.globals.update( # pylint: disable=no-member get_view=view.get_view, arrow=arrow, static=utils.static_url, get_template=rendering.get_template ) caching.init_app(app) maint = maintenance.Maintenance() if config.index_rescan_interval: maint.register(functools.partial(index.scan_index, config.content_folder), config.index_rescan_interval) if config.image_cache_interval and config.image_cache_age: maint.register(functools.partial(image.clean_cache, config.image_cache_age), config.image_cache_interval) app.before_request(maint.run) if 'CACHE_THRESHOLD' in config.cache: app.after_request(set_cache_expiry) if app.debug: # We're in debug mode so we don't want to scan until everything's up # and running app.before_first_request(startup) else: # In production, register the exception handler and scan the index # immediately app.register_error_handler(Exception, rendering.render_exception) startup() return app
Create a Flask app and configure it for use with Publ
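A minimal launch sketch for the factory above. The keys passed in `cfg` are assumptions, since `config.setup` is not shown here.

# Hypothetical config values; the keys config.setup() accepts are assumed.
app = publ(__name__, {
    'content_folder': 'content',
    'template_folder': 'templates',
})

if __name__ == '__main__':
    app.run(port=5000)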
def file_modified_time(file_name) -> pd.Timestamp: """ File modified time in python Args: file_name: file name Returns: pd.Timestamp """ return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))
File modified time in python Args: file_name: file name Returns: pd.Timestamp
def get_default_task(self): """ Returns the default task if there is only one """ default_tasks = list(filter(lambda task: task.default, self.values())) if len(default_tasks) == 1: return default_tasks[0]
Returns the default task if there is only one
def get_map_url(self, mapsource, grid_coords): """ Get URL to a map region. """ return self.get_abs_url( "/maps/{}/{}/{}/{}.kml".format(mapsource.id, grid_coords.zoom, grid_coords.x, grid_coords.y))
Get URL to a map region.
def has_in_watched(self, watched): """ :calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_ :param watched: :class:`github.Repository.Repository` :rtype: bool """ assert isinstance(watched, github.Repository.Repository), watched status, headers, data = self._requester.requestJson( "GET", "/repos/" + watched._identity + "/subscription" ) return status == 200
:calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_ :param watched: :class:`github.Repository.Repository` :rtype: bool
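A typical PyGithub-style call pattern for the method above; the token and repository name are placeholders.

from github import Github

g = Github("YOUR_ACCESS_TOKEN")           # placeholder token
user = g.get_user()
repo = g.get_repo("octocat/Hello-World")  # any Repository object works
print(user.has_in_watched(repo))          # True if the user is subscribed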
def _set_name(self, v, load=False): """ Setter method for name, mapped from YANG variable /rbridge_id/event_handler/activate/name (list) If this variable is read-only (config: false) in the source YANG file, then _set_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """name must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)""", }) self.__name = t if hasattr(self, '_set'): self._set()
Setter method for name, mapped from YANG variable /rbridge_id/event_handler/activate/name (list) If this variable is read-only (config: false) in the source YANG file, then _set_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name() directly.
def uncache(self): """ Disable in-memory caching (Spark only). """ if self.mode == 'spark': self.values.unpersist() return self else: notsupported(self.mode)
Disable in-memory caching (Spark only).
def _last_commit(self):
    """
    Retrieve the most recent commit message (with ``svn log -l1``)

    Returns:
        tuple: (datestr, (revno, user, None, desc)) ::

            $ svn log -l1
            ------------------------------------------------------------------------
            r25701 | bhendrix | 2010-08-02 12:14:25 -0500 (Mon, 02 Aug 2010) | 1 line

            added selection range traits to make it possible for users to replace
            ------------------------------------------------------------------------

    .. note:: svn log references the svn server
    """
    # the missing comma in ['svn', 'log' '-l1'] silently concatenated the
    # arguments into 'log-l1'; the comma is required
    cmd = ['svn', 'log', '-l1']
    op = self.sh(cmd, shell=False)
    data, rest = op.split('\n', 2)[1:]
    revno, user, datestr, lc = data.split(' | ', 3)
    desc = '\n'.join(rest.split('\n')[1:-2])
    revno = revno[1:]
    # lc = long(lc.rstrip(' line'))
    return datestr, (revno, user, None, desc)
Retrieve the most recent commit message (with ``svn log -l1``) Returns: tuple: (datestr, (revno, user, None, desc)) :: $ svn log -l1 ------------------------------------------------------------------------ r25701 | bhendrix | 2010-08-02 12:14:25 -0500 (Mon, 02 Aug 2010) | 1 line added selection range traits to make it possible for users to replace ------------------------------------------------------------------------ .. note:: svn log references the svn server
def sub(self, repl): """ Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. Example:: make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") print(make_html.transformString("h1:main title:")) # prints "<h1>main title</h1>" """ if self.asGroupList: warnings.warn("cannot use sub() with Regex(asGroupList=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch and callable(repl): warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch: def pa(tokens): return tokens[0].expand(repl) else: def pa(tokens): return self.re.sub(repl, tokens[0]) return self.addParseAction(pa)
Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. Example:: make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") print(make_html.transformString("h1:main title:")) # prints "<h1>main title</h1>"
def append_responder(self, matcher, *args, **kwargs): """Add a responder of last resort. Like `.autoresponds`, but instead of adding a responder to the top of the stack, add it to the bottom. This responder will be called if no others match. """ return self._insert_responder("bottom", matcher, *args, **kwargs)
Add a responder of last resort. Like `.autoresponds`, but instead of adding a responder to the top of the stack, add it to the bottom. This responder will be called if no others match.
def parse_dis_tree(self, dis_tree, indent=0):
    """parse a *.dis ParentedTree into this document graph"""
    tree_type = get_tree_type(dis_tree)
    assert tree_type in SUBTREE_TYPES
    if tree_type == 'Root':
        # replace generic root node with tree root
        old_root_id = self.root
        root_id = get_node_id(dis_tree, self.ns)
        self.root = root_id
        self.add_node(root_id)
        self.remove_node(old_root_id)

        children = dis_tree[1:]
        for child in children:
            child_id = get_node_id(child, self.ns)
            self.add_edge(root_id, child_id,
                          edge_type=EdgeTypes.dominance_relation)
            self.parse_dis_tree(child, indent=indent+1)

    else:  # tree_type in ('Nucleus', 'Satellite')
        node_id = get_node_id(dis_tree, self.ns)
        node_type = get_node_type(dis_tree)
        relation_type = get_relation_type(dis_tree)

        if node_type == 'leaf':
            edu_text = get_edu_text(dis_tree[-1])
            self.add_node(node_id, attr_dict={
                self.ns+':text': edu_text,
                'label': u'{0}: {1}'.format(node_id, edu_text[:20])})
            if self.tokenized:
                edu_tokens = edu_text.split()
                for i, token in enumerate(edu_tokens):
                    token_node_id = '{0}_{1}'.format(node_id, i)
                    self.tokens.append(token_node_id)
                    self.add_node(token_node_id,
                                  attr_dict={self.ns+':token': token,
                                             'label': token})
                    self.add_edge(node_id, '{0}_{1}'.format(node_id, i))

        else:  # node_type == 'span'
            self.add_node(node_id,
                          attr_dict={self.ns+':rel_type': relation_type,
                                     self.ns+':node_type': node_type})

            children = dis_tree[3:]
            child_types = get_child_types(children)

            expected_child_types = set(['Nucleus', 'Satellite'])
            unexpected_child_types = set(child_types).difference(expected_child_types)
            assert not unexpected_child_types, \
                "Node '{0}' contains unexpected child types: {1}\n".format(
                    node_id, unexpected_child_types)

            if 'Satellite' not in child_types:
                # span only contains nuclei -> multinuc
                for child in children:
                    child_node_id = get_node_id(child, self.ns)
                    self.add_edge(node_id, child_node_id,
                                  attr_dict={self.ns+':rel_type': relation_type})

            elif len(child_types['Satellite']) == 1 and len(children) == 1:
                if tree_type == 'Nucleus':
                    child = children[0]
                    child_node_id = get_node_id(child, self.ns)
                    self.add_edge(node_id, child_node_id,
                                  attr_dict={self.ns+':rel_type': relation_type},
                                  edge_type=EdgeTypes.dominance_relation)
                else:
                    assert tree_type == 'Satellite'
                    raise NotImplementedError("I don't know how to combine two satellites")

            elif len(child_types['Satellite']) == 1 and len(child_types['Nucleus']) == 1:
                # standard RST relation, where one satellite is dominated by one nucleus
                nucleus_index = child_types['Nucleus'][0]
                satellite_index = child_types['Satellite'][0]

                nucleus_node_id = get_node_id(children[nucleus_index], self.ns)
                satellite_node_id = get_node_id(children[satellite_index], self.ns)
                self.add_edge(node_id, nucleus_node_id,
                              attr_dict={self.ns+':rel_type': 'span'},
                              edge_type=EdgeTypes.spanning_relation)
                self.add_edge(nucleus_node_id, satellite_node_id,
                              attr_dict={self.ns+':rel_type': relation_type},
                              edge_type=EdgeTypes.dominance_relation)
            else:
                raise ValueError("Unexpected child combinations: {}\n".format(child_types))

            for child in children:
                self.parse_dis_tree(child, indent=indent+1)
parse a *.dis ParentedTree into this document graph
def get_mining_contracts(): """ Get all the mining contracts information available. Returns: This function returns two major dictionaries. The first one contains information about the coins for which mining contracts data is available: coin_data: {symbol1: {'BlockNumber': ..., 'BlockReward': ..., 'BlockRewardReduction': ..., 'BlockTime': ..., 'DifficultyAdjustment': ..., 'NetHashesPerSecond': ..., 'PreviousTotalCoinsMined': ..., 'PriceUSD': ..., 'Symbol': ..., 'TotalCoinsMined': ...}, symbol2: {...}, ...} The other one contains all the available mining contracts: mining_data: {id1: {'AffiliateURL': ..., 'Algorithm': ..., 'Company': ..., 'ContractLength': ..., 'Cost': ..., 'CurrenciesAvailable': ..., 'CurrenciesAvailableLogo': ..., 'CurrenciesAvailableName': ..., 'Currency': ..., 'FeePercentage': ..., 'FeeValue': ..., 'FeeValueCurrency': ..., 'HashesPerSecond': ..., 'Id': id1, 'LogoUrl': ..., 'Name': ..., 'ParentId': ..., 'Recommended': ..., 'Sponsored': ..., 'Url': ...}, id2: {...}, ...} """ # load data url = build_url('miningcontracts') data = load_data(url) coin_data = data['CoinData'] mining_data = data['MiningData'] return coin_data, mining_data
Get all the mining contracts information available. Returns: This function returns two major dictionaries. The first one contains information about the coins for which mining contracts data is available: coin_data: {symbol1: {'BlockNumber': ..., 'BlockReward': ..., 'BlockRewardReduction': ..., 'BlockTime': ..., 'DifficultyAdjustment': ..., 'NetHashesPerSecond': ..., 'PreviousTotalCoinsMined': ..., 'PriceUSD': ..., 'Symbol': ..., 'TotalCoinsMined': ...}, symbol2: {...}, ...} The other one contains all the available mining contracts: mining_data: {id1: {'AffiliateURL': ..., 'Algorithm': ..., 'Company': ..., 'ContractLength': ..., 'Cost': ..., 'CurrenciesAvailable': ..., 'CurrenciesAvailableLogo': ..., 'CurrenciesAvailableName': ..., 'Currency': ..., 'FeePercentage': ..., 'FeeValue': ..., 'FeeValueCurrency': ..., 'HashesPerSecond': ..., 'Id': id1, 'LogoUrl': ..., 'Name': ..., 'ParentId': ..., 'Recommended': ..., 'Sponsored': ..., 'Url': ...}, id2: {...}, ...}
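Consuming the two returned dictionaries might look like this; the keys follow the docstring above.

coin_data, mining_data = get_mining_contracts()

# Coins with mining-contract data, with their USD price
for symbol, info in sorted(coin_data.items()):
    print(symbol, info['PriceUSD'])

# The contracts themselves, keyed by id
for contract_id, contract in mining_data.items():
    print(contract['Company'], contract['Cost'], contract['Currency'])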
def del_alias(self, alias): """ Delete an alias from the registry. The blobs it points to won't be deleted. Use :meth:`del_blob` for that. .. Note:: On private registry, garbage collection might need to be run manually; see: https://docs.docker.com/registry/garbage-collection/ :param alias: Alias name. :type alias: str :rtype: list :returns: A list of blob hashes (strings) which were assigned to the alias. """ dcd = self._get_dcd(alias) dgsts = self.get_alias(alias) self._request('delete', 'manifests/{}'.format(dcd)) return dgsts
Delete an alias from the registry. The blobs it points to won't be deleted. Use :meth:`del_blob` for that. .. Note:: On private registry, garbage collection might need to be run manually; see: https://docs.docker.com/registry/garbage-collection/ :param alias: Alias name. :type alias: str :rtype: list :returns: A list of blob hashes (strings) which were assigned to the alias.
def oracle_eval(command):
    """ Retrieve password from the given command """
    p = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    p.wait()
    if p.returncode == 0:
        return p.stdout.readline().strip().decode('utf-8')
    else:
        # decode stderr as well, so the error message does not embed a
        # bytes repr under Python 3
        die(
            "Error retrieving password: `{command}` returned '{error}'".format(
                command=command,
                error=p.stderr.read().strip().decode('utf-8')))
Retrieve password from the given command
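Example invocation, e.g. delegating to the `pass` password manager; the entry name is a placeholder.

# Succeeds -> first line of stdout, stripped and decoded
password = oracle_eval('pass show mail/example.com')

# A failing command instead reaches die() with the command and its stderr:
# oracle_eval('false')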
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
    """ Measures how well the features plus a constant base rate sum up to the model output.
    """
    X_train, X_test = to_array(X_train, X_test)

    # the train and test sets must share a feature space
    assert X_train.shape[1] == X_test.shape[1]

    # compare the model output against the summed per-feature attributions
    yp_test = trained_model.predict(X_test)

    return metric(yp_test, strip_list(attr_test).sum(1))
Measures how well the features plus a constant base rate sum up to the model output.
def get_real_data(self):
    """ Grab actual data from the system """
    ret = []
    username = os.environ.get('USER')
    if username:
        ret.append(username)
    editor = os.environ.get('EDITOR')
    if editor:
        editor = editor.split('/')[-1]
        ret.append(editor)

    # OS, hostname and... architecture (because lel)
    if hasattr(os, 'uname'):
        uname = os.uname()
        ret.append(uname[0])
        ret.append(uname[1])
        ret.append(uname[4])

    # Grab actual files from $HOME.
    files = os.listdir(os.environ.get('HOME'))
    if files:
        ret.append(random.choice(files))

    # Grab some processes
    ret += self.get_processes()[:2]

    # Prepare the returned data. First, lowercase it.
    # If there is unicode data being returned from any of the above
    # Python 2 needs to decode the UTF bytes to not crash. See issue #45.
    func = str.lower
    if sys.version_info < (3,):
        func = lambda x: str.lower(x).decode('utf-8')
    self.words.extend(map(func, ret))
Grab actual data from the system
async def service_observer(self, limit) -> int: """ Service the observer's inBox and outBox :return: the number of messages successfully serviced """ if not self.isReady(): return 0 return await self._observer.serviceQueues(limit)
Service the observer's inBox and outBox :return: the number of messages successfully serviced
def FromBinary(cls, record_data, record_count=1): """Create an UpdateRecord subclass from binary record data. This should be called with a binary record blob (NOT including the record type header) and it will decode it into a SetConstantRecord. Args: record_data (bytearray): The raw record data that we wish to parse into an UpdateRecord subclass NOT including its 8 byte record header. record_count (int): The number of records included in record_data. Raises: ArgumentError: If the record_data is malformed and cannot be parsed. Returns: SetConstantRecord: The decoded reflash tile record. """ _cmd, address, _resp_length, payload = cls._parse_rpc_info(record_data) try: value, encoded_stream = struct.unpack("<LH", payload) stream = DataStream.FromEncoded(encoded_stream) except ValueError: raise ArgumentError("Could not parse set_constant payload", payload=payload) return SetConstantRecord(stream, value, address=address)
Create an UpdateRecord subclass from binary record data. This should be called with a binary record blob (NOT including the record type header) and it will decode it into a SetConstantRecord. Args: record_data (bytearray): The raw record data that we wish to parse into an UpdateRecord subclass NOT including its 8 byte record header. record_count (int): The number of records included in record_data. Raises: ArgumentError: If the record_data is malformed and cannot be parsed. Returns: SetConstantRecord: The decoded reflash tile record.
def get(self, request, bot_id, hook_id, id, format=None): """ Get recipient by id --- serializer: TelegramRecipientSerializer responseMessages: - code: 401 message: Not authenticated """ bot = self.get_bot(bot_id, request.user) hook = self.get_hook(hook_id, bot, request.user) recipient = self.get_recipient(id, hook, request.user) serializer = self.serializer(recipient) return Response(serializer.data)
Get recipient by id --- serializer: TelegramRecipientSerializer responseMessages: - code: 401 message: Not authenticated
def modifyBits(inputVal, maxChanges):
    """
    Modifies up to maxChanges number of bits in the inputVal
    """
    # np.random.randint's upper bound is exclusive, hence the +1
    # (replaces the removed np.random.random_integers, which was inclusive)
    changes = np.random.randint(0, maxChanges + 1)
    if changes == 0:
        return inputVal

    inputWidth = len(inputVal)
    whatToChange = np.random.randint(0, 42, changes)

    runningIndex = -1
    numModsDone = 0
    for i in range(inputWidth):
        if numModsDone >= changes:
            break
        if inputVal[i] == 1:
            runningIndex += 1
            if runningIndex in whatToChange:
                if i != 0 and inputVal[i-1] == 0:
                    inputVal[i-1] = 1
                    inputVal[i] = 0
                    # fix: the original never incremented this counter, so the
                    # early-exit cap on changes was dead code
                    numModsDone += 1

    return inputVal
Modifies up to maxChanges number of bits in the inputVal
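A worked call on a sparse 42-position binary vector. The function shifts a selected set bit one position left when its left neighbour is clear, and it mutates the array in place.

import numpy as np

vec = np.zeros(42, dtype=int)
vec[[3, 7, 20, 35]] = 1

mutated = modifyBits(vec, maxChanges=2)   # same array object, mutated
print(np.flatnonzero(mutated))            # e.g. [ 2  7 20 35] (random)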