[Dataset columns] 'Unnamed: 0' (int64 row index, 0 to 389k), 'code' (string, lengths 26 to 79.6k), 'docstring' (string, lengths 1 to 46.9k).
1,800
def serveUpcoming(self, request): myurl = self.get_url(request) today = timezone.localdate() monthlyUrl = myurl + self.reverse_subpage('serveMonth', args=[today.year, today.month]) weekNum = gregorian_to_week_date(today)[1] weeklyUrl = myurl + self.reverse_subpage('serveWeek', args=[today.year, weekNum]) listUrl = myurl + self.reverse_subpage('servePast') upcomingEvents = self._getUpcomingEvents(request) paginator = Paginator(upcomingEvents, self.EventsPerPage) try: eventsPage = paginator.page(request.GET.get('page')) except PageNotAnInteger: eventsPage = paginator.page(1) except EmptyPage: eventsPage = paginator.page(paginator.num_pages) return render(request, "joyous/calendar_list_upcoming.html", {'self': self, 'page': self, 'version': __version__, 'today': today, 'weeklyUrl': weeklyUrl, 'monthlyUrl': monthlyUrl, 'listUrl': listUrl, 'events': eventsPage})
Upcoming events list view.
1,801
def consumer(self, fn): if self._consumer_fn is not None: raise ValueError('Consumer function is already defined for this stream') if not any([asyncio.iscoroutine(fn), asyncio.iscoroutinefunction(fn)]): raise ValueError('Consumer function must be a coroutine') self._consumer_fn = fn
Consumer decorator :param fn: coroutine consumer function Example: >>> api = StreamingAPI('my_service_key') >>> stream = api.get_stream() >>> @stream.consumer >>> @asyncio.coroutine >>> def handle_event(payload): >>> print(payload)
1,802
def new_mapping(self, lineup, station, channel, channelMinor, validFrom, validTo, onAirFrom, onAirTo): if self.__v_mapping: print("[Mapping: %s, %s, %s, %s, %s, %s, %s, %s]" % (lineup, station, channel, channelMinor, validFrom, validTo, onAirFrom, onAirTo))
Callback run for each new mapping within a lineup
1,803
def register_token(self, *args, **kwargs): check_sig = kwargs.pop('check_sig', True) client = HTTPClient(self.withdraw_server_address + self.withdraw_endpoint) if check_sig: return client.request('register_token', self.signature_validator.sign(kwargs)) else: return client.request('register_token', kwargs)
Register token Accepts: - token_name [string] - contract_address [hex string] - blockchain [string] token's blockchain (QTUMTEST, ETH) Returns dictionary with following fields: - success [Bool]
1,804
def safe_remove_file(filename, app): data_file = filename static_data_file = os.path.join("_static", data_file) if data_file.split(".")[-1] == "js": if hasattr(app.builder, "script_files") and static_data_file in app.builder.script_files: app.builder.script_files.remove(static_data_file) elif data_file.split(".")[-1] == "css": if hasattr(app.builder, "css_files") and static_data_file in app.builder.css_files: app.builder.css_files.remove(static_data_file)
Removes a given resource file from builder resources. Needed mostly during tests, if multiple sphinx-build runs are started. During these tests js/css-files are not cleaned, so a css_file from run A is still registered in run B. :param filename: filename to remove :param app: app object :return: None
1,805
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False): parser = _Parser(allow_unknown_extension, allow_field_number) return parser.ParseLines(lines, message)
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing. allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
1,806
async def read(cls, node): if isinstance(node, str): system_id = node elif isinstance(node, Node): system_id = node.system_id else: raise TypeError( "node must be a Node or str, not %s" % type(node).__name__) data = await cls._handler.read(system_id=system_id) return cls( cls._object( item, local_data={"node_system_id": system_id}) for item in data)
Get list of `Bcache`'s for `node`.
1,807
def get_dev_vlans(auth, url, devid=None, devip=None): if devip is not None: devid = get_dev_details(devip, auth, url)['id'] get_dev_vlans_url = "/imcrs/vlan?devId=" + str(devid) + "&start=0&size=5000&total=false" f_url = url + get_dev_vlans_url response = requests.get(f_url, auth=auth, headers=HEADERS) try: if response.status_code == 200: dev_vlans = (json.loads(response.text)) return dev_vlans['vlan'] elif response.status_code == 409: return {'vlan': 'no vlans'} except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + ' get_dev_vlans: An Error has occured'
Function takes input of devid or devip to issue a RESTful call to HP IMC :param auth: requests auth object #usually auth.creds from the pyhpeimc.auth.IMCAuth class :param url: base url of IMC RS interface #usually auth.url from the pyhpeimc.auth.IMCAuth class :param devid: str requires devId as the only input parameter :param devip: str of ipv4 address of the target device :return: list of dictionaries where each element of the list represents one vlan on the target device :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.vlanm import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> vlans = get_dev_vlans('350', auth.creds, auth.url) >>> assert type(vlans) is list >>> assert 'vlanId' in vlans[0]
1,808
def check_docker_access(self): try: if self.client is None: self.client = docker.from_env() self.client.ping() except ConnectionError as e: logger.exception(e) raise BuildError("Docker is not running or the current user doesn't have permissions to access docker.")
Creates a :class:`DockerClient <docker.client.DockerClient>` for the instance and checks the connection. :raise BuildError: If docker isn't accessible by the current user.
1,809
def _register_endpoints(self, providers): url_map = [] for endp_category in self.endpoints: for binding, endp in self.endpoints[endp_category].items(): valid_providers = "|^".join(providers) parsed_endp = urlparse(endp) url_map.append(("(^%s)/\S+/%s" % (valid_providers, parsed_endp.path), functools.partial(self.handle_authn_request, binding_in=binding))) return url_map
See super class satosa.frontends.base.FrontendModule#register_endpoints :type providers: list[str] :rtype list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] | list[(str, (satosa.context.Context) -> satosa.response.Response)] :param providers: A list with backend names :return: A list of url and endpoint function pairs
1,810
def p_joinx(self, t): if len(t) == 4: t[0] = JoinX(t[1], t[3], None, t[2]) elif len(t) == 6: t[0] = JoinX(t[1], t[3], t[5], t[2]) else: raise NotImplementedError()
joinx : fromtable jointype fromtable | fromtable jointype fromtable kw_on expression | fromtable jointype fromtable kw_using '(' namelist ')'
1,811
def prob(self, comparison_vectors, return_type=None): if return_type is not None: warnings.warn("The 'return_type' argument is removed. " "Default value is now 'series'.", VisibleDeprecationWarning, stacklevel=2) logging.info("Classification - compute probabilities") prob_match = self._prob_match(comparison_vectors.values) return pandas.Series(prob_match, index=comparison_vectors.index)
Compute the probabilities for each record pair. For each pair of records, estimate the probability of being a match. Parameters ---------- comparison_vectors : pandas.DataFrame The dataframe with comparison vectors. return_type : str Deprecated. (default 'series') Returns ------- pandas.Series or numpy.ndarray The probability of being a match for each record pair.
1,812
def del_layer(self, layer_num): del self.layer_stack[layer_num] if layer_num < self.current_layer(): self.set_current_layer(self.current_layer() - 1) return None
Delete mesh layer
1,813
def create(cls, statement_format, date_start, date_end, monetary_account_id=None, regional_format=None, custom_headers=None): if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_STATEMENT_FORMAT: statement_format, cls.FIELD_DATE_START: date_start, cls.FIELD_DATE_END: date_end, cls.FIELD_REGIONAL_FORMAT: regional_format } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id)) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
:type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
1,814
def fetch(self, url): try: r = requests.get(url, timeout=self.timeout) except requests.exceptions.Timeout: if not self.safe: raise else: return None if r and not self.safe: r.raise_for_status() return r.text
Get the feed content using 'requests'
1,815
def doParseXMLData( self ): parser = xml2obj.Xml2Obj() if self.data[-6:] == '</COL>': self.data += '</ROW></RESULTSET></FMPXMLRESULT>' xobj = parser.ParseString( self.data ) try: el = xobj.getElements( 'ERRORCODE' ) if el: self.errorcode = int( el[0].getData() ) else: self.errorcode = int( xobj.getElements('error')[0].getAttribute('code') ) except: FMErrorByNum( 954 ) if self.errorcode != 0: FMErrorByNum( self.errorcode ) return xobj
This function parses the XML output of FileMaker.
1,816
def getTaskInfos(self): if self._port is None or self._secret is None: raise Exception("Not supported to call getTaskInfos() before initialize " + "BarrierTaskContext.") else: addresses = self._localProperties.get("addresses", "") return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
.. note:: Experimental Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage, ordered by partition ID. .. versionadded:: 2.4.0
1,817
def spawn(self, actor, aid=None, **params): aid = aid or create_aid() future = actor.send('arbiter', 'spawn', aid=aid, **params) return actor_proxy_future(aid, future)
Spawn a new actor from ``actor``.
1,818
def from_str(cls, s): ast_obj = ast.parse(s).body[0] if not isinstance(ast_obj, cls._expected_ast_type): raise AssertionError( 'Expected ast of type {!r} but got {!r}'.format( cls._expected_ast_type, ast_obj ) ) return cls(ast_obj)
Construct an import object from a string.
1,819
def lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks): new_folded_ir_blocks = [] for block in folded_ir_blocks: if isinstance(block, CoerceType): new_block = convert_coerce_type_to_instanceof_filter(block) else: new_block = block new_folded_ir_blocks.append(new_block) return new_folded_ir_blocks
Lower CoerceType blocks into "INSTANCEOF" Filter blocks. Intended for folded IR blocks.
1,820
def is_builtin_name(name): if name.startswith('__') and name.endswith('__'): return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None return False
For example, __foo__ or __bar__.
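A self-contained sketch of the dunder-name check above; the ALL_LOWER_CASE_RE pattern is not shown in the source, so the definition below is an assumption:

import re

# Assumed definition: the name between the dunders must be all lower case
ALL_LOWER_CASE_RE = re.compile(r'[a-z_]+$')

def is_builtin_name(name):
    # "__init__" -> inspect only the part between the leading/trailing "__"
    if name.startswith('__') and name.endswith('__'):
        return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None
    return False

assert is_builtin_name('__init__')
assert not is_builtin_name('__FOO__')
assert not is_builtin_name('_private')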
1,821
def serialized_task(self, task: Task) -> Tuple[str, str]: return f"{task.hash}.json", task.json
Returns the name of the task definition file and its contents.
1,822
def price(usr, item, searches = 2, method = "AVERAGE", deduct = 0): if not method in ShopWizard.methods: raise invalidMethod() if isinstance(item, Item): item = item.name prices = [] dets = {} for x in range(0, searches): results = ShopWizard.search(usr, item) if not results: prices.append(-1) continue prices.append(int(results[0].price)) dets[str(results[0].price)] = (results[0].owner, results[0].id) time.sleep(ShopWizard.waitTime) if sum(prices) == len(prices) * -1: return False prices = list(filter(lambda x: x != -1, prices)) if method == ShopWizard.RETLOW: price = sorted(prices)[0] return (price, dets[str(price)][0], dets[str(price)][1]) return ShopWizard.__determinePrice(prices, method, deduct)
Searches the shop wizard for given item and determines price with given method Searches the shop wizard x times (x being number given in searches) for the given item and collects the lowest price from each result. Uses the given pricing method to determine and return the price of the item. Below is information on each pricing method available: ShopWizard.AVERAGE -- Average of the lowest prices ShopWizard.LOWDEDUCT -- Deducts x (x = deduct) from the lowest price ShopWizard.AVGDEDUCT -- Deducts x (x = deduct) from the average of the lowest prices ShopWizard.LOW -- Returns the lowest price ShopWizard.RETLOW -- Returns an Item instance of the lowest price found Parameters: usr (User) -- User to search with item (str, Item) -- Item to search for searches (int) -- Number of times to search for the item method (str) -- Pricing method deduct (int) -- Amount to deduct from the price (if applicable) Returns int -- The item price
1,823
def calculate_width_and_height(url_parts, options): width = options.get('width', 0) has_width = width height = options.get('height', 0) has_height = height flip = options.get('flip', False) flop = options.get('flop', False) if flip: width = width * -1 if flop: height = height * -1 if not has_width and not has_height: if flip: width = "-0" if flop: height = "-0" if width or height: url_parts.append('%sx%s' % (width, height))
Appends width and height information to url
1,824
def SLOAD(self, offset): storage_address = self.address self._publish('will_evm_read_storage', storage_address, offset) value = self.world.get_storage_data(storage_address, offset) self._publish('did_evm_read_storage', storage_address, offset, value) return value
Load word from storage
1,825
def expect_keyword(lexer: Lexer, value: str) -> Token: token = lexer.token if token.kind == TokenKind.NAME and token.value == value: lexer.advance() return token raise GraphQLSyntaxError( lexer.source, token.start, f"Expected {value!r}, found {token.desc}" )
Expect the next token to be a given keyword. If the next token is a given keyword, return that token after advancing the lexer. Otherwise, do not change the parser state and throw an error.
1,826
def start_instance(self, instance): params = {'state': 'running'} url = '/instances/{}'.format(instance) self.patch_proto(url, params=params)
Starts a single instance. :param str instance: A Yamcs instance name.
1,827
def main(port=4118, parentpid=None): if "LDTP_DEBUG" in os.environ: _ldtp_debug = True else: _ldtp_debug = False _ldtp_debug_file = os.environ.get('LDTP_DEBUG_FILE', None) if _ldtp_debug: print("Parent PID: {}".format(int(parentpid))) if _ldtp_debug_file: with open(unicode(_ldtp_debug_file), "a") as fp: fp.write("Parent PID: {}".format(int(parentpid))) server = LDTPServer(('', port), allow_none=True, logRequests=_ldtp_debug, requestHandler=RequestHandler) server.register_introspection_functions() server.register_multicall_functions() ldtp_inst = core.Core() server.register_instance(ldtp_inst) if parentpid: thread.start_new_thread(notifyclient, (parentpid,)) try: server.serve_forever() except KeyboardInterrupt: pass except: if _ldtp_debug: print(traceback.format_exc()) if _ldtp_debug_file: with open(_ldtp_debug_file, "a") as fp: fp.write(traceback.format_exc())
Main entry point. Parse command line options and start up a server.
1,828
def get_user_by_key(app, key): user = ses.query(um.User).join(um.UserKey).filter(um.UserKey.key==key).first() return user
An SQLAlchemy User getting function. Get a user by public key. :param str key: the public key the user belongs to
1,829
def step_size(self, t0, t1=None): tb0 = self.to_bucket( t0 ) if t1: tb1 = self.to_bucket( t1, steps=1 ) else: tb1 = self.to_bucket( t0, steps=1 ) days = (self.from_bucket(tb1, native=True) - self.from_bucket(tb0, native=True)).days return days * SIMPLE_TIMES['day']
Return the time in seconds for each step. Requires that we know a time relative to which we should calculate to account for variable length intervals (e.g. February)
1,830
def findXScreens(self): qapp = QtCore.QCoreApplication.instance() if not qapp: return screens = qapp.screens() virtual_screens = set() for screen in screens: if screen not in virtual_screens: virtual_screens.update(screen.virtualSiblings()[1:]) true_screens = [screen for screen in screens if screen not in virtual_screens] for screen in true_screens: self.true_screens.insert(screens.index(screen), screen) print("GPUHandler: findXScreens: true screens:", self.true_screens)
let's find out which screens are virtual screen, siblings: One big virtual desktop: A [A, B, C] B [A, B, C] C [A, B, C] A & B in one xscreen, C in another: A [A, B] B [A, B] C [C]
1,831
def do_edit(self, line): if len(line) == 0: print_err("Must provide a filename") return filename = resolve_path(line) dev, dev_filename = get_dev_and_path(filename) mode = auto(get_mode, filename) if mode_exists(mode) and mode_isdir(mode): print_err("Unable to edit directory '{}'".format(filename)) return if dev is None: os.system("{} '{}'".format(EDITOR, filename)) else: with tempfile.TemporaryDirectory() as temp_dir: local_filename = os.path.join(temp_dir, os.path.basename(filename)) if mode_exists(mode): print('Retrieving {} ...'.format(filename)) cp(filename, local_filename) old_stat = get_stat(local_filename) os.system("{} '{}'".format(EDITOR, local_filename)) new_stat = get_stat(local_filename) if old_stat != new_stat: self.print('Updating {} ...'.format(filename)) cp(local_filename, filename)
edit FILE Copies the file locally, launches an editor to edit the file. When the editor exits, if the file was modified then it's copied back. You can specify the editor used with the --editor command line option when you start rshell, or by using the VISUAL or EDITOR environment variable. If none of those are set, then vi will be used.
1,832
def _escaped_token_to_subtoken_strings(self, escaped_token): ret = [] start = 0 token_len = len(escaped_token) while start < token_len: for end in xrange(min(token_len, start + self._max_subtoken_len), start, -1): subtoken = escaped_token[start:end] if subtoken in self._all_subtoken_strings: ret.append(subtoken) start = end break else: assert False, "Token substring not found in subtoken vocabulary." return ret
Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings.
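The method above is a greedy longest-match-first segmentation over a subtoken vocabulary. A minimal standalone sketch of the same idea, with a toy vocabulary standing in for self._all_subtoken_strings:

def segment_greedy(token, vocab, max_len):
    # Repeatedly take the longest prefix of the remaining string found in vocab
    ret, start = [], 0
    while start < len(token):
        for end in range(min(len(token), start + max_len), start, -1):
            piece = token[start:end]
            if piece in vocab:
                ret.append(piece)
                start = end
                break
        else:
            raise ValueError('substring not found in subtoken vocabulary')
    return ret

vocab = {'un', 'believ', 'able', 'u', 'n'}
print(segment_greedy('unbelievable', vocab, max_len=6))  # ['un', 'believ', 'able']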
1,833
def InitFromApiFlow(self, f, cron_job_id=None): if f.flow_id: self.run_id = f.flow_id elif f.urn: self.run_id = f.urn.Basename() self.started_at = f.started_at self.cron_job_id = cron_job_id flow_state_enum = api_plugins_flow.ApiFlow.State cron_enum = rdf_cronjobs.CronJobRun.CronJobRunStatus errors_map = { flow_state_enum.RUNNING: cron_enum.RUNNING, flow_state_enum.TERMINATED: cron_enum.FINISHED, flow_state_enum.ERROR: cron_enum.ERROR, flow_state_enum.CLIENT_CRASHED: cron_enum.ERROR } self.status = errors_map[f.state] if f.state != f.State.RUNNING: self.finished_at = f.last_active_at if f.context.kill_timestamp: self.status = self.Status.LIFETIME_EXCEEDED if f.context.HasField("status"): self.log_message = f.context.status if f.context.HasField("backtrace"): self.backtrace = f.context.backtrace return self
Shortcut method for easy legacy cron jobs support.
1,834
def get_approval_by_id(self, issue_id_or_key, approval_id): url = 'rest/servicedeskapi/request/{0}/approval/{1}'.format(issue_id_or_key, approval_id) return self.get(url, headers=self.experimental_headers)
Get an approval for a given approval ID :param issue_id_or_key: str :param approval_id: str :return:
1,835
def _transition(self, duration, hue=None, brightness=None): b_steps = 0 if brightness is not None: b_steps = steps(self.brightness, brightness, self.command_set.brightness_steps) b_start = self.brightness h_steps = 0 if hue is not None: h_steps = steps(self.hue, hue, self.command_set.hue_steps) h_start = self.hue total_steps = max(b_steps, h_steps, 1) total_commands = b_steps + h_steps wait = self._wait(duration, total_steps, total_commands) if wait == 0: b_steps, h_steps = self._scale_steps(duration, total_commands, b_steps, h_steps) total_steps = max(b_steps, h_steps, 1) for i in range(total_steps): if b_steps > 0 and i % math.ceil(total_steps/b_steps) == 0: self.brightness = util.transition(i, total_steps, b_start, brightness) if h_steps > 0 and i % math.ceil(total_steps/h_steps) == 0: self.hue = util.transition(i, total_steps, h_start, hue) time.sleep(wait)
Transition. :param duration: Time to transition. :param hue: Transition to this hue. :param brightness: Transition to this brightness.
1,836
def apply_T12(word): WORD = word offset = 0 for vv in new_vv(WORD): seq = vv.group(1) if not is_diphthong(seq) and not is_long(seq): i = vv.start(1) + 1 + offset WORD = WORD[:i] + '.' + WORD[i:] offset += 1 RULE = ' T12' if word != WORD else '' return WORD, RULE
There is a syllable boundary within a VV sequence of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
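A self-contained illustration of the boundary-insertion mechanics above. The real new_vv, is_diphthong and is_long helpers are language-specific and not shown in the source, so the versions below are toy stand-ins:

import re

VV = re.compile(r'(?=([aeiouy]{2}))')              # overlapping vowel pairs
DIPHTHONGS = {'ai', 'ei', 'oi', 'au', 'ou', 'ie', 'uo'}  # toy subset

def split_vv(word):
    out, offset = word, 0
    for m in VV.finditer(word):
        seq = m.group(1)
        # split only non-diphthong, non-identical (i.e. not long vowel) pairs
        if seq not in DIPHTHONGS and seq[0] != seq[1]:
            i = m.start(1) + 1 + offset
            out = out[:i] + '.' + out[i:]
            offset += 1
    return out

print(split_vv('koettaa'))  # 'ko.ettaa': 'oe' is split, long 'aa' is kept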
1,837
def _type_priority(ifo, ftype, trend=None): for trendname, trend_regex in [ ('m-trend', MINUTE_TREND_TYPE), ('s-trend', SECOND_TREND_TYPE), ]: if trend == trendname and trend_regex.match(ftype): return 0, len(ftype) for reg, prio in { HIGH_PRIORITY_TYPE: 1, re.compile(r): 6, LOW_PRIORITY_TYPE: 10, MINUTE_TREND_TYPE: 10, SECOND_TREND_TYPE: 10, }.items(): if reg.search(ftype): return prio, len(ftype) return 5, len(ftype)
Prioritise the given GWF type based on its name or trend status. This is essentially an ad-hoc ordering function based on internal knowledge of how LIGO does GWF type naming.
1,838
def app_to_context(self, context): if self.tagClass != Tag.applicationTagClass: raise ValueError("application tag required") if (self.tagNumber == Tag.booleanAppTag): return ContextTag(context, chr(self.tagLVT)) else: return ContextTag(context, self.tagData)
Return a context encoded tag.
1,839
def reserve_position(fp, fmt='I'): position = fp.tell() fp.seek(struct.calcsize(str('>' + fmt)), 1) return position
Reserves the current position for write. Use with `write_position`. :param fp: file-like object :param fmt: format of the reserved position :return: the position
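The usual pairing is with a write_position-style helper: reserve space for a length or offset field, write the body, then seek back and patch the value in. A minimal sketch of that pattern (the companion helper's name is assumed from the docstring, and big-endian 'I' is assumed as the default format):

import io
import struct

def reserve_position(fp, fmt='I'):
    position = fp.tell()
    fp.seek(struct.calcsize(str('>' + fmt)), 1)  # skip over the reserved bytes
    return position

def write_position(fp, position, value, fmt='I'):
    current = fp.tell()
    fp.seek(position)
    fp.write(struct.pack(str('>' + fmt), value))
    fp.seek(current)  # restore the write head

fp = io.BytesIO()
length_pos = reserve_position(fp)          # placeholder for a 4-byte length
body = b'payload'
fp.write(body)
write_position(fp, length_pos, len(body))  # back-patch the length
print(fp.getvalue())                       # b'\x00\x00\x00\x07payload'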
1,840
def save_token(self, user, token): self.config.set('auth', 'user', user) self.config.set('auth', 'token', token) new_config = ConfigParser(allow_no_value=True) new_config.read(self.config_file) self.check_sections(new_config) new_config.set('auth', 'user', user) new_config.set('auth', 'token', token) filename = os.path.expanduser(self.config_file) with open(filename, 'w') as out_file: os.chmod(filename, 0o0600) new_config.write(out_file)
Save the token on the config file.
1,841
def group(self): z = self.Z if z == 1: return 1 if z == 2: return 18 if 3 <= z <= 18: if (z - 2) % 8 == 0: return 18 elif (z - 2) % 8 <= 2: return (z - 2) % 8 else: return 10 + (z - 2) % 8 if 19 <= z <= 54: if (z - 18) % 18 == 0: return 18 else: return (z - 18) % 18 if (z - 54) % 32 == 0: return 18 elif (z - 54) % 32 >= 18: return (z - 54) % 32 - 14 else: return (z - 54) % 32
Returns the periodic table group of the element.
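The branch arithmetic above can be sanity-checked with a free-function copy (illustrative only, not part of the source library): for iron, Z = 26 falls in the 19 to 54 range and (26 - 18) % 18 = 8, so the group is 8.

def group_of(z):
    # Same branching as the method above, as a standalone function
    if z == 1: return 1
    if z == 2: return 18
    if 3 <= z <= 18:
        if (z - 2) % 8 == 0: return 18
        if (z - 2) % 8 <= 2: return (z - 2) % 8
        return 10 + (z - 2) % 8
    if 19 <= z <= 54:
        return 18 if (z - 18) % 18 == 0 else (z - 18) % 18
    if (z - 54) % 32 == 0: return 18
    if (z - 54) % 32 >= 18: return (z - 54) % 32 - 14
    return (z - 54) % 32

assert group_of(26) == 8    # Fe
assert group_of(17) == 17   # Cl
assert group_of(11) == 1    # Na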
1,842
def cosh(x): if isinstance(x, UncertainFunction): mcpts = np.cosh(x._mcpts) return UncertainFunction(mcpts) else: return np.cosh(x)
Hyperbolic cosine
1,843
def _get_precision_scale(self, number): try: decimal_num = Decimal(number) except InvalidOperation: raise Invalid(self.msg or 'Value must be a number') return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
:param number: :return: tuple(precision, scale, decimal_number)
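The precision/scale arithmetic follows directly from Decimal.as_tuple(): digits is the tuple of significant digits and exponent is the (negative) count of fractional places. For example:

from decimal import Decimal

d = Decimal('123.45')
t = d.as_tuple()           # DecimalTuple(sign=0, digits=(1, 2, 3, 4, 5), exponent=-2)
precision = len(t.digits)  # 5 significant digits
scale = -t.exponent        # 2 digits after the decimal point
print(precision, scale)    # 5 2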
1,844
def get_nearest_successors(self, type_measurement): nearest_node1 = None nearest_node2 = None nearest_distance = float("Inf") for i in range(0, len(self.successors)): candidate1 = self.successors[i] for j in range(i + 1, len(self.successors)): candidate2 = self.successors[j] candidate_distance = candidate1.get_distance(candidate2, type_measurement) if candidate_distance < nearest_distance: nearest_distance = candidate_distance nearest_node1 = candidate1 nearest_node2 = candidate2 return [nearest_node1, nearest_node2]
! @brief Find pair of nearest successors of the node in line with measurement type. @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest successors. @return (list) Pair of nearest successors represented by list.
1,845
def _configure_using_fluent_definition(self): definition = Parser.parse(self.signature) self._config.set_name(definition["name"]) for name, flags, description, default in definition["arguments"]: self._config.add_argument(name, flags, description, default) for long_name, short_name, flags, description, default in definition["options"]: self._config.add_option(long_name, short_name, flags, description, default)
Configure the console command using a fluent definition.
1,846
def _listify(collection): new_list = [] for index in range(len(collection)): new_list.append(collection[index]) return new_list
This is a workaround for Collections no longer being iterable when using JPype.
1,847
def override_locale(self, locale: str = locales.EN, ) -> Generator['BaseProvider', None, None]: try: origin_locale = self.locale self._override_locale(locale) try: yield self finally: self._override_locale(origin_locale) except AttributeError: raise ValueError('{} has no locale dependency'.format( self.__class__.__name__))
Context manager which allows overriding current locale. Temporarily overrides current locale for locale-dependent providers. :param locale: Locale. :return: Provider with overridden locale.
1,848
def filter_params(self, src_mod): STRIKE_PARAMS[src_mod.num_np:] = [] DIP_PARAMS[src_mod.num_np:] = [] RAKE_PARAMS[src_mod.num_np:] = [] NPW_PARAMS[src_mod.num_np:] = [] HDEPTH_PARAMS[src_mod.num_hd:] = [] HDW_PARAMS[src_mod.num_hd:] = [] PLANES_STRIKES_PARAM[src_mod.num_p:] = [] PLANES_DIPS_PARAM[src_mod.num_p:] = [] RATE_PARAMS[src_mod.num_r:] = [] if src_mod.has_simple_fault_geometry is False: GEOMETRY_PARAMS.remove((, , )) if (src_mod.has_simple_fault_geometry is False and src_mod.has_complex_fault_geometry is False and src_mod.has_planar_geometry is False): BASE_PARAMS.remove((, , )) if (src_mod.has_simple_fault_geometry is False and src_mod.has_complex_fault_geometry is False and src_mod.has_area_source is False and src_mod.has_point_source is False): GEOMETRY_PARAMS[:] = [] if src_mod.has_mfd_incremental is False: MFD_PARAMS.remove((, , ))
Remove params unneeded by source_model
1,849
def _getMetadata(self, key): if self.isClosed: raise ValueError("operation illegal for closed doc") return _fitz.Document__getMetadata(self, key)
_getMetadata(self, key) -> char *
1,850
def _unicode(self): return u'\n'.join([u''.join(c) for c in self.w])
This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.
1,851
def getInferenceTypeFromLabel(cls, label): infType, _, _= label.partition(cls._LABEL_SEPARATOR) if not InferenceType.validate(infType): return None return infType
Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) for a metric spec generated by :meth:`getMetricLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
1,852
def scale_back_batch(self, bboxes_in, scores_in): if bboxes_in.device == torch.device("cpu"): self.dboxes = self.dboxes.cpu() self.dboxes_xywh = self.dboxes_xywh.cpu() else: self.dboxes = self.dboxes.cuda() self.dboxes_xywh = self.dboxes_xywh.cuda() bboxes_in = bboxes_in.permute(0, 2, 1) scores_in = scores_in.permute(0, 2, 1) bboxes_in[:, :, :2] = self.scale_xy * bboxes_in[:, :, :2] bboxes_in[:, :, 2:] = self.scale_wh * bboxes_in[:, :, 2:] bboxes_in[:, :, :2] = bboxes_in[:, :, :2] * self.dboxes_xywh[:, :, 2:] + self.dboxes_xywh[:, :, :2] bboxes_in[:, :, 2:] = bboxes_in[:, :, 2:].exp() * self.dboxes_xywh[:, :, 2:] l, t, r, b = bboxes_in[:, :, 0] - 0.5 * bboxes_in[:, :, 2], \ bboxes_in[:, :, 1] - 0.5 * bboxes_in[:, :, 3], \ bboxes_in[:, :, 0] + 0.5 * bboxes_in[:, :, 2], \ bboxes_in[:, :, 1] + 0.5 * bboxes_in[:, :, 3] bboxes_in[:, :, 0] = l bboxes_in[:, :, 1] = t bboxes_in[:, :, 2] = r bboxes_in[:, :, 3] = b return bboxes_in, F.softmax(scores_in, dim=-1)
Do scale and transform from xywh to ltrb suppose input Nx4xnum_bbox Nxlabel_numxnum_bbox
1,853
def predict(self, X): return self.__cost(self.__unroll(self.__thetas), 0, np.matrix(X))
Returns predictions of input test cases.
1,854
def _get_image_size(self, image_path): command = 'du -b %s' % image_path (rc, output) = zvmutils.execute(command) if rc: msg = ("Error happened when executing command du -b with " "reason: %s" % output) LOG.error(msg) raise exception.SDKImageOperationError(rs=8) size = output.split()[0] return size
Return disk size in bytes
1,855
def retrieve_approver_email_list(self, domain, product_id): response = self.request(E.retrieveApproverEmailListSslCertRequest( E.domain(domain), E.productId(product_id) )) return [str(i) for i in response.data.array[0].item]
Retrieve the list of allowed approver email addresses.
1,856
def standard_kinetics(target, quantity, prefactor, exponent): X = target[quantity] A = target[prefactor] b = target[exponent] r = A*(X**b) S1 = A*b*(X**(b - 1)) S2 = A*(1 - b)*(X**b) values = {'S1': S1, 'S2': S2, 'rate': r} return values
r"""
1,857
def update_geometry(self): self.setGeometry(self.__editor.contentsRect().left(), self.__editor.contentsRect().top(), self.get_width(), self.__editor.contentsRect().height()) return True
Updates the Widget geometry. :return: Method success. :rtype: bool
1,858
def returner(ret): log.debug('sqlite3 returner <returner> called with data: %s', ret) conn = _get_conn(ret) cur = conn.cursor() sql = '''INSERT INTO salt_returns (fun, jid, id, fun_args, date, full_ret, success) VALUES (:fun, :jid, :id, :fun_args, :date, :full_ret, :success)''' cur.execute(sql, {'fun': ret['fun'], 'jid': ret['jid'], 'id': ret['id'], 'fun_args': six.text_type(ret['fun_args']) if ret.get('fun_args') else None, 'date': six.text_type(datetime.datetime.now()), 'full_ret': salt.utils.json.dumps(ret['return']), 'success': ret.get('success', '')}) _close_conn(conn)
Insert minion return data into the sqlite3 database
1,859
def make_call_positionals(stack_builders, count): out = [make_expr(stack_builders) for _ in range(count)] out.reverse() return out
Make the args entry for an ast.Call node.
1,860
def lock_access(repository_path, callback): with open(cpjoin(repository_path, 'lock_file'), 'w') as fd: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) returned = callback() fcntl.flock(fd, fcntl.LOCK_UN) return returned except IOError: return fail(lock_fail_msg)
Synchronise access to the user file between processes; this specifies which user is allowed write access at the current time
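A self-contained sketch of the same advisory-lock pattern with fcntl.flock (POSIX only); the lock file path and the return value on contention are placeholders:

import fcntl

def with_file_lock(lock_path, callback):
    # LOCK_EX | LOCK_NB: fail immediately if another process holds the lock
    with open(lock_path, 'w') as fd:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            return None  # somebody else holds the lock
        try:
            return callback()
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)

result = with_file_lock('/tmp/demo.lock', lambda: 'did the work')
print(result)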
1,861
def _load_from_file(self, filename): if filename in self.fdata: return self.fdata[filename] else: filepath = find_in_tarball(self.tarloc, filename) return read_from_tarball(self.tarloc, filepath)
Find filename in tar, and load it
1,862
def get_weekly_chart_dates(self): doc = self._request(self.ws_prefix + ".getWeeklyChartList", True) seq = [] for node in doc.getElementsByTagName("chart"): seq.append((node.getAttribute("from"), node.getAttribute("to"))) return seq
Returns a list of From and To tuples for the available charts.
1,863
def off(self): off_command = ExtendedSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00, self._udata) off_command.set_checksum() self._send_method(off_command, self._off_message_received)
Send an OFF message to device group.
1,864
def bqsr_table(data): in_file = dd.get_align_bam(data) out_file = "%s-recal-table.txt" % utils.splitext_plus(in_file)[0] if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: assoc_files = dd.get_variation_resources(data) known = "-k %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else "" license = license_export(data) cores = dd.get_num_cores(data) ref_file = dd.get_ref_file(data) cmd = ("{license}sentieon driver -t {cores} -r {ref_file} " "-i {in_file} --algo QualCal {known} {tx_out_file}") do.run(cmd.format(**locals()), "Sentieon QualCal generate table") return out_file
Generate recalibration tables as inputs to BQSR.
1,865
def p_empty_statement(self, p): p[0] = self.asttypes.EmptyStatement(p[1]) p[0].setpos(p)
empty_statement : SEMI
1,866
def write_json_document(title, body): if not title.endswith('.json'): title += '.json' json_body = create_json_str(body) if os.path.exists(title): juicer.utils.Log.log_warn("Cart file '%s' already exists, overwriting with new data." % title) f = open(title, 'w') f.write(json_body) f.flush() f.close()
`title` - Name of the file to write. `body` - Python datastructure representing the document. This method handles transforming the body into a proper json string, and then writing the file to disk.
1,867
def securityHandler(self, value): if isinstance(value, BaseSecurityHandler): if isinstance(value, security.AGOLTokenSecurityHandler): self._securityHandler = value elif isinstance(value, security.OAuthSecurityHandler): self._securityHandler = value else: pass
sets the security handler
1,868
def Ax(self): m = np.zeros((len(self.a1), 3, 3)) m[:, 0, 1] = -self.a3 m[:, 0, 2] = +self.a2 m[:, 1, 0] = +self.a3 m[:, 1, 2] = -self.a1 m[:, 2, 0] = -self.a2 m[:, 2, 1] = +self.a1 return m
Compute a stack of skew-symmetric matrices which can be multiplied by 'b' to get the cross product. See: http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
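A quick numpy check that the stacked skew-symmetric matrices reproduce the cross product, written as a free-standing version of the property above:

import numpy as np

a = np.random.randn(5, 3)
b = np.random.randn(5, 3)

# Build one skew-symmetric matrix [a]_x per row of a, as in Ax()
m = np.zeros((len(a), 3, 3))
m[:, 0, 1] = -a[:, 2]
m[:, 0, 2] = +a[:, 1]
m[:, 1, 0] = +a[:, 2]
m[:, 1, 2] = -a[:, 0]
m[:, 2, 0] = -a[:, 1]
m[:, 2, 1] = +a[:, 0]

# Batched matrix-vector product [a]_x b should equal a x b row-wise
assert np.allclose(np.einsum('nij,nj->ni', m, b), np.cross(a, b))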
1,869
def get_comments(self) -> Iterator[PostComment]: def _postcommentanswer(node): return PostCommentAnswer(id=int(node['id']), created_at_utc=datetime.utcfromtimestamp(node['created_at']), text=node['text'], owner=Profile(self._context, node['owner'])) def _postcommentanswers(node): if 'edge_threaded_comments' not in node: return answer_count = node['edge_threaded_comments']['count'] if answer_count == 0: return answer_edges = node['edge_threaded_comments']['edges'] if answer_count == len(answer_edges): yield from (_postcommentanswer(comment['node']) for comment in answer_edges) return yield from (_postcommentanswer(answer_node) for answer_node in self._context.graphql_node_list("51fdd02b67508306ad4484ff574a0b62", {'comment_id': node['id']}, 'https://www.instagram.com/p/' + self.shortcode + '/', lambda d: d['data']['comment']['edge_threaded_comments'])) def _postcomment(node): return PostComment(*_postcommentanswer(node), answers=_postcommentanswers(node)) if self.comments == 0: return try: comment_edges = self._field('edge_media_to_parent_comment', 'edges') answers_count = sum([edge['node']['edge_threaded_comments']['count'] for edge in comment_edges]) threaded_comments_available = True except KeyError: comment_edges = self._field('edge_media_to_comment', 'edges') answers_count = 0 threaded_comments_available = False if self.comments == len(comment_edges) + answers_count: yield from (_postcomment(comment['node']) for comment in comment_edges) return yield from (_postcomment(node) for node in self._context.graphql_node_list("97b41c52301f77ce508f55e66d17620e" if threaded_comments_available else "f0986789a5c5d17c2400faebf16efd0d", {'shortcode': self.shortcode}, 'https://www.instagram.com/p/' + self.shortcode + '/', lambda d: d['data']['shortcode_media']['edge_media_to_parent_comment' if threaded_comments_available else 'edge_media_to_comment'], self._rhx_gis))
r"""Iterate over all comments of the post. Each comment is represented by a PostComment namedtuple with fields text (string), created_at (datetime), id (int), owner (:class:`Profile`) and answers (:class:`~typing.Iterator`\ [:class:`PostCommentAnswer`]) if available.
1,870
def _note_reply_pending(self, option, state): if not self.telnet_opt_dict.has_key(option): self.telnet_opt_dict[option] = TelnetOption() self.telnet_opt_dict[option].reply_pending = state
Record the status of requested Telnet options.
1,871
def new(params, event_shape=(), dtype=None, validate_args=False, name=None): with tf.compat.v1.name_scope(name, 'IndependentBernoulli', [params, event_shape]): params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( tf.convert_to_tensor( value=event_shape, name='event_shape', dtype_hint=tf.int32), tensor_name='event_shape') new_shape = tf.concat([ tf.shape(input=params)[:-1], event_shape, ], axis=0) dist = tfd.Independent( tfd.Bernoulli( logits=tf.reshape(params, new_shape), dtype=dtype or params.dtype.base_dtype, validate_args=validate_args), reinterpreted_batch_ndims=tf.size(input=event_shape), validate_args=validate_args) dist._logits = dist.distribution._logits dist._probs = dist.distribution._probs dist.logits = tfd.Bernoulli.logits dist.probs = tfd.Bernoulli.probs return dist
Create the distribution instance from a `params` vector.
1,872
def fan_speed(self, value): if value not in range(1, 10): raise exceptions.RoasterValueError self._fan_speed.value = value
Verifies the value is between 1 and 9 inclusively.
1,873
def get_config(self): data = self.message(MessageType.GET_CONFIG, '') return json.loads(data, object_hook=ConfigReply)
Currently only contains the "config" member, which is a string containing the config file as loaded by i3 most recently. :rtype: ConfigReply
1,874
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs): flags = flags or u'' if db: flags = u"%s -d %s" % (flags, db) command = u'psql %s -c "%s"' % (flags, query) if use_sudo: sudo(command, user='postgres', **kwargs) else: run(command, **kwargs)
Execute remote psql query.
1,875
def get_serializer_class(self, view, method_func): if hasattr(method_func, 'request_serializer'): return getattr(method_func, 'request_serializer') if hasattr(view, 'serializer_class'): return getattr(view, 'serializer_class') if hasattr(view, 'get_serializer_class'): return getattr(view, 'get_serializer_class')() return None
Try to get the serializer class from the view method. If the view method doesn't have a request serializer, fall back to serializer_class on the view class
1,876
def _to_dict(self): _dict = {} if hasattr(self, 'count') and self.count is not None: _dict['count'] = self.count if hasattr(self, 'relevance') and self.relevance is not None: _dict['relevance'] = self.relevance if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'emotion') and self.emotion is not None: _dict['emotion'] = self.emotion._to_dict() if hasattr(self, 'sentiment') and self.sentiment is not None: _dict['sentiment'] = self.sentiment._to_dict() return _dict
Return a json dictionary representing this model.
1,877
def get_basis(name, elements=None, version=None, fmt=None, uncontract_general=False, uncontract_spdf=False, uncontract_segmented=False, make_general=False, optimize_general=False, data_dir=None, header=True): data_dir = fix_data_dir(data_dir) bs_data = _get_basis_metadata(name, data_dir) if version is None: version = bs_data['latest_version'] else: version = str(version) if not version in bs_data['versions']: raise KeyError("Version {} does not exist for basis {}".format(version, name)) file_relpath = bs_data['versions'][version]['file_relpath'] basis_dict = compose.compose_table_basis(file_relpath, data_dir) basis_dict['name'] = bs_data['display_name'] if elements is not None: elements = misc.expand_elements(elements, True) if len(elements) != 0: bs_elements = basis_dict['elements'] for el in elements: if not el in bs_elements: elsym = lut.element_sym_from_Z(el) raise KeyError("Element {} (Z={}) not found in basis {} version {}".format( elsym, el, name, version)) basis_dict['elements'] = {k: v for k, v in bs_elements.items() if k in elements} needs_pruning = False if optimize_general: basis_dict = manip.optimize_general(basis_dict, False) needs_pruning = True if uncontract_segmented: basis_dict = manip.uncontract_segmented(basis_dict, False) needs_pruning = True elif uncontract_general: basis_dict = manip.uncontract_general(basis_dict, False) needs_pruning = True if uncontract_spdf: basis_dict = manip.uncontract_spdf(basis_dict, 0, False) needs_pruning = True if make_general: basis_dict = manip.make_general(basis_dict, False) needs_pruning = True if needs_pruning: basis_dict = manip.prune_basis(basis_dict, False) if fmt is None: return basis_dict if header: header_str = _header_string(basis_dict) else: header_str = None return converters.convert_basis(basis_dict, fmt, header_str)
Obtain a basis set This is the main function for getting basis set information. This function reads in all the basis data and returns it either as a string or as a python dictionary. Parameters ---------- name : str Name of the basis set. This is not case sensitive. elements : str or list List of elements that you want the basis set for. Elements can be specified by Z-number (int or str) or by symbol (str). If this argument is a str (ie, '1-3,7-10'), it is expanded into a list. Z numbers and symbols (case insensitive) can be used interchangeably (see :func:`bse.misc.expand_elements`) If an empty string or list is passed, or if None is passed (the default), all elements for which the basis set is defined are included. version : int or str Obtain a specific version of this basis set. By default, the latest version is returned. fmt: str The desired output format of the basis set. By default, basis set information is returned as a python dictionary. Otherwise, if a format is specified, a string is returned. Use :func:`bse.api.get_formats` to programmatically obtain the available formats. The `fmt` argument is not case sensitive. Available formats are * nwchem * gaussian94 * psi4 * gamess_us * turbomole * json uncontract_general : bool If True, remove general contractions by duplicating the set of primitive exponents with each vector of coefficients. Primitives with zero coefficient are removed, as are duplicate shells. uncontract_spdf : bool If True, remove general contractions with combined angular momentum (sp, spd, etc) by duplicating the set of primitive exponents with each vector of coefficients. Primitives with zero coefficient are removed, as are duplicate shells. uncontract_segmented : bool If True, remove segmented contractions by duplicating each primitive into new shells. Each coefficient is set to 1.0 make_general : bool If True, make the basis set as generally-contracted as possible. There will be one shell per angular momentum (for each element) optimize_general : bool Optimize by removing general contractions that contain uncontracted functions (see :func:`bse.manip.optimize_general`) data_dir : str Data directory with all the basis set information. By default, it is in the 'data' subdirectory of this project. Returns ------- str or dict The basis set in the desired format. If `fmt` is **None**, this will be a python dictionary. Otherwise, it will be a string.
1,878
def get_file_contents_text( filename: str = None, blob: bytes = None, config: TextProcessingConfig = _DEFAULT_CONFIG) -> str: binary_contents = get_file_contents(filename=filename, blob=blob) if config.encoding: try: return binary_contents.decode(config.encoding) except ValueError: pass sysdef = sys.getdefaultencoding() if sysdef != config.encoding: try: return binary_contents.decode(sysdef) except ValueError: pass if chardet: guess = chardet.detect(binary_contents) if guess['encoding']: return binary_contents.decode(guess['encoding']) raise ValueError("Unknown encoding ({})".format( "filename={}".format(repr(filename)) if filename else "blob"))
Returns the string contents of a file, or of a BLOB.
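The decoding strategy above is: configured encoding first, then the system default, then a chardet guess. A compact standalone sketch of that fallback chain (chardet is treated as an optional dependency, as in the source):

import sys

def decode_with_fallback(blob, preferred='utf-8'):
    # Try the preferred encoding, then the interpreter default
    for enc in (preferred, sys.getdefaultencoding()):
        try:
            return blob.decode(enc)
        except (ValueError, LookupError):
            pass
    # Last resort: let chardet guess, if it is installed
    try:
        import chardet
        guess = chardet.detect(blob)
        if guess['encoding']:
            return blob.decode(guess['encoding'])
    except ImportError:
        pass
    raise ValueError('unknown encoding')

print(decode_with_fallback('héllo'.encode('latin-1'), preferred='latin-1'))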
1,879
def get_type(mime=None, ext=None): for kind in types: if kind.extension == ext or kind.mime == mime: return kind return None
Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None.
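A toy version of the registry lookup, with a two-entry kinds table standing in for the library's types list (the namedtuple shape is an assumption for illustration):

from collections import namedtuple

Kind = namedtuple('Kind', ['extension', 'mime'])

types = [Kind('jpg', 'image/jpeg'), Kind('png', 'image/png')]

def get_type(mime=None, ext=None):
    # First kind whose extension or MIME type matches wins
    for kind in types:
        if kind.extension == ext or kind.mime == mime:
            return kind
    return None

print(get_type(ext='png'))          # Kind(extension='png', mime='image/png')
print(get_type(mime='image/jpeg'))  # Kind(extension='jpg', mime='image/jpeg')
print(get_type(ext='mp4'))          # None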
1,880
def publ(name, cfg): config.setup(cfg) app = _PublApp(name, template_folder=config.template_folder, static_folder=config.static_folder, static_url_path=config.static_url_path) for route in [ , , , , ]: app.add_url_rule(route, , rendering.render_category) for route in [ , , , , , , ]: app.add_url_rule(route, , rendering.render_entry) app.add_url_rule(, , rendering.render_path_alias) app.add_url_rule(, , image.get_async) app.add_url_rule(, , rendering.render_transparent_chit) app.add_url_rule(, , rendering.retrieve_asset) app.config[] = True app.register_error_handler( werkzeug.exceptions.HTTPException, rendering.render_exception) app.jinja_env.globals.update( get_view=view.get_view, arrow=arrow, static=utils.static_url, get_template=rendering.get_template ) caching.init_app(app) maint = maintenance.Maintenance() if config.index_rescan_interval: maint.register(functools.partial(index.scan_index, config.content_folder), config.index_rescan_interval) if config.image_cache_interval and config.image_cache_age: maint.register(functools.partial(image.clean_cache, config.image_cache_age), config.image_cache_interval) app.before_request(maint.run) if in config.cache: app.after_request(set_cache_expiry) if app.debug: app.before_first_request(startup) else: app.register_error_handler(Exception, rendering.render_exception) startup() return app
Create a Flask app and configure it for use with Publ
1,881
def file_modified_time(file_name) -> pd.Timestamp: return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))
File modified time in python Args: file_name: file name Returns: pd.Timestamp
1,882
def get_default_task(self): default_tasks = list(filter(lambda task: task.default, self.values())) if len(default_tasks) == 1: return default_tasks[0]
Returns the default task if there is only one
1,883
def get_map_url(self, mapsource, grid_coords): return self.get_abs_url( "/maps/{}/{}/{}/{}.kml".format(mapsource.id, grid_coords.zoom, grid_coords.x, grid_coords.y))
Get URL to a map region.
1,884
def has_in_watched(self, watched): assert isinstance(watched, github.Repository.Repository), watched status, headers, data = self._requester.requestJson( "GET", "/repos/" + watched._identity + "/subscription" ) return status == 200
:calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_ :param watched: :class:`github.Repository.Repository` :rtype: bool
1,885
def _set_name(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: None, u: None, u: None}}), is_container=, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__name = t if hasattr(self, ): self._set()
Setter method for name, mapped from YANG variable /rbridge_id/event_handler/activate/name (list) If this variable is read-only (config: false) in the source YANG file, then _set_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_name() directly.
1,886
def uncache(self): if self.mode == 'spark': self.values.unpersist() return self else: notsupported(self.mode)
Disable in-memory caching (Spark only).
1,887
def _last_commit(self): cmd = ['svn', 'log', '-l1'] op = self.sh(cmd, shell=False) data, rest = op.split('\n', 2)[1:] revno, user, datestr, lc = data.split(' | ', 3) desc = '\n'.join(rest.split('\n')[1:-2]) revno = revno[1:] return datestr, (revno, user, None, desc)
Retrieve the most recent commit message (with ``svn log -l1``) Returns: tuple: (datestr, (revno, user, None, desc)) :: $ svn log -l1 ------------------------------------------------------------------------ r25701 | bhendrix | 2010-08-02 12:14:25 -0500 (Mon, 02 Aug 2010) | 1 line added selection range traits to make it possible for users to replace ------------------------------------------------------------------------ .. note:: svn log references the svn server
1,888
def sub(self, repl): if self.asGroupList: warnings.warn("cannot use sub() with Regex(asGroupList=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch and callable(repl): warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch: def pa(tokens): return tokens[0].expand(repl) else: def pa(tokens): return self.re.sub(repl, tokens[0]) return self.addParseAction(pa)
Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. Example:: make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") print(make_html.transformString("h1:main title:")) # prints "<h1>main title</h1>"
1,889
def append_responder(self, matcher, *args, **kwargs): return self._insert_responder("bottom", matcher, *args, **kwargs)
Add a responder of last resort. Like `.autoresponds`, but instead of adding a responder to the top of the stack, add it to the bottom. This responder will be called if no others match.
1,890
def parse_dis_tree(self, dis_tree, indent=0): tree_type = get_tree_type(dis_tree) assert tree_type in SUBTREE_TYPES if tree_type == 'Root': old_root_id = self.root root_id = get_node_id(dis_tree, self.ns) self.root = root_id self.add_node(root_id) self.remove_node(old_root_id) children = dis_tree[1:] for child in children: child_id = get_node_id(child, self.ns) self.add_edge( root_id, child_id, edge_type=EdgeTypes.dominance_relation) self.parse_dis_tree(child, indent=indent+1) else: node_id = get_node_id(dis_tree, self.ns) node_type = get_node_type(dis_tree) relation_type = get_relation_type(dis_tree) if node_type == 'leaf': edu_text = get_edu_text(dis_tree[-1]) self.add_node(node_id, attr_dict={ self.ns+':text': edu_text, 'label': u'{0}: {1}'.format(node_id, edu_text[:20])}) if self.tokenized: edu_tokens = edu_text.split() for i, token in enumerate(edu_tokens): token_node_id = '{0}_{1}'.format(node_id, i) self.tokens.append(token_node_id) self.add_node(token_node_id, attr_dict={self.ns+':token': token, 'label': token}) self.add_edge(node_id, '{0}_{1}'.format(node_id, i)) else: self.add_node(node_id, attr_dict={self.ns+':rel_type': relation_type, self.ns+':node_type': node_type}) children = dis_tree[3:] child_types = get_child_types(children) expected_child_types = set(['Nucleus', 'Satellite']) unexpected_child_types = set(child_types).difference(expected_child_types) assert not unexpected_child_types, \ "Node '{0}' contains unexpected child types: {1}\n".format( node_id, unexpected_child_types) if 'Satellite' not in child_types: for child in children: child_node_id = get_node_id(child, self.ns) self.add_edge(node_id, child_node_id, attr_dict={ self.ns+':rel_type': relation_type}) elif len(child_types['Satellite']) == 1 and len(children) == 1: if tree_type == 'Nucleus': child = children[0] child_node_id = get_node_id(child, self.ns) self.add_edge( node_id, child_node_id, attr_dict={self.ns+':rel_type': relation_type}, edge_type=EdgeTypes.dominance_relation) else: assert tree_type == 'Satellite' raise NotImplementedError("I don't know how to combine a Satellite with just one child") elif len(child_types['Satellite']) == 1 and len(child_types['Nucleus']) == 1: nucleus_index = child_types['Nucleus'][0] satellite_index = child_types['Satellite'][0] nucleus_node_id = get_node_id(children[nucleus_index], self.ns) satellite_node_id = get_node_id(children[satellite_index], self.ns) self.add_edge(node_id, nucleus_node_id, attr_dict={self.ns+':rel_type': 'span'}, edge_type=EdgeTypes.spanning_relation) self.add_edge(nucleus_node_id, satellite_node_id, attr_dict={self.ns+':rel_type': relation_type}, edge_type=EdgeTypes.dominance_relation) else: raise ValueError("Unexpected child combinations: {}\n".format(child_types)) for child in children: self.parse_dis_tree(child, indent=indent+1)
parse a *.dis ParentedTree into this document graph
1,891
def get_mining_contracts(): url = build_url('miningcontracts') data = load_data(url) coin_data = data['CoinData'] mining_data = data['MiningData'] return coin_data, mining_data
Get all the mining contracts information available. Returns: This function returns two major dictionaries. The first one contains information about the coins for which mining contracts data is available: coin_data: {symbol1: {'BlockNumber': ..., 'BlockReward': ..., 'BlockRewardReduction': ..., 'BlockTime': ..., 'DifficultyAdjustment': ..., 'NetHashesPerSecond': ..., 'PreviousTotalCoinsMined': ..., 'PriceUSD': ..., 'Symbol': ..., 'TotalCoinsMined': ...}, symbol2: {...}, ...} The other one contains all the available mining contracts: mining_data: {id1: {'AffiliateURL': ..., 'Algorithm': ..., 'Company': ..., 'ContractLength': ..., 'Cost': ..., 'CurrenciesAvailable': ..., 'CurrenciesAvailableLogo': ..., 'CurrenciesAvailableName': ..., 'Currency': ..., 'FeePercentage': ..., 'FeeValue': ..., 'FeeValueCurrency': ..., 'HashesPerSecond': ..., 'Id': id1, 'LogoUrl': ..., 'Name': ..., 'ParentId': ..., 'Recommended': ..., 'Sponsored': ..., 'Url': ...}, id2: {...}, ...}
1,892
def del_alias(self, alias): dcd = self._get_dcd(alias) dgsts = self.get_alias(alias) self._request('delete', 'manifests/{}'.format(dcd)) return dgsts
Delete an alias from the registry. The blobs it points to won't be deleted. Use :meth:`del_blob` for that. .. Note:: On private registry, garbage collection might need to be run manually; see: https://docs.docker.com/registry/garbage-collection/ :param alias: Alias name. :type alias: str :rtype: list :returns: A list of blob hashes (strings) which were assigned to the alias.
1,893
def oracle_eval(command): p = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.wait() if p.returncode == 0: return p.stdout.readline().strip().decode() else: die( "Error retrieving password: `{command}` returned '{error}'".format( command=command, error=p.stderr.read().strip()))
Retrieve password from the given command
1,894
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model): X_train, X_test = to_array(X_train, X_test) assert X_train.shape[1] == X_test.shape[1] yp_test = trained_model.predict(X_test) return metric(yp_test, strip_list(attr_test).sum(1))
How well do the features plus a constant base rate sum up to the model output.
1,895
def get_real_data(self): ret = [] username = os.environ.get('USER') if username: ret.append(username) editor = os.environ.get('EDITOR') if editor: editor = editor.split('/')[-1] ret.append(editor) if hasattr(os, 'uname'): uname = os.uname() ret.append(uname[0]) ret.append(uname[1]) ret.append(uname[4]) files = os.listdir(os.environ.get('HOME')) if files: ret.append(random.choice(files)) ret += self.get_processes()[:2] func = str.lower if sys.version_info < (3,): func = lambda x: str.lower(x).decode() self.words.extend(map(func, ret))
Grab actual data from the system
1,896
async def service_observer(self, limit) -> int: if not self.isReady(): return 0 return await self._observer.serviceQueues(limit)
Service the observer's inBox and outBox :return: the number of messages successfully serviced
1,897
def FromBinary(cls, record_data, record_count=1): _cmd, address, _resp_length, payload = cls._parse_rpc_info(record_data) try: value, encoded_stream = struct.unpack("<LH", payload) stream = DataStream.FromEncoded(encoded_stream) except ValueError: raise ArgumentError("Could not parse set_constant payload", payload=payload) return SetConstantRecord(stream, value, address=address)
Create an UpdateRecord subclass from binary record data. This should be called with a binary record blob (NOT including the record type header) and it will decode it into a SetConstantRecord. Args: record_data (bytearray): The raw record data that we wish to parse into an UpdateRecord subclass NOT including its 8 byte record header. record_count (int): The number of records included in record_data. Raises: ArgumentError: If the record_data is malformed and cannot be parsed. Returns: SetConstantRecord: The decoded reflash tile record.
1,898
def get(self, request, bot_id, hook_id, id, format=None): bot = self.get_bot(bot_id, request.user) hook = self.get_hook(hook_id, bot, request.user) recipient = self.get_recipient(id, hook, request.user) serializer = self.serializer(recipient) return Response(serializer.data)
Get recipient by id --- serializer: TelegramRecipientSerializer responseMessages: - code: 401 message: Not authenticated
1,899
def modifyBits(inputVal, maxChanges): changes = np.random.random_integers(0, maxChanges, 1)[0] if changes == 0: return inputVal inputWidth = len(inputVal) whatToChange = np.random.random_integers(0, 41, changes) runningIndex = -1 numModsDone = 0 for i in xrange(inputWidth): if numModsDone >= changes: break if inputVal[i] == 1: runningIndex += 1 if runningIndex in whatToChange: if i != 0 and inputVal[i-1] == 0: inputVal[i-1] = 1 inputVal[i] = 0 return inputVal
Modifies up to maxChanges number of bits in the inputVal