386,500
def Run(self, unused_arg):
    logging.debug("Disabling service")
    msg = "Service disabled."
    if hasattr(sys, "frozen"):
        grr_binary = os.path.abspath(sys.executable)
    elif __file__:
        grr_binary = os.path.abspath(__file__)
    try:
        os.remove(grr_binary)
    except OSError:
        msg = "Could not remove binary."
    try:
        os.remove(config.CONFIG["Client.plist_path"])
    except OSError:
        if "Could not" in msg:
            msg += " Could not remove plist file."
        else:
            msg = "Could not remove plist file."
    directory = getattr(sys, "_MEIPASS", None)
    if directory:
        shutil.rmtree(directory, ignore_errors=True)
    self.SendReply(rdf_protodict.DataBlob(string=msg))
This kills us with no cleanups.
386,501
def from_taxtable(cls, taxtable_fp):
    r = csv.reader(taxtable_fp)
    headers = next(r)
    rows = (collections.OrderedDict(list(zip(headers, i))) for i in r)
    row = next(rows)
    # Column names assumed from the ``taxit taxtable`` CSV format.
    root = cls(rank=row['rank'], tax_id=row['tax_id'], name=row['tax_name'])
    path_root = headers.index('root')
    root.ranks = headers[path_root:]
    for row in rows:
        rank, tax_id, name = [row[i] for i in ('rank', 'tax_id', 'tax_name')]
        path = [_f for _f in list(row.values())[path_root:] if _f]
        parent = root.path(path[:-1])
        parent.add_child(cls(rank, tax_id, name=name))
    return root
Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable``
386,502
def vi_return_param(self, index):
    if index == 0:
        return self.mu0
    elif index == 1:
        return np.log(self.sigma0)
Wrapper function for selecting the appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1, depending on which latent variable Returns ------- The appropriately indexed parameter
386,503
def decode_call(self, call):
    if call is None:
        return None
    itokens = call.split(self._callables_separator)
    odict = {}
    for key, value in self._clut.items():
        if value in itokens:
            odict[itokens[itokens.index(value)]] = key
    return self._callables_separator.join([odict[itoken] for itoken in itokens])
Replace callable tokens with callable names. :param call: Encoded callable name :type call: string :rtype: string
386,504
def parse_number_factory(alg, sep, pre_sep):
    nan_replace = float("+inf") if alg & ns.NANLAST else float("-inf")

    def func(val, _nan_replace=nan_replace, _sep=sep):
        return _sep, _nan_replace if val != val else val

    if alg & ns.PATH and alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
        return lambda x: (((pre_sep,), func(x)),)
    elif alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA:
        return lambda x: ((pre_sep,), func(x))
    elif alg & ns.PATH:
        return lambda x: (func(x),)
    else:
        return func
Create a function that will format a number into a tuple. Parameters ---------- alg : ns enum Indicate how to format the *bytes*. sep : str The string character to be inserted before the number in the returned tuple. pre_sep : str In the event that *alg* contains ``UNGROUPLETTERS``, this string will be placed in a single-element tuple at the front of the returned nested tuple. Returns ------- func : callable A function that accepts numeric input (e.g. *int* or *float*) and returns a tuple containing the number with the leading string *sep*. Intended to be used as the *num_func* argument to *natsort_key*. See Also -------- natsort_key
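A minimal sketch of the default path (no ns flags set, so the bare func is returned; the empty sep and pre_sep here are arbitrary):
>>> func = parse_number_factory(0, '', '')
>>> func(5.0)
('', 5.0)
>>> func(float('nan'))  # NANLAST unset, so NaN maps to -inf
('', -inf)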
386,505
def add_association_to_graph(self):
    Assoc.add_association_to_graph(self)
    # Separator and bnode prefix recovered from the surrounding calls;
    # ``...`` marks translation-table keys lost from the source.
    if self.start_stage_id or self.end_stage_id is not None:
        stage_process_id = '-'.join((str(self.start_stage_id),
                                     str(self.end_stage_id)))
        stage_process_id = '_:' + re.sub(r':', '', stage_process_id)
        self.model.addIndividualToGraph(
            stage_process_id, None, self.globaltt[...])
        self.graph.addTriple(
            stage_process_id, self.globaltt[...], self.start_stage_id)
        self.graph.addTriple(
            stage_process_id, self.globaltt[...], self.end_stage_id)
        self.stage_process_id = stage_process_id
        self.graph.addTriple(
            self.assoc_id, self.globaltt[...], self.stage_process_id)
    if self.environment_id is not None:
        self.graph.addTriple(
            self.assoc_id, self.globaltt[...], self.environment_id)
    return
Overrides Association by including bnode support The reified relationship between a genotype (or any genotype part) and a phenotype is decorated with some provenance information. This makes the assumption that both the genotype and phenotype are classes. currently hardcoded to map the annotation to the monarch namespace :param g: :return:
386,506
def tau_reduction(ms, rate, n_per_decade):
    ms = np.int64(ms)
    keep = np.bool8(np.rint(n_per_decade * np.log10(ms[1:])) -
                    np.rint(n_per_decade * np.log10(ms[:-1])))
    ms = ms[:-1]
    assert len(ms) == len(keep)
    ms = ms[keep]
    taus = ms / float(rate)
    return ms, taus
Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values
386,507
def p_reset(self, program):
    program[0] = node.Reset([program[2]])
    self.verify_reg(program[2], 'qreg')  # register type assumed
reset : RESET primary
386,508
def _update_card_file_location(self, card_name, new_directory):
    with tmp_chdir(self.gssha_directory):
        file_card = self.project_manager.getCard(card_name)
        if file_card:
            if file_card.value:
                original_location = file_card.value.strip('"')
                # new_location assumed to join new_directory with the
                # original file name (fused in the source).
                new_location = os.path.join(
                    new_directory, os.path.basename(original_location))
                file_card.value = '"{0}"'.format(
                    os.path.basename(original_location))
                try:
                    move(original_location, new_location)
                except OSError as ex:
                    log.warning(ex)
Moves card to new gssha working directory
386,509
def multiple_packaged_versions(package_name):
    dist_files = os.listdir('dist')  # built-package directory assumed
    versions = set()
    for filename in dist_files:
        # The exact version-extraction pattern was lost; this one is assumed.
        version = funcy.re_find(r'{}-(.+)\.tar\.gz'.format(package_name),
                                filename)
        if version:
            versions.add(version)
    return len(versions) > 1
Look through built package directory and see if there are multiple versions there
386,510
def write_metadata(self, key, values):
    values = Series(values)
    self.parent.put(self._get_metadata_path(key), values,
                    format='fixed',  # format string assumed from the docstring
                    encoding=self.encoding, errors=self.errors,
                    nan_rep=self.nan_rep)
write out a meta data array to the key as a fixed-format Series Parameters ---------- key : string values : ndarray
386,511
def create_tables(database):
    logging.getLogger(__name__).debug("Creating missing database tables")
    database.connect()
    database.create_tables(
        [User, Group, UserToGroup, GroupToCapability, Capability],
        safe=True)
Create all tables in the given database
386,512
def _run_up(self, path, migration_file, batch, pretend=False):
    migration = self._resolve(path, migration_file)
    if pretend:
        return self._pretend_to_run(migration, 'up')
    migration.up()
    self._repository.log(migration_file, batch)
    self._note('Migrated: %s' % migration_file)  # message text assumed
Run "up" a migration instance. :type migration_file: str :type batch: int :type pretend: bool
386,513
def solar(filename_solar, solar_factor):
    f0 = open(filename_solar)
    sol = f0.readlines()
    f0.close()
    sol[0].split(" ")
    global names_sol
    names_sol = []
    global z_sol
    z_sol = []
    yps = np.zeros(len(sol))
    mass_number = np.zeros(len(sol))
    for i in range(len(sol)):
        z_sol.append(int(sol[i][1:3]))
        names_sol.extend([sol[i].split(" ")[0][4:]])
        yps[i] = float(sol[i].split(" ")[1]) * solar_factor
        try:
            mass_number[i] = int(names_sol[i][2:5])
        except ValueError:
            print("WARNING:")
            print("This initial abundance file uses an element name that does")
            print("not contain the mass number in the 3rd to 5th position.")
            print("It is assumed that this is the proton and we will change")
            print("the name to 'H   1' to be consistent with the notation used in")
            print("iniab.dat files")
            names_sol[i] = 'H   1'  # replacement name assumed
            mass_number[i] = int(names_sol[i][2:5])
        if mass_number[i] == 1 or mass_number[i] == 4:
            yps[i] = old_div(yps[i], solar_factor)
    global solar_abundance
    solar_abundance = {}
    for a, b in zip(names_sol, yps):
        solar_abundance[a] = b
    z_bismuth = 83
    global solar_elem_abund
    solar_elem_abund = np.zeros(z_bismuth)
    for i in range(z_bismuth):
        dummy = 0.
        for j in range(len(solar_abundance)):
            if z_sol[j] == i + 1:
                dummy = dummy + float(solar_abundance[names_sol[j]])
        solar_elem_abund[i] = dummy
read solar abundances from filename_solar. Parameters ---------- filename_solar : string The file name. solar_factor : float The correction factor to apply, in case filename_solar is not solar, but some file used to get initial abundances at metallicity lower than solar. Notice that this is really crude, since alpha-enhancements and the like are not properly considered. Only H and He4 are not multiplied. So, for publications PLEASE use a proper filename_solar that is actually solar, and use solar_factor = 1. Marco
386,514
def show_status(self):
    txt = ''  # original header string lost; empty placeholder
    print(txt)
    txt += "start_x = " + str(self.start_x) + "\n"
    txt += "start_y = " + str(self.start_y) + "\n"
    txt += "target_x = " + str(self.target_x) + "\n"
    txt += "target_y = " + str(self.target_y) + "\n"
    txt += "current_x = " + str(self.current_x) + "\n"
    txt += "current_y = " + str(self.current_y) + "\n"
    print(self.grd)
    return txt
dumps the status of the agent
386,515
def bar_amplitude(self):
    "Return the bar amplitude."
    res = (self.high - self.low) / self.low
    res.name = 'bar_amplitude'  # series name assumed
    return res
Return the bar amplitude ((high - low) / low).
386,516
def distance(self, there): return haversine_distance((self.latitude, self.longitude), (there.latitude, there.longitude))
Calculate the distance from this location to there. Parameters ---------- there : Location Returns ------- distance_in_m : float
386,517
def login(self, username, password, load=True):
    self.auth = Auth(username, password)
    if load is True:
        self.get_ip()
        self.get_servers()
Set the authentication data in the object and, if load is True (the default), also retrieve the IP list and the VM list in order to build the internal object lists. @param (str) username: username of the cloud @param (str) password: password of the cloud @param (bool) load: whether to pre-cache the objects. @return: None
386,518
def drawDisplay(self, painter, option, rect, text):
    if self.showRichText():
        doc = QtGui.QTextDocument()
        doc.setTextWidth(float(rect.width()))
        doc.setHtml(text)
        painter.translate(rect.x(), rect.y())
        doc.drawContents(painter, QtCore.QRectF(0, 0, float(rect.width()),
                                                float(rect.height())))
        painter.translate(-rect.x(), -rect.y())
    else:
        # Type names assumed.
        if type(text).__name__ not in ('str', 'unicode', 'QString'):
            text = nativestring(text)
        metrics = QtGui.QFontMetrics(option.font)
        text = metrics.elidedText(text,
                                  QtCore.Qt.TextElideMode(option.textElideMode),
                                  rect.width())
        painter.setFont(option.font)
        painter.drawText(rect, int(option.displayAlignment), text)
Overloads the drawDisplay method to render HTML if the rich text \ information is set to true. :param painter | <QtGui.QPainter> option | <QtGui.QStyleOptionItem> rect | <QtCore.QRect> text | <str>
386,519
def _http_response(self, url, method, data=None, content_type=None,
                   schema=None, **kwargs):
    if schema is None:
        schema = self.schema_2
    # Header names assumed from standard registry-API usage.
    header = {
        'Content-Type': content_type or 'application/json',
        'Accept': schema,
    }
    auth = self.auth
    token_required = auth.token_required
    token = auth.token
    desired_scope = auth.desired_scope
    scope = auth.scope
    if token_required:
        if not token or desired_scope != scope:
            logger.debug("Getting new token for scope: %s", desired_scope)
            auth.get_new_token()
        header['Authorization'] = 'Bearer %s' % self.auth.token
    if data and not content_type:
        data = json.dumps(data)
    path = url.format(**kwargs)
    logger.debug("%s %s", method.__name__.upper(), path)
    response = method(self.host + path, data=data, headers=header,
                      **self.method_kwargs)
    logger.debug("%s %s", response.status_code, response.reason)
    response.raise_for_status()
    return response
url -> full target url method -> method from requests data -> request body kwargs -> url formatting args
386,520
def truncate_volume(self, volume, size): return self.set_volume(volume, size=size, truncate=True)
Truncate a volume to a new, smaller size. :param volume: Name of the volume to truncate. :type volume: str :param size: Size in bytes, or string representing the size of the volume to be created. :type size: int or str :returns: A dictionary mapping "name" to volume and "size" to the volume's new size in bytes. :rtype: ResponseDict .. warnings also:: Data may be irretrievably lost in this operation. .. note:: A snapshot of the volume in its previous state is taken and immediately destroyed, but it is available for recovery for the 24 hours following the truncation.
386,521
def absorb(self, other):
    if not isinstance(other, self.__class__):
        raise TypeError("`other` has to be an instance of %s!" % self.__class__)
    for attr, value in other.items():
        if value is not None:
            setattr(self, attr, deepcopy(value))
For attributes of `other` whose value is not None, assign them to self; i.e., update this document with the other document's data, if and only if the value is not None.
386,522
def _populate_input_for_name_id(self, config, record, context, data):
    user_id = ""
    user_id_from_attrs = config['user_id_from_attrs']  # config key assumed
    for attr in user_id_from_attrs:
        if attr in record["attributes"]:
            value = record["attributes"][attr]
            if isinstance(value, list):
                value.sort()
                user_id += "".join(value)
                satosa_logging(
                    logger, logging.DEBUG,
                    "Added attribute {} with values {} to input for NameID".format(attr, value),
                    context.state
                )
            else:
                user_id += value
                satosa_logging(
                    logger, logging.DEBUG,
                    "Added attribute {} with value {} to input for NameID".format(attr, value),
                    context.state
                )
    if not user_id:
        satosa_logging(
            logger, logging.WARNING,
            "Input for NameID is empty so not overriding default",
            context.state
        )
    else:
        data.subject_id = user_id
        satosa_logging(
            logger, logging.DEBUG,
            "Input for NameID is {}".format(data.subject_id),
            context.state
        )
Use a record found in LDAP to populate input for NameID generation.
386,523
def slang_date(self, locale="en"):
    dt = pendulum.instance(self.datetime())
    try:
        return _translate(dt, locale)
    except KeyError:
        pass
    delta = humanize.time.abs_timedelta(
        timedelta(seconds=(self.epoch - now().epoch)))
    format_string = "DD MMM"
    if delta.days >= 365:
        format_string += " YYYY"
    return dt.format(format_string, locale=locale).title()
Returns human slang representation of date. Keyword Arguments: locale -- locale to translate to, e.g. 'fr' for french. (default: 'en' - English)
386,524
def dump(self, obj):
    self.references = []
    self.object_obj = obj
    self.object_stream = BytesIO()
    self._writeStreamHeader()
    self.writeObject(obj)
    return self.object_stream.getvalue()
Dumps the given object in the Java serialization format
386,525
def get_object(self, object_ids):
    for object_id in object_ids:
        if not isinstance(object_id, ObjectID):
            raise TypeError(
                "Attempting to call `get` on the value {}, "
                "which is not an ray.ObjectID.".format(object_id))
    plain_object_ids = [
        plasma.ObjectID(object_id.binary()) for object_id in object_ids
    ]
    for i in range(0, len(object_ids),
                   ray._config.worker_fetch_request_size()):
        self.raylet_client.fetch_or_reconstruct(
            object_ids[i:(i + ray._config.worker_fetch_request_size())],
            True)
    final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
    # Assumed reconstruction: map the ID bytes of objects that are not yet
    # available to their index in final_results.
    unready_ids = {
        plain_object_ids[i].binary(): i
        for i, val in enumerate(final_results)
        if val is plasma.ObjectNotAvailable
    }
    while len(unready_ids) > 0:
        object_ids_to_fetch = [
            plasma.ObjectID(unready_id) for unready_id in unready_ids.keys()
        ]
        ray_object_ids_to_fetch = [
            ObjectID(unready_id) for unready_id in unready_ids.keys()
        ]
        fetch_request_size = ray._config.worker_fetch_request_size()
        for i in range(0, len(object_ids_to_fetch), fetch_request_size):
            self.raylet_client.fetch_or_reconstruct(
                ray_object_ids_to_fetch[i:(i + fetch_request_size)],
                False,
                self.current_task_id,
            )
        results = self.retrieve_and_deserialize(
            object_ids_to_fetch,
            max([
                ray._config.get_timeout_milliseconds(),
                int(0.01 * len(unready_ids)),
            ]),
        )
        # Assumed reconstruction: record newly available objects.
        for object_id, val in zip(object_ids_to_fetch, results):
            if val is not plasma.ObjectNotAvailable:
                index = unready_ids.pop(object_id.binary())
                final_results[index] = val
    self.raylet_client.notify_unblocked(self.current_task_id)
    assert len(final_results) == len(object_ids)
    return final_results
Get the value or values in the object store associated with the IDs. Return the values from the local object store for object_ids. This will block until all the values for object_ids have been written to the local object store. Args: object_ids (List[object_id.ObjectID]): A list of the object IDs whose values should be retrieved.
386,526
def register_listener(self, address, func):
    try:
        listeners = self.address_listeners[address]
    except KeyError:
        listeners = []
        self.address_listeners[address] = listeners
    if func not in listeners:
        listeners.append(func)
    return True
Adds a listener for messages received on a specific address When a KNX message for the given address is received from the KNX bus, the listener is called as func(address, data). There can be multiple listeners for a given address
386,527
def share_vm_image(self, vm_image_name, permission):
    _validate_not_none('vm_image_name', vm_image_name)
    _validate_not_none('permission', permission)
    path = self._get_sharing_path_using_vm_image_name(vm_image_name)
    query = '&permission=' + permission
    path = path + '?' + query.lstrip('&')
    return self._perform_put(
        path, None, as_async=True,
        x_ms_version=...)  # API version string lost from the source
Share an already replicated OS image. This operation is only for publishers. You have to be registered as image publisher with Windows Azure to be able to call this. vm_image_name: The name of the virtual machine image to share permission: The sharing permission: public, msdn, or private
386,528
def get_rank_value(cls, name):
    if name in cls._RANK_NAMES.values():
        return getattr(cls, name, None)
    return None
Returns the integer constant value for the given rank name. :param string name: the string rank name (e.g., 'HARDCODED'). :returns: the integer constant value of the rank. :rtype: int
386,529
def datum_to_value(self, instance, datum):
    datum = self.map_func(instance, datum)
    if datum is None:
        return None
    local_data = None
    if self.reverse is not None:
        local_data = {}
        if self.reverse is undefined:
            local_data[instance.__class__.__name__.lower()] = instance
        else:
            local_data[self.reverse] = instance
    bound = getattr(instance._origin, self.cls)
    return bound(datum, local_data=local_data)
Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A set of `cls` from the given datum.
386,530
def from_zenity_tuple_str(zenity_tuple_str: str):
    components = zenity_tuple_str.strip("rgb()").split(",")
    return ColourData(*map(int, components))
Parser for Zenity output, which outputs a named tuple-like string: "rgb(R, G, B)", where R, G, B are base10 integers. @param zenity_tuple_str: tuple-like string: "rgb(r, g, b), where r, g, b are base10 integers. @return: ColourData instance @rtype: ColourData
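For example (the field names of ColourData are hypothetical):
>>> from_zenity_tuple_str("rgb(255, 128, 0)")
ColourData(r=255, g=128, b=0)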
386,531
def get_config(self, name, default=_MISSING):
    val = self._config.get(name, default)
    if val is _MISSING:
        raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name))
    return val
Get a configuration setting from this DeviceAdapter. See :meth:`AbstractDeviceAdapter.get_config`.
386,532
def _fetchBlock(self):
    if self._blockRequestInProgress:
        return
    if self._standbyBlock is not None:
        return
    self._blockRequestInProgress = True
    fetchReq = TFetchResultsReq(operationHandle=self.operationHandle,
                                orientation=TFetchOrientation.FETCH_NEXT,
                                maxRows=self.arraysize)
    self._standbyBlock = self._fetch([], fetchReq)
    self._blockRequestInProgress = False
    return
internal use only. get a block of rows from the server and put in standby block. future enhancements: (1) locks for multithreaded access (protect from multiple calls) (2) allow for prefetch by use of separate thread
386,533
def protect(self, password=None, read_protect=False, protect_from=0):
    args = (password, read_protect, protect_from)
    return super(NTAG21x, self).protect(*args)
Set password protection or permanent lock bits. If the *password* argument is None, all memory pages will be protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found, protect() also sets the NDEF write flag to read-only. All Tags of the NTAG21x family can alternatively be protected by password. If a *password* argument is provided, the protect() method writes the first 4 byte of the *password* string into the Tag's password (PWD) memory bytes and the following 2 byte of the *password* string into the password acknowledge (PACK) memory bytes. Factory default values are used if the *password* argument is an empty string. Lock bits are not set for password protection. The *read_protect* and *protect_from* arguments are only evaluated if *password* is not None. If *read_protect* is True, the memory protection bit (PROT) is set to require password verification also for reading of protected memory pages. The value of *protect_from* determines the first password protected memory page (one page is 4 byte) with the exception that the smallest set value is page 3 even if *protect_from* is smaller.
386,534
def createNetwork(dataSource):
    network = Network()
    sensor = createRecordSensor(network, name=_RECORD_SENSOR,
                                dataSource=dataSource)
    createSpatialPooler(network, name=_L1_SPATIAL_POOLER,
                        inputWidth=sensor.encoder.getWidth())
    linkType = "UniformLink"
    linkParams = ""
    network.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, linkType, linkParams)
    l1temporalMemory = createTemporalMemory(network, _L1_TEMPORAL_MEMORY)
    network.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, linkType, linkParams)
    # Parameter keys assumed for the SDRClassifierRegion.
    classifierParams = {'alpha': 0.005, 'implementation': 'py',
                        'steps': '1', 'verbosity': 0}
    l1Classifier = network.addRegion(_L1_CLASSIFIER, "py.SDRClassifierRegion",
                                     json.dumps(classifierParams))
    l1Classifier.setParameter('inferenceMode', True)
    l1Classifier.setParameter('learningMode', True)
    network.link(_L1_TEMPORAL_MEMORY, _L1_CLASSIFIER, linkType, linkParams,
                 srcOutput="bottomUpOut", destInput="bottomUpIn")
    network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams,
                 srcOutput="categoryOut", destInput="categoryIn")
    network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams,
                 srcOutput="bucketIdxOut", destInput="bucketIdxIn")
    network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams,
                 srcOutput="actValueOut", destInput="actValueIn")
    l2inputWidth = l1temporalMemory.getSelf().getOutputElementCount("bottomUpOut")
    createSpatialPooler(network, name=_L2_SPATIAL_POOLER, inputWidth=l2inputWidth)
    network.link(_L1_TEMPORAL_MEMORY, _L2_SPATIAL_POOLER, linkType, linkParams)
    createTemporalMemory(network, _L2_TEMPORAL_MEMORY)
    network.link(_L2_SPATIAL_POOLER, _L2_TEMPORAL_MEMORY, linkType, linkParams)
    l2Classifier = network.addRegion(_L2_CLASSIFIER, "py.SDRClassifierRegion",
                                     json.dumps(classifierParams))
    l2Classifier.setParameter('inferenceMode', True)
    l2Classifier.setParameter('learningMode', True)
    network.link(_L2_TEMPORAL_MEMORY, _L2_CLASSIFIER, linkType, linkParams,
                 srcOutput="bottomUpOut", destInput="bottomUpIn")
    network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams,
                 srcOutput="categoryOut", destInput="categoryIn")
    network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams,
                 srcOutput="bucketIdxOut", destInput="bucketIdxIn")
    network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams,
                 srcOutput="actValueOut", destInput="actValueIn")
    return network
Creates and returns a new Network with a sensor region reading data from 'dataSource'. There are two hierarchical levels, each with one SP and one TM. @param dataSource - A RecordStream containing the input data @returns a Network ready to run
386,535
def init_app(self, app, sessionstore=None, register_blueprint=False): return super(InvenioAccountsREST, self).init_app( app, sessionstore=sessionstore, register_blueprint=register_blueprint, )
Flask application initialization. :param app: The Flask application. :param sessionstore: store for sessions. Passed to ``flask-kvsession``. If ``None`` then Redis is configured. (Default: ``None``) :param register_blueprint: If ``True``, the application registers the blueprints. (Default: ``False``)
386,536
def _build_youtube_dl_coprocessor(cls, session: AppSession, proxy_port: int):
    wpull.processor.coprocessor.youtubedl.get_version(session.args.youtube_dl_exe)
    # Factory key names assumed.
    coprocessor = session.factory.new(
        'YoutubeDlCoprocessor',
        session.args.youtube_dl_exe,
        (session.args.proxy_server_address, proxy_port),
        root_path=session.args.directory_prefix,
        user_agent=session.args.user_agent or session.default_user_agent,
        warc_recorder=session.factory.get('WARCRecorder'),
        inet_family=session.args.inet_family,
        check_certificate=False
    )
    return coprocessor
Build youtube-dl coprocessor.
386,537
def dedents(self, s, stacklevel=3):
    s = dedents(s)
    return safe_modulo(s, self.params, stacklevel=stacklevel)
Dedent a string and substitute with the :attr:`params` attribute Parameters ---------- s: str string to dedent and insert the sections of the :attr:`params` attribute stacklevel: int The stacklevel for the warning raised in :func:`safe_modulo` when encountering an invalid key in the string
386,538
def remove_event(self, name=None, time=None, chan=None):
    self.annot.remove_event(name=name, time=time, chan=chan)
    self.update_annotations()
Action: remove single event.
386,539
def _permute(self, ordering: np.ndarray) -> None:
    for key in self.keys():
        self[key] = self[key][ordering]
Permute all the attributes in the collection Remarks: This permutes the order of the values for each attribute in the file
386,540
def set_volume(self, volume=50):
    assert(volume in range(101))
    log.debug("setting volume...")
    cmd, url = DEVICE_URLS["set_volume"]
    json_data = {
        "volume": volume,
    }
    return self._exec(cmd, url, json_data=json_data)
allows to change the volume :param int volume: volume to be set for the current device [0..100] (default: 50)
386,541
def main():
    neutron_config.register_agent_state_opts_helper(CONF)
    common_config.init(sys.argv[1:])
    neutron_config.setup_logging()
    hnv_agent = HNVAgent()
    LOG.info("Agent initialized successfully, now running... ")
    hnv_agent.daemon_loop()
The entry point for the HNV Agent.
386,542
def _parse_response(self, resp):
    super(RaxIdentity, self)._parse_response(resp)
    user = resp["access"]["user"]
    defreg = user.get("RAX-AUTH:defaultRegion")
    if defreg:
        self._default_region = defreg
Gets the authentication information from the returned JSON.
386,543
def prepareToSolve(self):
    self.setAndUpdateValues(self.solution_next, self.IncomeDstn,
                            self.LivPrb, self.DiscFac)
    self.defBoroCnst(self.BoroCnstArt)
Perform preparatory work before calculating the unconstrained consumption function. Parameters ---------- none Returns ------- none
386,544
def make_grid(self):
    changes = None
    if isinstance(self.magic_dataframe, cb.MagicDataFrame):
        col_labels = list(self.magic_dataframe.df.columns)
        for ex_col in self.exclude_cols:
            col_labels.pop(ex_col)
        if self.grid_type == 'ages':  # grid-type name assumed
            # Level names assumed from the MagIC data model.
            levels = ['specimen', 'sample', 'site', 'location']
            for label in levels[:]:
                if label in col_labels:
                    col_labels.remove(label)
                else:
                    levels.remove(label)
            col_labels[:0] = levels
        else:
            if self.parent_type:
                if self.parent_type[:-1] in col_labels:
                    col_labels.remove(self.parent_type[:-1])
                col_labels[:0] = [self.parent_type[:-1]]
            if self.grid_type[:-1] in col_labels:
                col_labels.remove(self.grid_type[:-1])
            col_labels[:0] = (self.grid_type[:-1],)
        for col in col_labels:
            if col not in self.magic_dataframe.df.columns:
                self.magic_dataframe.df[col] = None
        self.magic_dataframe.df = self.magic_dataframe.df[col_labels]
        self.magic_dataframe.sort_dataframe_cols()
        col_labels = list(self.magic_dataframe.df.columns)
        row_labels = self.magic_dataframe.df.index
        for header in self.reqd_headers:
            if header not in col_labels:
                changes = set([1])
                col_labels.append(header)
    else:
        col_labels = list(self.reqd_headers)
        # Grid-type and column names below were lost; ``...`` marks them.
        if self.grid_type in [...]:
            col_labels.extend([..., ...])
        if self.grid_type == 'ages':
            levels = ['specimen', 'sample', 'site', 'location']
            for label in levels:
                if label in col_labels:
                    col_labels.remove(label)
            col_labels[:0] = levels
        else:
            if self.parent_type:
                col_labels.remove(self.parent_type[:-1])
                col_labels[:0] = [self.parent_type[:-1]]
            col_labels.remove(self.grid_type[:-1])
            col_labels[:0] = [self.grid_type[:-1]]
        for col in col_labels:
            if col not in self.magic_dataframe.df.columns:
                self.magic_dataframe.df[col] = None
    if not self.huge:
        grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
                                    row_labels=[], col_labels=col_labels)
    else:
        row_labels = self.magic_dataframe.df.index
        grid = magic_grid.HugeMagicGrid(parent=self.panel, name=self.grid_type,
                                        row_labels=row_labels,
                                        col_labels=col_labels)
    grid.do_event_bindings()
    grid.changes = changes
    self.grid = grid
    return grid
return grid
386,545
def __json(self):
    if self.exclude_list is None:
        self.exclude_list = []
    fields = {}
    for key, item in vars(self).items():
        # SQLAlchemy state-attribute name assumed.
        if hasattr(self, '_sa_instance_state'):
            if len(orm.attributes.instance_state(self).unloaded) > 0:
                mapper = inspect(self)
                for column in mapper.attrs:
                    column.key
                    column.value
        if str(key).startswith('_') or key in self.exclude_list:
            continue
        fields[key] = item
    obj = Json.safe_object(fields)
    return str(obj)
Using the exclude lists, convert fields to a string.
386,546
def add_mark_at(string, index, mark):
    if index == -1:
        return string
    return string[:index] + add_mark_char(string[index], mark) + string[index + 1:]
Add mark to the index-th character of the given string. Return the new string after applying change. Notice: index > 0
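A sketch of intended use, assuming a hypothetical add_mark_char that maps ('o', Mark.HAT) to 'ô':
>>> add_mark_at('tho', 2, Mark.HAT)
'thô'
>>> add_mark_at('tho', -1, Mark.HAT)
'tho'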
386,547
def forward(self,
            layer_input: torch.Tensor,
            layer_output: torch.Tensor,
            layer_index: int = None,
            total_layers: int = None) -> torch.Tensor:
    if layer_index is not None and total_layers is not None:
        dropout_prob = 1.0 * self.undecayed_dropout_prob * layer_index / total_layers
    else:
        dropout_prob = 1.0 * self.undecayed_dropout_prob
    if self.training:
        if torch.rand(1) < dropout_prob:
            return layer_input
        else:
            return layer_output + layer_input
    else:
        return (1 - dropout_prob) * layer_output + layer_input
Apply dropout to this layer, for this whole mini-batch. dropout_prob = layer_index / total_layers * undecayed_dropout_prob if layer_index and total_layers are specified, else it will use the undecayed_dropout_prob directly. Parameters ---------- layer_input ``torch.FloatTensor`` required The input tensor of this layer. layer_output ``torch.FloatTensor`` required The output tensor of this layer, with the same shape as the layer_input. layer_index ``int`` The layer index, starting from 1. This is used to calculate the dropout prob together with the `total_layers` parameter. total_layers ``int`` The total number of layers. Returns ------- output: ``torch.FloatTensor`` A tensor with the same shape as `layer_input` and `layer_output`.
386,548
def disable(gandi, resource, backend, port, probe):
    result = []
    if backend:
        backends = backend
        for backend in backends:
            if 'port' not in backend:
                if not port:
                    # Prompt text assumed.
                    backend['port'] = click.prompt('Please give a port', type=int)
                else:
                    backend['port'] = port
            result = gandi.webacc.backend_disable(backend)
    if probe:
        if not resource:
            # Message text assumed.
            gandi.echo('You must provide a resource to disable a probe.')
            return result
        result = gandi.webacc.probe_disable(resource)
    return result
Disable a backend or a probe on a webaccelerator
386,549
def pack_nibbles(nibbles):
    if nibbles[-1] == NIBBLE_TERMINATOR:
        flags = 2
        nibbles = nibbles[:-1]
    else:
        flags = 0
    oddlen = len(nibbles) % 2
    flags |= oddlen
    if oddlen:
        nibbles = [flags] + nibbles
    else:
        nibbles = [flags, 0] + nibbles
    o = b''
    for i in range(0, len(nibbles), 2):
        o += ascii_chr(16 * nibbles[i] + nibbles[i + 1])
    return o
pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator
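A worked example (assuming NIBBLE_TERMINATOR == 16 and ascii_chr returning a one-byte string): the odd-length list [1, 2, 3] gets flag nibble 1, packing to bytes 0x11 0x23; appending the terminator additionally sets the termination flag, giving 0x31 0x23:
>>> pack_nibbles([1, 2, 3])
b'\x11#'
>>> pack_nibbles([1, 2, 3, 16])
b'1#'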
386,550
def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
    # Dict keys assumed.
    if self._curr is not None:
        self._curr['end'] = scope_start - 1
    self._curr = {
        'type': scope_type,
        'name': scope_name,
        'start': scope_start,
        'end': scope_start,
    }
    if is_method and self._positions:
        last = self._positions[-1]
        if 'methods' not in last:
            last['methods'] = []
        last['methods'].append(self._curr)
    else:
        self._positions.append(self._curr)
Record an identified scope by adding it to the positions list.
386,551
def search(cls, session, queries, out_type):
    # Argument, endpoint format, and query key assumed.
    cls._check_implements('search')
    domain = cls.get_search_domain(queries)
    return cls(
        '/%s/search.json' % cls.__endpoint__,
        data={'query': str(domain)},
        session=session,
        out_type=out_type,
    )
Search for a record given a domain. Args: session (requests.sessions.Session): Authenticated session. queries (helpscout.models.Domain or iter): The queries for the domain. If a ``Domain`` object is provided, it will simply be returned. Otherwise, a ``Domain`` object will be generated from the complex queries. In this case, the queries should conform to the interface in :func:`helpscout.domain.Domain.from_tuple`. out_type (helpscout.BaseModel): The type of record to output. This should be provided by child classes, by calling super. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator of the ``out_type`` that is defined.
386,552
def update_function(old, new):
    for name in func_attrs:
        try:
            setattr(old, name, getattr(new, name))
        except (AttributeError, TypeError):
            pass
Upgrade the code object of a function
386,553
def options(cls, obj, options=None, **kwargs):
    if (options is None) and kwargs == {}:
        yield
    else:
        Store._options_context = True
        optstate = cls.state(obj)
        groups = Store.options().groups.keys()
        options = cls.merge_options(groups, options, **kwargs)
        cls.set_options(obj, options)
        yield
    if options is not None:
        Store._options_context = True
        cls.state(obj, state=optstate)
Context-manager for temporarily setting options on an object (if options is None, no options will be set) . Once the context manager exits, both the object and the Store will be left in exactly the same state they were in before the context manager was used. See holoviews.core.options.set_options function for more information on the options specification format.
386,554
def ResolveForCreate(self, document):
    if document is None:
        raise ValueError("document is None.")
    partition_key = self.partition_key_extractor(document)
    return self.consistent_hash_ring.GetCollectionNode(partition_key)
Resolves the collection for creating the document based on the partition key. :param dict document: The document to be created. :return: Collection Self link or Name based link which should handle the Create operation. :rtype: str
386,555
def clear_scroll(self, scroll_id=None, body='', params={}, callback=None,
                 **kwargs):
    # URL parts and HTTP method assumed from the Elasticsearch scroll API.
    url = self.mk_url(*['_search', 'scroll', scroll_id])
    self.client.fetch(
        self.mk_req(url, method='DELETE', body=body, **kwargs),
        callback=callback
    )
Clear the scroll request created by specifying the scroll parameter to search. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_ :arg scroll_id: The scroll ID or a list of scroll IDs :arg body: A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter
386,556
def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
    dependencies = self.getDependenciesRecursive(
        target=target,
        test=test_dependencies
    )
    return (name in dependencies)
Check if this module, or any of its dependencies, have a dependencies with the specified name in their dependencies, or in their targetDependencies corresponding to the specified target. Note that if recursive dependencies are not installed, this test may return a false-negative.
386,557
def doc_stream(path):
    with open(path, 'r') as f:
        for line in f:
            if line.strip():
                yield line
Generator to feed tokenized documents (treating each line as a document).
386,558
def create_module(name, path):
    module = imp.new_module(name)
    module.__file__ = path
    execfile(path, module.__dict__)
    return module
Returns module created *on the fly*. Returned module would have name same as given ``name`` and would contain code read from file at the given ``path`` (it may also be a zip or package containing *__main__* module).
386,559
def single(self):
    records = list(self)
    size = len(records)
    if size == 0:
        return None
    if size != 1:
        warn("Expected a result with a single record, "
             "but this result contains %d" % size)
    return records[0]
Obtain the next and only remaining record from this result. A warning is generated if more than one record is available but the first of these is still returned. :returns: the next :class:`.Record` or :const:`None` if none remain :warns: if more than one record is available
386,560
def fromFile(cls, filename):
    self = cls.__new__(cls)
    start = _time()
    savedData = _loads(_open(filename, "r").read())
    self.string, self.unit, self.voc, self.vocSize, self.SA, features = savedData[:6]
    self.length = len(self.SA)
    if self.unit == UNIT_WORD:
        self.tokSep = " "
    elif self.unit in (UNIT_CHARACTER, UNIT_BYTE):
        self.tokSep = ""
    else:
        raise Exception("Unknown unit type identifier:", self.unit)
    self.tokId = dict((char, iChar) for iChar, char in enumerate(self.voc))
    self.nbSentences = self.string.count(self.tokId.get("\n", 0))
    self.features = []
    for featureName, (featureValues, featureDefault) in zip(features, savedData[6:]):
        self.addFeatureSA((lambda _: featureValues), name=featureName,
                          default=featureDefault)
    self.fromFileTime = _time() - start
    if _trace:
        print >> _stderr, "fromFileTime %.2fs" % self.fromFileTime
    return self
Load a suffix array instance from filename, a file created by toFile. Accept any filename following the _open conventions.
386,561
def format_interface_name(intf_type, port, ch_grp=0):
    # Format strings recovered from the docstring examples.
    if ch_grp > 0:
        return 'port-channel:%s' % str(ch_grp)
    return '%s:%s' % (intf_type.lower(), port)
Method to format interface name given type, port. Given interface type, port, and channel-group, this method formats an interface name. If channel-group is non-zero, then port-channel is configured. :param intf_type: Such as 'ethernet' or 'port-channel' :param port: unique identification -- 1/32 or 1 :ch_grp: If non-zero, ignore other params and format port-channel<ch_grp> :returns: the full formatted interface name. ex: ethernet:1/32, port-channel:1
386,562
def as_completed(fs, timeout=None):
    with _AcquireFutures(fs):
        finished = set(f for f in fs
                       if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = set(fs) - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    timer = Timeout(timeout)
    timer.start()
    try:
        for future in finished:
            yield future
        while pending:
            waiter.event.wait()
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()
            for future in finished:
                yield future
                pending.remove(future)
    except Timeout as e:
        if timer is not e:
            raise
        # Message format assumed from the stdlib equivalent.
        raise TimeoutError('%d (of %d) futures unfinished'
                           % (len(pending), len(fs)))
    finally:
        timer.cancel()
        for f in fs:
            f._waiters.remove(waiter)
An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout.
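Typical usage mirrors the stdlib concurrent.futures pattern (a sketch; executor, work, items, and handle are hypothetical):
>>> futures = [executor.submit(work, item) for item in items]
>>> for future in as_completed(futures, timeout=30):
...     handle(future.result())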
386,563
def build_docs(directory):
    os.chdir(directory)
    process = subprocess.Popen(["make", "html"], cwd=directory)
    process.communicate()
Builds sphinx docs from a given directory.
386,564
def past_date(start='-30d'):  # default recovered from the docstring
    return lambda n, f: f.past_date(
        start_date=start,
        tzinfo=get_timezone(),
    )
Returns a ``date`` object in the past between 1 day ago and the specified ``start``. ``start`` can be a string, another date, or a timedelta. If it's a string, it must start with `-`, followed by an integer and a unit, e.g.: ``'-30d'``. Defaults to `'-30d'` Valid units are: * ``'years'``, ``'y'`` * ``'weeks'``, ``'w'`` * ``'days'``, ``'d'`` * ``'hours'``, ``'h'`` * ``'minutes'``, ``'m'`` * ``'seconds'``, ``'s'``
386,565
def handle_cannot(reference_answers: List[str]):
    # Sentinel value recovered from the docstring.
    num_cannot = 0
    num_spans = 0
    for ref in reference_answers:
        if ref == 'CANNOTANSWER':
            num_cannot += 1
        else:
            num_spans += 1
    if num_cannot >= num_spans:
        reference_answers = ['CANNOTANSWER']
    else:
        reference_answers = [x for x in reference_answers
                             if x != 'CANNOTANSWER']
    return reference_answers
Process a list of reference answers. If equal or more than half of the reference answers are "CANNOTANSWER", take it as gold. Otherwise, return answers that are not "CANNOTANSWER".
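For example, with the 'CANNOTANSWER' sentinel restored above:
>>> handle_cannot(['CANNOTANSWER', 'Yes', 'No'])
['Yes', 'No']
>>> handle_cannot(['CANNOTANSWER', 'CANNOTANSWER', 'Yes'])
['CANNOTANSWER']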
386,566
def read(self, offset, length, min_length=0, unbuffered=False, wait=True,
         send=True):
    if length > self.connection.max_read_size:
        raise SMBException("The requested read length %d is greater than "
                           "the maximum negotiated read size %d"
                           % (length, self.connection.max_read_size))
    read = SMB2ReadRequest()
    # Field names assumed from the SMB2 READ request structure.
    read['length'] = length
    read['offset'] = offset
    read['minimum_count'] = min_length
    read['file_id'] = self.file_id
    read['padding'] = b"\x50"
    if unbuffered:
        if self.connection.dialect < Dialects.SMB_3_0_2:
            raise SMBUnsupportedFeature(self.connection.dialect,
                                        Dialects.SMB_3_0_2,
                                        "SMB2_READFLAG_READ_UNBUFFERED",
                                        True)
        read['flags'].set_flag(ReadFlags.SMB2_READFLAG_READ_UNBUFFERED)
    if not send:
        return read, self._read_response
    log.info("Session: %s, Tree Connect ID: %s - sending SMB2 Read "
             "Request for file %s" % (self.tree_connect.session.username,
                                      self.tree_connect.share_name,
                                      self.file_name))
    log.debug(str(read))
    request = self.connection.send(read,
                                   self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._read_response(request, wait)
Reads from an opened file or pipe Supports out of band send function, call this function with send=False to return a tuple of (SMB2ReadRequest, receive_func) instead of sending the the request and waiting for the response. The receive_func can be used to get the response from the server by passing in the Request that was used to sent it out of band. :param offset: The offset to start the read of the file. :param length: The number of bytes to read from the offset. :param min_length: The minimum number of bytes to be read for a successful operation. :param unbuffered: Whether to the server should cache the read data at intermediate layers, only value for SMB 3.0.2 or newer :param wait: If send=True, whether to wait for a response if STATUS_PENDING was received from the server or fail. :param send: Whether to send the request in the same call or return the message to the caller and the unpack function :return: A byte string of the bytes read
386,567
def download_url(url, destination):
    from settings import VALID_IMAGE_EXTENSIONS
    base_name, ext = os.path.splitext(url)
    ext = ext.lstrip('.')
    if ext not in VALID_IMAGE_EXTENSIONS:
        raise Exception("Invalid image extension")
    base_path, filename = os.path.split(destination)
    os.makedirs(base_path)
    urllib.urlretrieve(url, destination)
Download an external URL to the destination
386,568
def qtaax(mt, x, t, q, m=1):
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return taax(mtj, x, t) - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
Geometric: temporary annuity whose payments vary geometrically at rate q.
386,569
def property(func):
    attr = abc.abstractmethod(func)
    attr.__iproperty__ = True
    attr = Property(attr)
    return attr
Wrap a function as a property. This differs from attribute by identifying properties explicitly listed in the class definition rather than named attributes defined on instances of a class at init time.
386,570
def alias_tags(tags_list, alias_map):
    def _alias_dict(tags):
        tags_ = [alias_map.get(t, t) for t in tags]
        return list(set([t for t in tags_ if t is not None]))
    tags_list_ = [_alias_dict(tags) for tags in tags_list]
    return tags_list_
update tags to new values Args: tags_list (list): alias_map (dict): maps tag -> new value (a value of None drops the tag) Returns: list: updated tags CommandLine: python -m utool.util_tags alias_tags --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_tags import * # NOQA >>> import utool as ut >>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']] >>> alias_map = {'t2': 't1', 't5': None} >>> result = alias_tags(tags_list, alias_map) >>> print(result)
386,571
def read_git_branch():
    # Environment-variable usage assumed from the docstring.
    if os.getenv('TRAVIS_BRANCH'):
        return os.getenv('TRAVIS_BRANCH')
    else:
        try:
            repo = git.repo.base.Repo(search_parent_directories=True)
            return repo.active_branch.name
        except Exception:
            return ''
Obtain the current branch name from the Git repository. If on Travis CI, use the ``TRAVIS_BRANCH`` environment variable.
386,572
def load_config():
    # Default path and file name lost from the source; ``...`` marks them.
    cfg_path = ...
    cfg_file = ...
    if not os.path.isdir(cfg_path):
        print("Can not find default freelan config directory.")
        return
    cfg_file_path = os.path.join(cfg_path, cfg_file)
    if not os.path.isfile(cfg_file_path):
        print("Can not find default freelan config file.")
        return
    return _load_config(cfg_file_path)
try loading config file from a default directory
386,573
def start_of_chunk(prev_tag, tag, prev_type, type_):
    # Tag values assumed from the standard IOBES scheme.
    chunk_start = False
    if tag == 'B':
        chunk_start = True
    if tag == 'S':
        chunk_start = True
    if prev_tag == 'E' and tag == 'E':
        chunk_start = True
    if prev_tag == 'E' and tag == 'I':
        chunk_start = True
    if prev_tag == 'S' and tag == 'E':
        chunk_start = True
    if prev_tag == 'S' and tag == 'I':
        chunk_start = True
    if prev_tag == 'O' and tag == 'E':
        chunk_start = True
    if prev_tag == 'O' and tag == 'I':
        chunk_start = True
    if tag != 'O' and tag != '.' and prev_type != type_:
        chunk_start = True
    return chunk_start
Checks if a chunk started between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_start: boolean.
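For example, under the assumed IOBES tag values used above:
>>> start_of_chunk('O', 'B', 'O', 'PER')
True
>>> start_of_chunk('B', 'I', 'PER', 'PER')
False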
386,574
def get_float(prompt=None):
    while True:
        s = get_string(prompt)
        if s is None:
            return None
        if len(s) > 0 and re.search(r"^[+-]?\d*(?:\.\d*)?$", s):
            try:
                return float(s)
            except ValueError:
                pass
        if prompt is None:
            print("Retry: ", end="")
Read a line of text from standard input and return the equivalent float as precisely as possible; if text does not represent a double, user is prompted to retry. If line can't be read, return None.
386,575
def reset(self):
    self.activeCells = []
    self.winnerCells = []
    self.activeSegments = []
    self.matchingSegments = []
Indicates the start of a new sequence. Clears any predictions and makes sure synapses don't grow to the currently active cells in the next time step.
386,576
def MessageSetItemSizer(field_number):
    static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number)
                   + _TagSize(3))
    local_VarintSize = _VarintSize

    def FieldSize(value):
        l = value.ByteSize()
        return static_size + local_VarintSize(l) + l

    return FieldSize
Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } }
386,577
def _maybe_cast_to_float64(da):
    if da.dtype == np.float32:
        # Warning text assumed; the original message was lost.
        logging.warning('Casting np.float32 data to np.float64.')
        return da.astype(np.float64)
    else:
        return da
Cast DataArrays to np.float64 if they are of type np.float32. Parameters ---------- da : xr.DataArray Input DataArray Returns ------- DataArray
386,578
def save_profile(self, **params):
    image = self.get_image()
    if image is None:
        return
    profile = image.get('profile', None)
    if profile is None:
        profile = Settings.SettingGroup()
        image.set(profile=profile)
    self.logger.debug("saving to image profile: params=%s" % (str(params)))
    profile.set(**params)
    return profile
Save the given parameters into profile settings. Parameters ---------- params : dict Keywords and values to be saved.
386,579
def _count_elements(mapping, iterable):
    mapping_get = mapping.get
    for elem in iterable:
        mapping[elem] = mapping_get(elem, 0) + 1
Tally elements from the iterable.
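For example:
>>> counts = {}
>>> _count_elements(counts, 'abracadabra')
>>> counts
{'a': 5, 'b': 2, 'r': 2, 'c': 1, 'd': 1}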
386,580
def get_acl(self, key_name='', headers=None, version_id=None):
    return self.get_acl_helper(key_name, headers, STANDARD_ACL)
returns a bucket's acl. We include a version_id argument to support a polymorphic interface for callers, however, version_id is not relevant for Google Cloud Storage buckets and is therefore ignored here.
386,581
def waitget(self, key, maxwaittime=60):
    wtimes = [0.2] * 3 + [0.5] * 2 + [1]
    tries = 0
    waited = 0
    while 1:
        try:
            val = self[key]
            return val
        except KeyError:
            pass
        if waited > maxwaittime:
            raise KeyError(key)
        time.sleep(wtimes[tries])
        waited += wtimes[tries]
        if tries < len(wtimes) - 1:
            tries += 1
Wait (poll) for a key to get a value Will wait for `maxwaittime` seconds before raising a KeyError. The call exits normally if the `key` field in db gets a value within the timeout period. Use this for synchronizing different processes or for ensuring that an unfortunately timed "db['key'] = newvalue" operation in another process (which causes all 'get' operation to cause a KeyError for the duration of pickling) won't screw up your program logic.
386,582
def from_xdr_object(cls, asset_xdr_object):
    # Version-byte name and padding strip value assumed.
    if asset_xdr_object.type == Xdr.const.ASSET_TYPE_NATIVE:
        return Asset.native()
    elif asset_xdr_object.type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4:
        issuer = encode_check(
            'account', asset_xdr_object.alphaNum4.issuer.ed25519).decode()
        code = asset_xdr_object.alphaNum4.assetCode.decode().rstrip('\x00')
    else:
        issuer = encode_check(
            'account', asset_xdr_object.alphaNum12.issuer.ed25519).decode()
        code = (
            asset_xdr_object.alphaNum12.assetCode.decode().rstrip('\x00'))
    return cls(code, issuer)
Create a :class:`Asset` from an XDR Asset object. :param asset_xdr_object: The XDR Asset object. :return: A new :class:`Asset` object from the given XDR Asset object.
386,583
def _create_namespace(namespace, apiserver_url):
    url = "{0}/api/v1/namespaces".format(apiserver_url)
    data = {
        "kind": "Namespace",
        "apiVersion": "v1",
        "metadata": {
            "name": namespace,
        }
    }
    log.trace("namespace creation requests: %s", data)
    ret = _kpost(url, data)
    log.trace("result is: %s", ret)
    return ret
create namespace on the defined k8s cluster
386,584
def pretty_unicode(string):
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # Codec arguments assumed; the originals were lost.
        return string.decode('Latin-1').encode('unicode_escape').decode("utf8")
Make sure the string is unicode: try to decode it as utf8, or fall back to a unicode-escaped string if that fails.
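For example, the utf8 branch:
>>> pretty_unicode(b'caf\xc3\xa9')
'café'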
386,585
def _num_values(self, zVar, varNum):
    values = 1
    if zVar == True:
        numDims = self.zvarsinfo[varNum][2]
        dimSizes = self.zvarsinfo[varNum][3]
        dimVary = self.zvarsinfo[varNum][4]
    else:
        numDims = self.rvarsinfo[varNum][2]
        dimSizes = self.rvarsinfo[varNum][3]
        dimVary = self.rvarsinfo[varNum][4]
    if numDims < 1:
        return values
    else:
        for x in range(0, numDims):
            if zVar == True:
                values = values * dimSizes[x]
            else:
                if dimVary[x] != 0:
                    values = values * dimSizes[x]
        return values
Determines the number of values in a record. Set zVar=True if this is a zvariable.
386,586
def eval_basis(self, x, regularize=True):
    if regularize:
        x = regularize_array(x)
    out = zeros((self.n, x.shape[0]), dtype=float, order='F')  # order assumed
    for i in xrange(self.n):
        out[i] = self.basis[i](x, **self.params)
    return out
basis_mat = C.eval_basis(x) Evaluates self's basis functions on x and returns them stacked in a matrix. basis_mat[i,j] gives basis function i evaluated at x[j,:].
386,587
def setup_package():
    # Package metadata strings were lost from the source; ``...`` marks them.
    setup(
        name=...,
        version=_get_version(),
        description=...,
        long_description=_get_long_description(),
        long_description_content_type=...,
        url=...,
        author=...,
        author_email=...,
        license=...,
        classifiers=[...],
        python_requires=...,
        keywords=...,
        packages=find_packages(exclude=[...]))
Package setup
386,588
def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
    s = conf.L3socket(promisc=promisc, filter=filter,
                      nofilter=nofilter, iface=iface)
    ans, _ = sndrcv(s, x, *args, **kargs)
    s.close()
    if len(ans) > 0:
        return ans[0][1]
    else:
        return None
Send packets at layer 3 and return only the first answer
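Typical usage (a sketch; the destination address is illustrative):
>>> resp = sr1(IP(dst="192.0.2.1")/ICMP(), timeout=2)
>>> if resp is not None:
...     resp.summary()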
386,589
def apply(self, config, raise_on_unknown_key=True): _recursive_merge(self._data, config, raise_on_unknown_key)
Apply additional configuration from a dictionary This will look for dictionary items that exist in the base_config and apply their values on the current configuration object
386,590
def edgepaths(self):
    edgepaths = super(TriMesh, self).edgepaths
    edgepaths.crs = self.crs
    return edgepaths
Returns the fixed EdgePaths or computes direct connections between supplied nodes.
386,591
def make_repr(*args, **kwargs):
    def method(self):
        cls_name = self.__class__.__name__
        if args:
            field_names = args
        else:
            def undercored(name):
                return name.startswith('_')

            def is_method(name):
                return callable(getattr(self, name))

            def good_name(name):
                return not undercored(name) and not is_method(name)

            field_names = filter(good_name, dir(self))
            field_names = sorted(field_names)
        field_getters = zip(field_names, map(attrgetter, field_names))
        # Rendering step assumed: join "name=value" pairs into the repr
        # string (the original construction of ``result`` was lost).
        fields = ('{0}={1!r}'.format(name, getter(self))
                  for name, getter in field_getters)
        result = '<{0} {1}>'.format(cls_name, ', '.join(fields))
        return result
    return method
Returns __repr__ method which returns ASCII representaion of the object with given fields. Without arguments, ``make_repr`` generates a method which outputs all object's non-protected (non-undercored) arguments which are not callables. Accepts ``*args``, which should be a names of object's attributes to be included in the output:: __repr__ = make_repr('foo', 'bar') If you want to generate attribute's content on the fly, then you should use keyword arguments and pass a callable of one argument:: __repr__ = make_repr(foo=lambda obj: obj.blah + 100500)
386,592
def _filter_valid_arguments(self, arguments, binary="mongod", config=False):
    # Config key and '--help' probing assumed.
    if self.args and self.args['binarypath']:
        binary = os.path.join(self.args['binarypath'], binary)
    ret = subprocess.Popen(['%s' % binary, '--help'],
                           stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
                           shell=False)
    out, err = ret.communicate()
    accepted_arguments = []
    for line in [option for option in out.decode().split('\n')]:
        line = line.lstrip()
        if line.startswith('-'):
            argument = line.split()[0]
            accepted_arguments.append(argument)
    # Filtering step assumed: keep accepted flags and the values that
    # follow an accepted flag.
    result = []
    for i, arg in enumerate(arguments):
        if arg.startswith('-'):
            if arg in accepted_arguments:
                result.append(arg)
        elif i > 0 and arguments[i - 1] in result:
            result.append(arg)
    return ' '.join(result)
Return a list of accepted arguments. Check which arguments in list are accepted by the specified binary (mongod, mongos). If an argument does not start with '-' but its preceding argument was accepted, then it is accepted as well. Example ['--slowms', '1000'] both arguments would be accepted for a mongod.
386,593
def _notify_new_tick_event(self, tick):
    if tick.time == '':
        return
    event = Event(type_=EVENT_TINY_TICK)
    event.dict_['data'] = tick  # event-dict key assumed
    self._event_engine.put(event)
Push a new tick event.
386,594
def _flush_data(self):
    if self._data and self._data.modified:
        hookenv.relation_set(self.relation_id, dict(self.to_publish.data))
If this relation's local unit data has been modified, publish it on the relation. This should be automatically called.
386,595
def accept(self):
    result = dict(self.python_data)
    for field in self.fields:
        if field.writable:
            result.update(field.accept())
        else:
            field.set_raw_value(self.form.raw_data,
                                field.from_python(result[field.name]))
    self.clean_value = self.conv.accept(result)
    return {self.name: self.clean_value}
Accepts all children fields, collects resulting values into dict and passes that dict to converter. Returns result of converter as separate value in parent `python_data`
386,596
def edit_wiki_page(self, subreddit, page, content, reason=''):
    # Data and config keys assumed from the Reddit wiki-edit API.
    data = {'content': content,
            'page': page,
            'r': six.text_type(subreddit),
            'reason': reason}
    evict = self.config['wiki_page'].format(
        subreddit=six.text_type(subreddit), page=page.lower())
    self.evict(evict)
    return self.request_json(self.config['wiki_edit'], data=data)
Create or edit a wiki page with title `page` for `subreddit`. :returns: The json response from the server.
386,597
def get_span_column_count(span):
    columns = 1
    first_column = span[0][1]
    for i in range(len(span)):
        if span[i][1] > first_column:
            columns += 1
            first_column = span[i][1]
    return columns
Find the length of a colspan. Parameters ---------- span : list of lists of int The [row, column] pairs that make up the span Returns ------- columns : int The number of columns included in the span Example ------- Consider this table:: +------+------------------+ | foo | bar | +------+--------+---------+ | spam | goblet | berries | +------+--------+---------+ :: >>> span = [[0, 1], [0, 2]] >>> print(get_span_column_count(span)) 2
386,598
def verify(expr, params=None):
    try:
        compile(expr, params=params)
        return True
    except com.TranslationError:
        return False
Determine if expression can be successfully translated to execute on MapD
386,599
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
                      paginate=False, sort_key="name", sort_dir="desc",
                      reversed_order=False):
    has_more_data = False
    has_prev_data = False
    if paginate:
        if reversed_order:
            # Direction swap reconstructed from the pagination logic.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        flavors = _nova.novaclient(request).flavors.list(is_public=is_public,
                                                         marker=marker,
                                                         limit=page_size + 1,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir)
        flavors, has_more_data, has_prev_data = update_pagination(
            flavors, page_size, marker, reversed_order)
    else:
        flavors = _nova.novaclient(request).flavors.list(is_public=is_public)
    if get_extras:
        for flavor in flavors:
            flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
    return (flavors, has_more_data, has_prev_data)
Get the list of available instance sizes (flavors).