Dataset schema (one record per row: index, code, docstring):
Unnamed: 0 — int64, values 0 to 389k
code — string, lengths 26 to 79.6k
docstring — string, lengths 1 to 46.9k
6,100
def get_identity_provider(provider_id):
    try:
        from third_party_auth.provider import Registry
    except ImportError as exception:
        LOGGER.warning("Could not import Registry from third_party_auth.provider")
        LOGGER.warning(exception)
        Registry = None
    try:
        return Registry and Registry.get(provider_id)
    except ValueError:
        return None
Get Identity Provider with given id. Return: Instance of ProviderConfig or None.
6,101
def windowed_iter(src, size):
    # Python 2 code (xrange/izip).
    tees = itertools.tee(src, size)
    try:
        for i, t in enumerate(tees):
            for _ in xrange(i):
                next(t)
    except StopIteration:
        return izip([])
    return izip(*tees)
Returns tuples with length *size* which represent a sliding window over iterable *src*. >>> list(windowed_iter(range(7), 3)) [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] If the iterable is too short to make a window of length *size*, then no window tuples are returned. >>> list(windowed_iter(range(3), 5)) []
6,102
def _parse_value(self, html_data, field):
    scheme = PLAYER_SCHEME[field]
    items = [i.text() for i in html_data(scheme).items()]
    if len(items) == 0:
        return None
    return items
Parse the HTML table to find the requested field's value. All of the values are passed in an HTML table row instead of as individual items. The values need to be parsed by matching the requested attribute with a parsing scheme that sports-reference uses to differentiate stats. This function returns a single value for the given attribute. Parameters ---------- html_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. field : string The name of the attribute to match. Field must be a key in the PLAYER_SCHEME dictionary. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None.
6,103
def send(self,
         send,
         expect=None,
         shutit_pexpect_child=None,
         timeout=None,
         check_exit=None,
         fail_on_empty_before=True,
         record_command=True,
         exit_values=None,
         echo=None,
         escape=False,
         retry=3,
         note=None,
         assume_gnu=True,
         follow_on_commands=None,
         searchwindowsize=None,
         maxread=None,
         delaybeforesend=None,
         secret=False,
         nonewline=False,
         background=False,
         wait=True,
         block_other_commands=True,
         loglevel=logging.INFO):
    shutit_global.shutit_global_object.yield_to_draw()
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    ignore_background = not wait
    return shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
                                                      send,
                                                      expect=expect,
                                                      timeout=timeout,
                                                      check_exit=check_exit,
                                                      fail_on_empty_before=fail_on_empty_before,
                                                      record_command=record_command,
                                                      exit_values=exit_values,
                                                      echo=echo,
                                                      escape=escape,
                                                      retry=retry,
                                                      note=note,
                                                      assume_gnu=assume_gnu,
                                                      loglevel=loglevel,
                                                      follow_on_commands=follow_on_commands,
                                                      searchwindowsize=searchwindowsize,
                                                      maxread=maxread,
                                                      delaybeforesend=delaybeforesend,
                                                      secret=secret,
                                                      nonewline=nonewline,
                                                      run_in_background=background,
                                                      ignore_background=ignore_background,
                                                      block_other_commands=block_other_commands))
Send string as a shell command, and wait until the expected output is seen (either a string or any from a list of strings) before returning. The expected string will default to the currently-set default expected string (see get_default_shutit_pexpect_session_expect) Returns the pexpect return value (ie which expected string in the list matched) @param send: See shutit.ShutItSendSpec @param expect: See shutit.ShutItSendSpec @param shutit_pexpect_child: See shutit.ShutItSendSpec @param timeout: See shutit.ShutItSendSpec @param check_exit: See shutit.ShutItSendSpec @param fail_on_empty_before: See shutit.ShutItSendSpec @param record_command: See shutit.ShutItSendSpec @param exit_values: See shutit.ShutItSendSpec @param echo: See shutit.ShutItSendSpec @param escape: See shutit.ShutItSendSpec @param retry: See shutit.ShutItSendSpec @param note: See shutit.ShutItSendSpec @param assume_gnu: See shutit.ShutItSendSpec @param wait: See shutit.ShutItSendSpec @param block_other_commands: See shutit.ShutItSendSpec.block_other_commands @return: The pexpect return value (ie which expected string in the list matched) @rtype: string
6,104
def masked(name, runtime=False):
    # NOTE: string literals were lost in extraction; the dictionary keys
    # follow the standard Salt state return convention, and the comment
    # messages are approximations.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    if 'service.masked' not in __salt__:
        ret['comment'] = 'Service masking is not available on this minion'
        ret['result'] = False
        return ret
    mask_type = 'runtime masked' if runtime else 'masked'
    expected_changes = {mask_type: {'old': False, 'new': True}}
    try:
        if __salt__['service.masked'](name, runtime):
            ret['comment'] = 'Service {0} is already {1}'.format(name, mask_type)
            return ret
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = expected_changes
            ret['comment'] = 'Service {0} would be {1}'.format(name, mask_type)
            return ret
        __salt__['service.mask'](name, runtime)
        if __salt__['service.masked'](name, runtime):
            ret['changes'] = expected_changes
            ret['comment'] = 'Service {0} was {1}'.format(name, mask_type)
        else:
            ret['comment'] = 'Failed to mask service {0}'.format(name)
        return ret
    except CommandExecutionError as exc:
        ret['result'] = False
        ret['comment'] = exc.strerror
        return ret
.. versionadded:: 2017.7.0 .. note:: This state is only available on minions which use systemd_. Ensures that the named service is masked (i.e. prevented from being started). name Name of the service to mask runtime : False By default, this state will manage an indefinite mask for the named service. Set this argument to ``True`` to runtime mask the service. .. note:: It is possible for a service to have both indefinite and runtime masks set for it. Therefore, this state will manage a runtime or indefinite mask independently of each other. This means that if the service is already indefinitely masked, running this state with ``runtime`` set to ``True`` will _not_ remove the indefinite mask before setting a runtime mask. In these cases, if it is desirable to ensure that the service is runtime masked and not indefinitely masked, pair this state with a :py:func:`service.unmasked <salt.states.service.unmasked>` state, like so: .. code-block:: yaml mask_runtime_foo: service.masked: - name: foo - runtime: True unmask_indefinite_foo: service.unmasked: - name: foo - runtime: False .. _systemd: https://freedesktop.org/wiki/Software/systemd/
6,105
def run(fn, blocksize, seed, c, delta):
    # NOTE: the open() mode literal was lost in extraction; 'rb' is assumed
    # since the encoder writes raw bytes to sys.stdout.buffer.
    with open(fn, 'rb') as f:
        for block in encode.encoder(f, blocksize, seed, c, delta):
            sys.stdout.buffer.write(block)
Run the encoder until the channel is broken, signalling that the receiver has successfully reconstructed the file
6,106
def ext_pillar(minion_id, pillar, config_file):
    # NOTE: the log-message literals and the body that performs the LDAP
    # searches (defining `config`, `source`, `result` and `data`) were lost
    # in extraction; only the surviving skeleton is reconstructed here.
    config_template = None
    try:
        config_template = _render_template(config_file)
    except jinja2.exceptions.TemplateNotFound:
        log.debug('missing configuration file %s', config_file)
    except Exception:
        log.debug('failed to render template for %s', config_file,
                  exc_info=True)
    if not config_template:
        return {}
    # ... (LDAP search loop lost in extraction) ...
    if result:
        data = _result_to_dict(data, result, config, source)
    return data
Execute LDAP searches and return the aggregated data
6,107
async def cancel_handler(message: types.Message, state: FSMContext, raw_state: Optional[str] = None):
    if raw_state is None:
        return
    await state.finish()
    # NOTE: the reply-text literal was lost in extraction; 'Cancelled.' is a placeholder.
    await message.reply('Cancelled.', reply_markup=types.ReplyKeyboardRemove())
Allow user to cancel any action
6,108
def run(self, tokens):
    for fn in self._stack:
        results = []
        for i, token in enumerate(tokens):
            result = fn(token, i, tokens)
            if not result:
                continue
            if isinstance(result, (list, tuple)):
                results.extend(result)
            else:
                results.append(result)
        tokens = results
    return tokens
Runs the current list of functions that make up the pipeline against the passed tokens.
6,109
def render(self, context):
    import markdown
    content = self.get_content_from_context(context)
    return markdown.markdown(content)
Render markdown.
6,110
def _getSyntaxByXmlFileName(self, xmlFileName):
    import qutepart.syntax.loader
    with self._loadedSyntaxesLock:
        if not xmlFileName in self._loadedSyntaxes:
            xmlFilePath = os.path.join(os.path.dirname(__file__), "data", "xml", xmlFileName)
            syntax = Syntax(self)
            self._loadedSyntaxes[xmlFileName] = syntax
            qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath)
        return self._loadedSyntaxes[xmlFileName]
Get syntax by its xml file name
6,111
def lstring_as_obj(true_or_false=None):
    # NOTE: the dictionary-key literals were lost in extraction; they are
    # restored from the docstring's behavior ('lstring_as_obj' toggle, and
    # an 'S%i' dtype built from a default string length, 'S50' per the
    # doctest).
    if true_or_false is not None:
        _default_types_status['lstring_as_obj'] = true_or_false
        numpy.typeDict[u'lstring'] = numpy.object_ \
            if _default_types_status['lstring_as_obj'] \
            else 'S%i' % _default_types_status['default_strlen']
    return _default_types_status['lstring_as_obj']
Toggles whether lstrings should be treated as strings or as objects. When FieldArrays is first loaded, the default is True. Parameters ---------- true_or_false : {None|bool} Pass True to map lstrings to objects; False otherwise. If None provided, just returns the current state. Return ------ current_stat : bool The current state of lstring_as_obj. Examples -------- >>> from pycbc.io import FieldArray >>> FieldArray.lstring_as_obj() True >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')]) FieldArray([(0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,)], dtype=[('foo', 'O')]) >>> FieldArray.lstring_as_obj(False) False >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')]) FieldArray([('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',)], dtype=[('foo', 'S50')])
6,112
def LogNormSpheres(q, A, mu, sigma, N=1000):
    Rmin = 0
    Rmax = np.exp(mu + 3 * sigma)
    R = np.linspace(Rmin, Rmax, N + 1)[1:]
    P = 1 / np.sqrt(2 * np.pi * sigma ** 2 * R ** 2) * np.exp(-(np.log(R) - mu) ** 2 / (2 * sigma ** 2))

    def Fsphere_outer(q, R):
        qR = np.outer(q, R)
        q1 = np.outer(q, np.ones_like(R))
        return 4 * np.pi / q1 ** 3 * (np.sin(qR) - qR * np.cos(qR))

    I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P))
    return A * I.sum(1) / P.sum()
Scattering of a population of non-correlated spheres (radii from a log-normal distribution) Inputs: ------- ``q``: independent variable ``A``: scaling factor ``mu``: expectation of ``ln(R)`` ``sigma``: standard deviation of ``ln(R)`` Non-fittable inputs: -------------------- ``N``: the (integer) number of spheres Formula: -------- The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a log-normal distribution of the radii.
6,113
def print_loading(self, wait, message):
    # NOTE: the spinner-character literals were lost in extraction; a
    # conventional four-frame spinner is assumed. The format-dict keys are
    # restored from the "%(message)s... %(tag)s" format string.
    tags = ['\\', '|', '/', '-']
    for i in range(wait):
        time.sleep(0.25)
        sys.stdout.write("%(message)s... %(tag)s\r" % {
            'message': message,
            'tag': tags[i % 4]
        })
        sys.stdout.flush()
    sys.stdout.write("%s... Done...\n" % message)
    sys.stdout.flush()
Print a loading message on screen. .. note:: the loading message is only written to `sys.stdout` :param int wait: seconds to wait :param str message: message to print :return: None
6,114
def from_json(cls, json, image_config=None):
    cls.image_config = image_config
    return cls(**{
        attr: json.get(attr if key is None else key)
        for attr, key in cls.JSON_MAPPING.items()
    })
Create a model instance Arguments: json (:py:class:`dict`): The parsed JSON data. image_config (:py:class:`dict`): The API image configuration data. Returns: :py:class:`BaseModel`: The model instance.
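A minimal usage sketch for from_json above; the Episode class and its JSON_MAPPING are hypothetical, illustrating the attribute-to-key mapping (a None key means the JSON key equals the attribute name):

class Episode(BaseModel):
    # attr -> JSON key; None means the JSON key matches the attribute name
    JSON_MAPPING = {'title': None, 'episode_id': 'id'}

ep = Episode.from_json({'title': 'Pilot', 'id': 1})
# equivalent to Episode(title='Pilot', episode_id=1)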
6,115
def generate_delete_user_command(username=None, manage_home=None):
    # NOTE: string literals were lost in extraction; the platform names and
    # format strings below are plausible reconstructions, flagged as
    # assumptions.
    command = None
    remove_home = '-r' if manage_home else ''
    if get_platform() in ('Linux', 'OpenBSD'):
        command = '{0} {1} {2} {3}'.format(sudo_check(), LINUX_CMD_USERDEL,
                                           remove_home, username)
    elif get_platform() == 'FreeBSD':
        command = '{0} {1} userdel {2} -n {3}'.format(sudo_check(), FREEBSD_CMD_PW,
                                                      remove_home, username)
    if command:
        return shlex.split(str(command))
Generate command to delete a user. args: username (str): user name manage_home (bool): manage home directory returns: list: The user delete command string split into shell-like syntax
6,116
def parse_known_chained(self, args=None):
    ns, remainder = self.parse_known_args(args)
    kws = vars(ns)
    return self._parse2subparser_funcs(kws), remainder
Parse the arguments directly to the function used for setup This function parses the command line arguments and passes them to the function that has been used for the :meth:`setup_args` method. Parameters ---------- args: list The arguments passed to the :meth:`parse_args` function Returns ------- argparse.Namespace The namespace with mapping from command name to the function return list The remaining arguments that could not be interpreted See also -------- parse_known
6,117
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    for arg in ARGUMENTS:
        if "action" in arg:
            if arg["short"] is not None:
                parser.add_argument(arg["short"], arg["long"],
                                    action=arg["action"], help=arg["help"])
            else:
                parser.add_argument(arg["long"], action=arg["action"],
                                    help=arg["help"])
        else:
            if arg["short"] is not None:
                parser.add_argument(arg["short"], arg["long"],
                                    nargs=arg["nargs"], type=arg["type"],
                                    default=arg["default"], help=arg["help"])
            else:
                parser.add_argument(arg["long"], nargs=arg["nargs"],
                                    type=arg["type"], default=arg["default"],
                                    help=arg["help"])
    vargs = vars(parser.parse_args())
    command = vargs["command"]
    string = to_unicode_string(vargs["string"])
    if command not in COMMAND_MAP:
        parser.print_help()
        sys.exit(2)
    COMMAND_MAP[command](string, vargs)
    sys.exit(0)
Entry point.
6,118
def inten(function):
    "Decorator. Attempts to convert return value to int"
    def wrapper(*args, **kwargs):
        return coerce_to_int(function(*args, **kwargs))
    return wrapper
Decorator. Attempts to convert return value to int
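A usage sketch for the decorator above, with a stand-in coerce_to_int (the real helper is not shown in this row):

def coerce_to_int(value):
    # stand-in for the helper assumed by inten()
    try:
        return int(value)
    except (TypeError, ValueError):
        return value

@inten
def answer():
    return "42"

assert answer() == 42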
6,119
def NEW_DEBUG_FRAME(self, requestHeader):
    if self.DEBUG_FLAG:
        new_frame = [requestHeader, None, None, None]
        if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1:
            self._frameBuffer.append(new_frame)
        else:
            self._frameBuffer[0] = new_frame
        self._frameCount = len(self._frameBuffer) - 1
Initialize a debug frame with requestHeader Frame count is updated and will be attached to respond header The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data] Some of them may be None
6,120
def jwt_verify_token(headers):
    # NOTE: the configuration-key literals and the claim name were lost in
    # extraction; the names below are placeholders.
    token = headers.get(current_app.config['JWT_TOKEN_HEADER'])
    if token is None:
        raise JWTInvalidHeaderError
    authentication_type = current_app.config['JWT_AUTH_HEADER_TYPE']
    if authentication_type is not None:
        prefix, token = token.split()
        if prefix != authentication_type:
            raise JWTInvalidHeaderError
    try:
        decode = jwt_decode_token(token)
        if current_user.get_id() != decode.get('sub'):
            raise JWTInvalidIssuer
        return decode
    except _JWTDecodeError as exc:
        raise_from(JWTDecodeError(), exc)
    except _JWTExpiredToken as exc:
        raise_from(JWTExpiredToken(), exc)
Verify the JWT token. :param dict headers: The request headers. :returns: The token data. :rtype: dict
6,121
def new_instance(cls, classname):
    try:
        return javabridge.static_call(
            "Lweka/core/Utils;", "forName",
            "(Ljava/lang/Class;Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/Object;",
            javabridge.class_for_name("java.lang.Object"), classname, [])
    except JavaException as e:
        print("Failed to instantiate " + classname + ": " + str(e))
        return None
Creates a new object from the given classname using the default constructor, None in case of error. :param classname: the classname in Java notation (eg "weka.core.DenseInstance") :type classname: str :return: the Java object :rtype: JB_Object
6,122
def _iter_avro_blocks(fo, header, codec, writer_schema, reader_schema):
    # NOTE: two string literals were lost in extraction; the 'sync' header
    # key and the error message are restored per the Avro container spec.
    sync_marker = header['sync']
    read_block = BLOCK_READERS.get(codec)
    if not read_block:
        raise ValueError('Unrecognized codec: %r' % codec)
    while True:
        offset = fo.tell()
        try:
            num_block_records = read_long(fo)
        except StopIteration:
            return
        block_bytes = read_block(fo)
        skip_sync(fo, sync_marker)
        size = fo.tell() - offset
        yield Block(
            block_bytes, num_block_records, codec, reader_schema,
            writer_schema, offset, size
        )
Return iterator over avro blocks.
6,123
def moving_average_bias_ratio(self, date1, date2):
    data1 = self.moving_average(date1)[0]
    data2 = self.moving_average(date2)[0]
    cal_list = []
    for i in range(1, min(len(data1), len(data2)) + 1):
        cal_list.append(data1[-i] - data2[-i])
    cal_list.reverse()
    cont = self.__cal_continue(cal_list)
    return cal_list, cont
Compute the bias ratio of moving-average prices: date1 - date2 :param int data1: n days :param int data2: m days :rtype: tuple (series, oldest to newest; number of consecutive days)
6,124
def normalize_keys(suspect, snake_case=True):
    # NOTE: the regex-replacement literals and the TypeError message were
    # lost in extraction; the standard camelCase-to-snake_case substitution
    # r'\1_\2' is assumed.
    if not isinstance(suspect, dict):
        raise TypeError('suspect must be a dict')
    for key in list(suspect):
        if not isinstance(key, six.string_types):
            continue
        if snake_case:
            s1 = first_cap_re.sub(r'\1_\2', key)
            new_key = all_cap_re.sub(r'\1_\2', s1).lower()
        else:
            new_key = key.lower()
        value = suspect.pop(key)
        if isinstance(value, dict):
            suspect[new_key] = normalize_keys(value, snake_case)
        elif isinstance(value, list):
            for i in range(0, len(value)):
                if isinstance(value[i], dict):
                    normalize_keys(value[i], snake_case)
            suspect[new_key] = value
        else:
            suspect[new_key] = value
    return suspect
Take a dict and convert all of its string-type keys to snake_case.
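Assuming the usual camel-case regexes (first_cap_re = re.compile(r'(.)([A-Z][a-z]+)'), all_cap_re = re.compile(r'([a-z0-9])([A-Z])'), matching the reconstruction above), the function behaves like:

>>> normalize_keys({'CamelCaseKey': {'InnerKey': 1}, 'already_snake': 2})
{'camel_case_key': {'inner_key': 1}, 'already_snake': 2}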
6,125
def _Struct_set_Poly(Poly, pos=None, extent=None, arrayorder=, Type=, Clock=False): Poly = _GG.Poly_Order(Poly, order=, Clock=False, close=True, layout=, Test=True) assert Poly.shape[0]==2, "Arg Poly must be a 2D polygon !" fPfmt = np.ascontiguousarray if arrayorder== else np.asfortranarray NP = Poly.shape[1]-1 P1Max = Poly[:,np.argmax(Poly[0,:])] P1Min = Poly[:,np.argmin(Poly[0,:])] P2Max = Poly[:,np.argmax(Poly[1,:])] P2Min = Poly[:,np.argmin(Poly[1,:])] BaryP = np.sum(Poly[:,:-1],axis=1,keepdims=False)/(Poly.shape[1]-1) BaryL = np.array([(P1Max[0]+P1Min[0])/2., (P2Max[1]+P2Min[1])/2.]) TorP = plg.Polygon(Poly.T) Surf = TorP.area() BaryS = np.array(TorP.center()).flatten() noccur = int(pos.size) Multi = noccur>1 if Type.lower()==: Vol, BaryV = None, None else: Vol, BaryV = _GG.Poly_VolAngTor(Poly) msg = "Pb. with volume computation for Ves object of type !" assert Vol>0., msg Vect = np.diff(Poly,n=1,axis=1) Vect = fPfmt(Vect) Vin = np.array([Vect[1,:],-Vect[0,:]]) if not _GG.Poly_isClockwise(Poly): Vin = -Vin Vin = Vin/np.hypot(Vin[0,:],Vin[1,:])[np.newaxis,:] Vin = fPfmt(Vin) poly = _GG.Poly_Order(Poly, order=arrayorder, Clock=Clock, close=False, layout=, Test=True) circC = BaryS r = np.sqrt(np.sum((poly-circC[:,np.newaxis])**2,axis=0)) circr = np.max(r) dout = {:poly, :pos, :extent, :noccur, :Multi, :NP, :P1Max, :P1Min, :P2Max, :P2Min, :BaryP, :BaryL, :BaryS, :BaryV, :Surf, :Vol, :Vect, :Vin, :circC, :circr, :Clock} return dout
Compute geometrical attributes of a Struct object
6,126
def get_group_gn(dim, dim_per_gp, num_groups):
    assert dim_per_gp == -1 or num_groups == -1, \
        "GroupNorm: can only specify G or C/G."
    if dim_per_gp > 0:
        assert dim % dim_per_gp == 0, \
            "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
        group_gn = dim // dim_per_gp
    else:
        assert dim % num_groups == 0, \
            "dim: {}, num_groups: {}".format(dim, num_groups)
        group_gn = num_groups
    return group_gn
get number of groups used by GroupNorm, based on number of channels.
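The two mutually exclusive modes, as a quick check:

assert get_group_gn(256, -1, 32) == 32   # fix the number of groups G
assert get_group_gn(256, 16, -1) == 16   # fix channels-per-group: 256 // 16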
6,127
def get_device_mac(self) -> str:
    # NOTE: the command-argument literals were lost in extraction; a
    # typical adb invocation for reading the Wi-Fi MAC is assumed.
    output, _ = self._execute(
        '-s', self.device_sn, 'shell', 'cat', '/sys/class/net/wlan0/address'
    )
    return output.strip()
Show device MAC.
6,128
def run_once(func):
    def _inner(*args, **kwargs):
        # NOTE: the log-message literals were lost in extraction;
        # placeholders are used.
        if func.__name__ in CTX.run_once:
            LOGGER.info('%s already ran; returning cached result', func.__name__)
            return CTX.run_once[func.__name__]
        LOGGER.info('running %s', func.__name__)
        result = func(*args, **kwargs)
        CTX.run_once[func.__name__] = result
        return result
    return _inner
Simple decorator to ensure a function is run only once
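A usage sketch; CTX.run_once is assumed to be a dict-like cache, as the decorator body implies:

@run_once
def provision():
    print("provisioning...")
    return 42

provision()  # runs and caches the result under 'provision'
provision()  # returns the cached 42 without running again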
6,129
def _init_vocab(self, analyzed_docs):
    class SetAccum(AccumulatorParam):
        def zero(self, initialValue):
            return set(initialValue)

        def addInPlace(self, v1, v2):
            v1 |= v2
            return v1

    if not self.fixed_vocabulary_:
        accum = analyzed_docs._rdd.context.accumulator(set(), SetAccum())
        analyzed_docs.foreach(
            lambda x: accum.add(set(chain.from_iterable(x))))
        vocabulary = {t: i for i, t in enumerate(accum.value)}
    else:
        vocabulary = self.vocabulary_
    if not vocabulary:
        raise ValueError("empty vocabulary; perhaps the documents only"
                         " contain stop words")
    return vocabulary
Create vocabulary
6,130
def stalta_pick(stream, stalen, ltalen, trig_on, trig_off, freqmin=False,
                freqmax=False, debug=0, show=False):
    # NOTE: several string literals were lost in extraction. The author
    # 'EQcorrscan' is confirmed by the doctest below; 'bandpass' follows
    # from the freqmin/freqmax/corners arguments; the P/S phase hints on
    # vertical/horizontal channels are the seismological convention; the
    # comment text, print messages and plot title are placeholders.
    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='stalta'))
    picks = []
    for tr in stream:
        if tr.stats.channel[-1] == 'Z':
            phase = 'P'
        else:
            phase = 'S'
        if freqmin and freqmax:
            tr.detrend()
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax,
                      corners=3, zerophase=True)
        df = tr.stats.sampling_rate
        cft = classic_sta_lta(tr.data, int(stalen * df), int(ltalen * df))
        if debug > 3:
            plot_trigger(tr, cft, trig_on, trig_off)
        triggers = trigger_onset(cft, trig_on, trig_off)
        for trigger in triggers:
            on = tr.stats.starttime + (trigger[0] / df)
            wav_id = WaveformStreamID(station_code=tr.stats.station,
                                      channel_code=tr.stats.channel,
                                      network_code=tr.stats.network)
            p = Pick(waveform_id=wav_id, phase_hint=phase, time=on)
            if debug > 2:
                print('Pick made:')
                print(p)
            picks.append(p)
    pick_stations = list(set([pick.waveform_id.station_code for pick in picks]))
    for pick_station in pick_stations:
        station_picks = [pick for pick in picks
                         if pick.waveform_id.station_code == pick_station]
        p_time = [pick.time for pick in station_picks if pick.phase_hint == 'P']
        s_time = [pick.time for pick in station_picks if pick.phase_hint == 'S']
        if p_time > s_time:
            p_pick = [pick for pick in station_picks if pick.phase_hint == 'P']
            for pick in p_pick:
                print('P pick after S pick, removing P pick')
                picks.remove(pick)
    if show:
        plotting.pretty_template_plot(stream, picks=picks,
                                      title='Autopicks', size=(8, 9))
    event.picks = picks
    if len(event.picks) > 0:
        event.origins[0].time = min([pick.time for pick in event.picks]) - 1
    return event
Basic sta/lta picker, suggest using alternative in obspy. Simple sta/lta (short-term average/long-term average) picker, using obspy's :func:`obspy.signal.trigger.classic_sta_lta` routine to generate the characteristic function. Currently very basic quick wrapper, there are many other (better) options in obspy in the :mod:`obspy.signal.trigger` module. :type stream: obspy.core.stream.Stream :param stream: The stream to pick on, can be any number of channels. :type stalen: float :param stalen: Length of the short-term average window in seconds. :type ltalen: float :param ltalen: Length of the long-term average window in seconds. :type trig_on: float :param trig_on: sta/lta ratio to trigger a detection/pick :type trig_off: float :param trig_off: sta/lta ratio to turn the trigger off - no further picks\ will be made between exceeding trig_on until trig_off is reached. :type freqmin: float :param freqmin: Low-cut frequency in Hz for bandpass filter :type freqmax: float :param freqmax: High-cut frequency in Hz for bandpass filter :type debug: int :param debug: Debug output level from 0-5. :type show: bool :param show: Show picks on waveform. :returns: :class:`obspy.core.event.event.Event` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.picker import stalta_pick >>> st = read() >>> event = stalta_pick(st, stalen=0.2, ltalen=4, trig_on=10, ... trig_off=1, freqmin=3.0, freqmax=20.0) >>> print(event.creation_info.author) EQcorrscan .. warning:: This function is not designed for accurate picking, rather it can give a first idea of whether picks may be possible. Proceed with caution.
6,131
def get_panels(config):
    task = TaskPanels(config)
    task.execute()
    task = TaskPanelsMenu(config)
    task.execute()
    logging.info("Panels creation finished!")
Execute the panels phase :param config: a Mordred config object
6,132
def create_xz(archive, compression, cmd, verbosity, interactive, filenames):
    # NOTE: the format-name literal was lost in extraction; 'xz' is assumed
    # from the function's purpose.
    return _create(archive, compression, cmd, 'xz', verbosity, filenames)
Create an XZ archive with the lzma Python module.
6,133
def register(self, model, **attr):
    # NOTE: two string literals were lost in extraction: the table.info key
    # (assumed 'bind_label') and the engine-key format (assumed
    # '%s.%s' % (label, table.key)).
    metadata = self.metadata
    if not isinstance(model, Table):
        model_name = self._create_model(model, **attr)
        if not model_name:
            return
        model, name = model_name
        table = model.__table__
        self._declarative_register[name] = model
        if name in self._bases:
            for model in self._bases.pop(name):
                self.register(model)
    else:
        table = model.tometadata(metadata)
        model = table
    engine = None
    label = table.info.get('bind_label')
    keys = ('%s.%s' % (label, table.key), label, None) if label else (None,)
    for key in keys:
        engine = self.get_engine(key)
        if engine:
            break
    assert engine
    self.binds[table] = engine
    return model
Register a model or a table with this mapper :param model: a table or a :class:`.BaseModel` class :return: a Model class or a table
6,134
def incrementSub(self, amount=1):
    self._subProgressBar.setValue(self.subValue() + amount)
    QApplication.instance().processEvents()
Increments the sub-progress bar by amount.
6,135
def nth_combination(iterable, r, index):
    pool = tuple(iterable)
    n = len(pool)
    if (r < 0) or (r > n):
        raise ValueError
    c = 1
    k = min(r, n - r)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i
    if index < 0:
        index += c
    if (index < 0) or (index >= c):
        raise IndexError
    result = []
    while r:
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])
    return tuple(result)
Equivalent to ``list(combinations(iterable, r))[index]``. The subsequences of *iterable* that are of length *r* can be ordered lexicographically. :func:`nth_combination` computes the subsequence at sort position *index* directly, without computing the previous subsequences.
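A quick equivalence check against itertools, mirroring the docstring's claim:

from itertools import combinations

pool, r, index = range(5), 3, 6
assert nth_combination(pool, r, index) == list(combinations(pool, r))[index]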
6,136
def nl_socket_add_memberships(sk, *group):
    if sk.s_fd == -1:
        return -NLE_BAD_SOCK
    for grp in group:
        if not grp:
            break
        if grp < 0:
            return -NLE_INVAL
        try:
            sk.socket_instance.setsockopt(SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, grp)
        except OSError as exc:
            return -nl_syserr2nlerr(exc.errno)
    return 0
Join groups. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L417 Joins the specified groups using the modern socket option. The list of groups has to be terminated by 0. Make sure to use the correct group definitions as the older bitmask definitions for nl_join_groups() are likely to still be present for backward compatibility reasons. Positional arguments: sk -- Netlink socket (nl_sock class instance). group -- group identifier (integer). Returns: 0 on success or a negative error code.
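A usage sketch; note the trailing 0 that terminates the group list, matching the `if not grp: break` check above. nl_socket_alloc and RTNLGRP_LINK are assumed to come from the same bindings:

sk = nl_socket_alloc()  # assumed allocator from the same library
err = nl_socket_add_memberships(sk, RTNLGRP_LINK, 0)
assert err == 0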
6,137
def set_inputs(self, inputs):
    if len(inputs) != len(self.inputs):
        raise RuntimeError(
            "Number of inputs {0:d} does not match number of input nodes {1:d}".format(
                len(inputs), len(self.inputs)))
    for i, v in zip(self.inputs, inputs):
        self.input_values[i] = v
Assign input voltages.
6,138
def ping(self, destination, length=20):
    # NOTE: Python 2 code; the print/format literals were lost in
    # extraction and are approximations.
    print '%s call ping' % self.port
    print 'destination: %s' % destination
    try:
        cmd = 'ping %s size %s' % (destination, str(length))
        print cmd
        self._sendline(cmd)
        self._expect(cmd)
        time.sleep(1)
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("ping() Error: " + str(e))
send ICMPv6 echo request with a given length to a unicast destination address Args: destination: the unicast destination address of ICMPv6 echo request length: the size of ICMPv6 echo request payload
6,139
def create_node(participant_id):
    exp = experiment(session)
    try:
        participant = models.Participant.\
            query.filter_by(id=participant_id).one()
    except NoResultFound:
        return error_response(error_type="/node POST no participant found",
                              status=403)
    check_for_duplicate_assignments(participant)
    if participant.status != "working":
        error_type = "/node POST, status = {}".format(participant.status)
        return error_response(error_type=error_type, participant=participant)
    try:
        network = exp.get_network_for_participant(participant=participant)
        if network is None:
            return Response(dumps({"status": "error"}), status=403)
        node = exp.create_node(participant=participant, network=network)
        assign_properties(node)
        exp.add_node_to_network(node=node, network=network)
        session.commit()
        exp.node_post_request(participant=participant, node=node)
        session.commit()
    except:
        return error_response(error_type="/node POST server error",
                              status=403, participant=participant)
    return success_response(field="node",
                            data=node.__json__(),
                            request_type="/node POST")
Send a POST request to the node table. This makes a new node for the participant, it calls: 1. exp.get_network_for_participant 2. exp.create_node 3. exp.add_node_to_network 4. exp.node_post_request
6,140
def to_delete(datetimes,
              years=0, months=0, weeks=0, days=0,
              hours=0, minutes=0, seconds=0,
              firstweekday=SATURDAY, now=None):
    datetimes = set(datetimes)
    return datetimes - to_keep(datetimes,
                               years=years, months=months,
                               weeks=weeks, days=days,
                               hours=hours, minutes=minutes, seconds=seconds,
                               firstweekday=firstweekday, now=now)
Return a set of datetimes that should be deleted, out of ``datetimes``. See ``to_keep`` for a description of arguments.
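A usage sketch, assuming to_keep implements the retention schedule described there:

from datetime import datetime, timedelta

now = datetime(2024, 1, 31)
backups = {now - timedelta(days=i) for i in range(90)}
# Keep 7 dailies, 4 weeklies and 2 monthlies; everything else is doomed.
doomed = to_delete(backups, days=7, weeks=4, months=2, now=now)
assert doomed <= backups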
6,141
def pfunc(func): if isinstance(func, np.ufunc): return pufunc(func) elif not inspect.isfunction(func): if func.__name__ == : raise ValueError( ) try: return pfunc(func.__call__) except: cls, inst, tb = sys.exc_info() inst = cls( % (func, inst.message)) six.reraise(cls, inst, tb) fargs, fdefaults = get_signature(func) n_fargs = len(fargs) def dtrm_generator(*args, **kwds): name = func.__name__ + + .join([str(arg) for arg in list(args) + list(kwds.values())]) + doc_str = % ( func.__name__, .join([str(arg) for arg in args]), .join([ % (key, str(val)) for key, val in six.iteritems(kwds)])) parents = {} varargs = [] for kwd, val in six.iteritems(kwds): parents[kwd] = val for i in xrange(len(args)): if i < n_fargs: parents[fargs[i]] = args[i] else: varargs.append(args[i]) if len(varargs) == 0: eval_fun = func else: parents[] = varargs def wrapper(**wkwds_in): wkwds = copy(wkwds_in) wargs = [] for arg in fargs: wargs.append(wkwds.pop(arg)) wargs.extend(wkwds.pop()) return func(*wargs, **wkwds) eval_fun = wrapper return pm.Deterministic( eval_fun, doc_str, name, parents, trace=False, plot=False) dtrm_generator.__name__ = func.__name__ + dtrm_generator.__doc__ = % (func.__name__, * 60, func.__doc__) return dtrm_generator
pf = pfunc(func) Returns a function that can be called just like func; however its arguments may be PyMC objects or containers of PyMC objects, and its return value will be a deterministic. Example: >>> A = pymc.Normal('A',0,1,size=10) >>> pprod = pymc.pfunc(numpy.prod) >>> B = pprod(A, axis=0) >>> B <pymc.PyMCObjects.Deterministic 'prod(A_0)' at 0x3ce49b0> >>> B.value -0.0049333289649554912 >>> numpy.prod(A.value) -0.0049333289649554912
6,142
def fuller_scaling(target, DABo, To, Po, temperature='pore.temperature',
                   pressure='pore.pressure'):
    # NOTE: the default dictionary-key literals were lost in extraction;
    # OpenPNM's conventional 'pore.*' keys are assumed.
    Ti = target[temperature]
    Pi = target[pressure]
    value = DABo*(Ti/To)**1.75*(Po/Pi)
    return value
r""" Uses Fuller model to adjust a diffusion coefficient for gases from reference conditions to conditions of interest Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. DABo : float, array_like Diffusion coefficient at reference conditions Po, To : float, array_like Pressure & temperature at reference conditions, respectively pressure : string The dictionary key containing the pressure values in Pascals (Pa) temperature : string The dictionary key containing the temperature values in Kelvin (K)
6,143
def _termination_callback(self, returncode):
    if self.started:
        log.info("QEMU process has stopped, return code: %d", returncode)
        yield from self.stop()
        if returncode != 0 and (returncode != 1 or not sys.platform.startswith("win")):
            self.project.emit("log.error", {"message": "QEMU process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())})
Called when the process has stopped. :param returncode: Process returncode
6,144
def get_between_ngrams(c, attrib="words", n_min=1, n_max=1, lower=True):
    if len(c) != 2:
        raise ValueError("Only applicable to binary Candidates")
    span0 = _to_span(c[0])
    span1 = _to_span(c[1])
    if span0.sentence != span1.sentence:
        raise ValueError(
            "Only applicable to Candidates where both spans are "
            "from the same immediate Context."
        )
    distance = abs(span0.get_word_start_index() - span1.get_word_start_index())
    if span0.get_word_start_index() < span1.get_word_start_index():
        for ngram in get_right_ngrams(
            span0,
            window=distance - 1,
            attrib=attrib,
            n_min=n_min,
            n_max=n_max,
            lower=lower,
        ):
            yield ngram
    else:
        for ngram in get_right_ngrams(
            span1,
            window=distance - 1,
            attrib=attrib,
            n_min=n_min,
            n_max=n_max,
            lower=lower,
        ):
            yield ngram
Return the ngrams *between* two unary Mentions of a binary-Mention Candidate. Get the ngrams *between* two unary Mentions of a binary-Mention Candidate, where both share the same sentence Context. :param c: The binary-Mention Candidate to evaluate. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If 'True', all ngrams will be returned in lower case :rtype: a *generator* of ngrams
6,145
def _generate_comparator(cls, field_names):
    # NOTE: two literals were lost in extraction: the descending-order
    # prefix (assumed '-', Django convention) and the LOOKUP_SEP
    # replacement (assumed '.', for attrgetter).
    field_names = list(field_names)
    reverses = [1] * len(field_names)
    for i, field_name in enumerate(field_names):
        if field_name[0] == '-':
            reverses[i] = -1
            field_names[i] = field_name[1:]
    field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names]

    def comparator(i1, i2):
        v1 = attrgetter(*field_names)(i1)
        v2 = attrgetter(*field_names)(i2)
        if len(field_names) == 1:
            return cls._cmp(v1, v2) * reverses[0]
        order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses)
        try:
            return next(dropwhile(__not__, order))
        except StopIteration:
            return 0

    return comparator
Construct a comparator function based on the field names. The comparator returns the first non-zero comparison value. Inputs: field_names (iterable of strings): The field names to sort on. Returns: A comparator function.
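Since the comparator is an old-style cmp function, sorting goes through functools.cmp_to_key; MyModel, items, and the field names are hypothetical, and the '-' prefix follows the reconstruction above:

import functools

comparator = MyModel._generate_comparator(['-created', 'name'])
items.sort(key=functools.cmp_to_key(comparator))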
6,146
def kline_echarts(self, code=None): def kline_formater(param): return param.name + + vars(param) if code is None: path_name = + os.sep + + self.type + \ + self.if_fq + kline = Kline( + self.if_fq + + self.type, width=1360, height=700, page_title= ) bar = Bar() data_splits = self.splits() for ds in data_splits: data = [] axis = [] if ds.type[-3:] == : datetime = np.array(ds.date.map(str)) else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array( ds.data.loc[:, [, , , ]] ) kline.add( ds.code[0], datetime, ohlc, mark_point=["max", "min"], is_datazoom_show=True, datazoom_orient= ) return kline else: data = [] axis = [] ds = self.select_code(code) data = [] if self.type[-3:] == : datetime = np.array(ds.date.map(str)) else: datetime = np.array(ds.datetime.map(str)) ohlc = np.array(ds.data.loc[:, [, , , ]]) vol = np.array(ds.volume) kline = Kline( .format(code, self.if_fq, self.type), width=1360, height=700, page_title= ) bar = Bar() kline.add(self.code, datetime, ohlc, mark_point=["max", "min"], is_datazoom_show=True, is_xaxis_show=False, tooltip_formatter=, datazoom_orient=) bar.add( self.code, datetime, vol, is_datazoom_show=True, datazoom_xaxis_index=[0, 1] ) grid = Grid(width=1360, height=700, page_title=) grid.add(bar, grid_top="80%") grid.add(kline, grid_bottom="30%") return grid
plot the market_data
6,147
def pprint(self):
    strings = []
    for key in sorted(self.keys()):
        values = self[key]
        for value in values:
            strings.append("%s=%s" % (key, value))
    return "\n".join(strings)
Print tag key=value pairs.
6,148
def post_process(self, tagnum2name):
    for tag, value in self.raw_ifd.items():
        try:
            tag_name = tagnum2name[tag]
        except KeyError:
            # NOTE: the construction of `msg` was lost in extraction; a
            # plausible message is substituted.
            msg = 'Unrecognized tag ({0}).'.format(tag)
            warnings.warn(msg, UserWarning)
            tag_name = tag
        self.processed_ifd[tag_name] = value
Map the tag name instead of tag number to the tag value.
6,149
def write_source_description(self, capability_lists=None, outfile=None, links=None):
    rsd = SourceDescription(ln=links)
    rsd.pretty_xml = self.pretty_xml
    if (capability_lists is not None):
        for uri in capability_lists:
            rsd.add_capability_list(uri)
    if (outfile is None):
        print(rsd.as_xml())
    else:
        rsd.write(basename=outfile)
Write a ResourceSync Description document to outfile or STDOUT.
6,150
def parseFASTACommandLineOptions(args):
    if not (args.fasta or args.fastq or args.fasta_ss):
        args.fasta = True
    readClass = readClassNameToClass[args.readClass]
    if args.fasta:
        from dark.fasta import FastaReads
        return FastaReads(args.fastaFile, readClass=readClass)
    elif args.fastq:
        from dark.fastq import FastqReads
        return FastqReads(args.fastaFile, readClass=readClass)
    else:
        from dark.fasta_ss import SSFastaReads
        return SSFastaReads(args.fastaFile, readClass=readClass)
Examine parsed command-line options and return a Reads instance. @param args: An argparse namespace, as returned by the argparse C{parse_args} function. @return: A C{Reads} subclass instance, depending on the type of FASTA file given.
6,151
def _set_default_cfg_profile(self):
    # NOTE: the profile-name literals were lost in extraction; the names
    # below are placeholders for the iplus and legacy defaults.
    try:
        cfgplist = self.config_profile_list()
        if self.default_cfg_profile not in cfgplist:
            self.default_cfg_profile = ('defaultNetworkUniversalEfProfile'
                                        if self._is_iplus else
                                        'defaultNetworkIpv4EfProfile')
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to send request to DCNM.")
        self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'
Set default network config profile. Check whether the default_cfg_profile value exists in the current version of DCNM. If not, set it to a new default value supported by the latest version.
6,152
def _get_samples_to_process(fn, out_dir, config, force_single, separators):
    out_dir = os.path.abspath(out_dir)
    samples = defaultdict(list)
    with open(fn) as handle:
        for l in handle:
            if l.find("description") > 0:
                logger.info("Skipping header.")
                continue
            cols = l.strip().split(",")
            if len(cols) > 0:
                if len(cols) < 2:
                    raise ValueError("Line needs 2 values: file and name.")
                if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]):
                    if cols[0].find(" ") > -1:
                        new_name = os.path.abspath(cols[0].replace(" ", "_"))
                        logger.warning("Space finds in %s. Linked to %s." % (cols[0], new_name))
                        logger.warning("Please, avoid names with spaces in the future.")
                        utils.symlink_plus(os.path.abspath(cols[0]), new_name)
                        cols[0] = new_name
                    samples[cols[1]].append(cols)
                else:
                    logger.info("skipping %s, file does not exist." % cols[0])
                    # NOTE: the rest of this branch (the record built from
                    # files/out_file/fn/anno/config/name/out_dir) was garbled
                    # in extraction and could not be recovered.
    return [samples[sample] for sample in samples]
Parse a CSV file with one line per input file, merging all files that share the same description name.
6,153
def force_hashable(obj, recursive=True):
    # NOTE: the attribute-name literals were lost in extraction; the dunder
    # and dict-protocol names below are reconstructions consistent with the
    # docstring's behavior.
    if hasattr(obj, '__hash__') and not hasattr(obj, '__len__') and not hasattr(obj, '__iter__'):
        try:
            hash(obj)
            return obj
        except (IndexError, ValueError, AttributeError, TypeError):
            pass
    if hasattr(obj, '__iter__'):
        if hasattr(obj, 'keys') and hasattr(obj, 'values'):
            return force_hashable(tuple(obj.items()))
        if recursive:
            return tuple(force_hashable(item) for item in obj)
        return tuple(obj)
    return str(obj)
Force frozenset() command to freeze the order and contents of mutables and iterables like lists, dicts, generators Useful for memoization and constructing dicts or hashtables where keys must be immutable. FIXME: Rename function because "hashable" is misleading. A better name might be `force_immutable`. because some hashable objects (generators) are tuplized by this function `tuplized` is probably a better name, but strings are left alone, so not quite right >>> force_hashable([1,2.,['3','four'],'five', {'s': 'ix'}]) (1, 2.0, ('3', 'four'), 'five', (('s', 'ix'),)) >>> force_hashable(i for i in range(4)) (0, 1, 2, 3) >>> sorted(force_hashable(Counter('abbccc'))) == [('a', 1), ('b', 2), ('c', 3)] True
6,154
def _legal_operations(self, model, tabu_list=[], max_indegree=None):
    # NOTE: the operation-name literals were lost in extraction; pgmpy's
    # '+' (add), '-' (remove) and 'flip' labels are restored.
    local_score = self.scoring_method.local_score
    nodes = self.state_names.keys()
    potential_new_edges = (set(permutations(nodes, 2)) -
                           set(model.edges()) -
                           set([(Y, X) for (X, Y) in model.edges()]))

    for (X, Y) in potential_new_edges:  # (1) add single edge
        if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):
            operation = ('+', (X, Y))
            if operation not in tabu_list:
                old_parents = model.get_parents(Y)
                new_parents = old_parents + [X]
                if max_indegree is None or len(new_parents) <= max_indegree:
                    score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
                    yield (operation, score_delta)

    for (X, Y) in model.edges():  # (2) remove single edge
        operation = ('-', (X, Y))
        if operation not in tabu_list:
            old_parents = model.get_parents(Y)
            new_parents = old_parents[:]
            new_parents.remove(X)
            score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
            yield (operation, score_delta)

    for (X, Y) in model.edges():  # (3) flip single edge
        new_edges = list(model.edges()) + [(Y, X)]
        new_edges.remove((X, Y))
        if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)):
            operation = ('flip', (X, Y))
            if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list:
                old_X_parents = model.get_parents(X)
                old_Y_parents = model.get_parents(Y)
                new_X_parents = old_X_parents + [Y]
                new_Y_parents = old_Y_parents[:]
                new_Y_parents.remove(X)
                if max_indegree is None or len(new_X_parents) <= max_indegree:
                    score_delta = (local_score(X, new_X_parents) +
                                   local_score(Y, new_Y_parents) -
                                   local_score(X, old_X_parents) -
                                   local_score(Y, old_Y_parents))
                    yield (operation, score_delta)
Generates a list of legal (= not in tabu_list) graph modifications for a given model, together with their score changes. Possible graph modifications: (1) add, (2) remove, or (3) flip a single edge. For details on scoring see Koller & Fridman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818). If a number `max_indegree` is provided, only modifications that keep the number of parents for each node below `max_indegree` are considered.
6,155
def prov(self):
    if self._prov:
        return self._prov
    elif not self.abstract:
        return self.read_prov()
    raise EmptyDocumentException()
Provenance stored for this document as :py:class:`prov.model.ProvDocument`
6,156
def read_math_env(src, expr):
    # NOTE: the error-message literals were lost in extraction; the texts
    # below are approximations.
    content = src.forward_until(lambda s: s == expr.end)
    if not src.startswith(expr.end):
        end = src.peek()
        explanation = 'Instead got %s' % end if end else 'Reached end of file.'
        raise EOFError('Expecting %s. %s' % (expr.end, explanation))
    else:
        src.forward(1)
    expr.append(content)
    return expr
r"""Read the environment from buffer. Advances the buffer until right after the end of the environment. Adds parsed content to the expression automatically. :param Buffer src: a buffer of tokens :param TexExpr expr: expression for the environment :rtype: TexExpr
6,157
def _change_sample_name(in_file, sample_name, data=None):
    out_file = append_stem(in_file, "_fixed")
    with file_transaction(data, out_file) as tx_out:
        with open(tx_out, "w") as out_handle:
            with open(in_file) as in_handle:
                for line in in_handle:
                    if line.startswith("Status"):
                        line = "Status\t%s.bam" % sample_name
                    out_handle.write("%s\n" % line.strip())
    return out_file
Fix name in feature counts log file to get the same name in multiqc report.
6,158
def fullname(self):
    # The format literal was lost in extraction; restored from the
    # docstring's 't1_c5s96e0' example.
    by_object = self.reddit_session.config.by_object
    return '{0}_{1}'.format(by_object[self.__class__], self.id)
Return the object's fullname. A fullname is an object's kind mapping like `t3` followed by an underscore and the object's base36 id, e.g., `t1_c5s96e0`.
6,159
def safe_process_files(path, files, args, state):
    # Python 2 code.
    for fn in files:
        full_fn = os.path.join(path, fn)
        try:
            if not process_file(path, fn, args, state):
                return False
        except Exception, e:
            sys.stderr.write("error: %s\n%s\n" % (os.path.join(path, fn),
                                                  traceback.format_exc()))
            state.log_failed(full_fn)
            if state.should_quit():
                return False
    return True
Process a number of files in a directory. Catches any exception from the processing and checks if we should fail directly or keep going.
6,160
def add_commands(self):
    # NOTE: the option-name literal was lost in extraction; '--debug' is
    # assumed from the config key used below.
    self.parser.add_argument(
        '--debug', action="count",
        **self.config.default.debug.get_arg_parse_arguments())
You can override this method in order to add your command line arguments to the argparse parser. The configuration file has already been reloaded at this point.
6,161
def flatten(iterable):
    return itertools.chain.from_iterable(
        a if isinstance(a, Iterable) and not isinstance(a, str) else [a]
        for a in iterable)
This function provides a simple way to iterate over a "complex" iterable: for example, given the input [12, [23], (4, 3), "lkjasddf"], it returns an Iterable that yields 12, 23, 4, 3 and "lkjasddf". Args: iterable (Iterable) - A complex iterable that will be flattened Returns: (Iterable): An Iterable that flattens multiple iterables
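The docstring's example as a runnable check (strings are kept whole because of the isinstance(a, str) guard):

assert list(flatten([12, [23], (4, 3), "lkjasddf"])) == [12, 23, 4, 3, "lkjasddf"]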
6,162
def outputs_of(self, partition_index):
    partition = self.partition[partition_index]
    outputs = set(partition).intersection(self.output_indices)
    return tuple(sorted(outputs))
The outputs of the partition at ``partition_index``. Note that this returns a tuple of element indices, since coarse- grained blackboxes may have multiple outputs.
6,163
def euclideanDistance(instance1, instance2, considerDimensions):
    distance = 0
    for x in considerDimensions:
        distance += pow((instance1[x] - instance2[x]), 2)
    return math.sqrt(distance)
Calculate Euclidean Distance between two samples Example use: data1 = [2, 2, 2, 'class_a'] data2 = [4, 4, 4, 'class_b'] distance = euclideanDistance(data1, data2, [0, 1, 2]) :param instance1: list of attributes :param instance2: list of attributes :param considerDimensions: a list of dimensions to consider :return: float euclidean distance between data1 & 2
6,164
def build_catalog(site, datasets, format=None):
    # NOTE: the endpoint names, config keys, error message and dict-key
    # literals were lost in extraction; the names below are plausible
    # placeholders.
    site_url = url_for('site.home', _external=True)
    catalog_url = url_for('site.rdf_catalog', _external=True)
    graph = Graph(namespace_manager=namespace_manager)
    catalog = graph.resource(URIRef(catalog_url))
    catalog.set(RDF.type, DCAT.Catalog)
    catalog.set(DCT.title, Literal(site.title))
    catalog.set(DCT.language, Literal(current_app.config['DEFAULT_LANGUAGE']))
    catalog.set(FOAF.homepage, URIRef(site_url))
    publisher = graph.resource(BNode())
    publisher.set(RDF.type, FOAF.Organization)
    publisher.set(FOAF.name, Literal(current_app.config['SITE_TITLE']))
    catalog.set(DCT.publisher, publisher)
    for dataset in datasets:
        catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph))
    if isinstance(datasets, Paginable):
        if not format:
            raise ValueError('Pagination requires a format')
        catalog.add(RDF.type, HYDRA.Collection)
        catalog.set(HYDRA.totalItems, Literal(datasets.total))
        kwargs = {
            'format': format,
            'page_size': datasets.page_size,
            '_external': True,
        }
        first_url = url_for('site.rdf_catalog_format', page=1, **kwargs)
        page_url = url_for('site.rdf_catalog_format', page=datasets.page, **kwargs)
        last_url = url_for('site.rdf_catalog_format', page=datasets.pages, **kwargs)
        pagination = graph.resource(URIRef(page_url))
        pagination.set(RDF.type, HYDRA.PartialCollectionView)
        pagination.set(HYDRA.first, URIRef(first_url))
        pagination.set(HYDRA.last, URIRef(last_url))
        if datasets.has_next:
            next_url = url_for('site.rdf_catalog_format',
                               page=datasets.page + 1, **kwargs)
            pagination.set(HYDRA.next, URIRef(next_url))
        if datasets.has_prev:
            prev_url = url_for('site.rdf_catalog_format',
                               page=datasets.page - 1, **kwargs)
            pagination.set(HYDRA.previous, URIRef(prev_url))
        catalog.set(HYDRA.view, pagination)
    return catalog
Build the DCAT catalog for this site
6,165
def write_block_data(self, i2c_addr, register, data, force=None):
    length = len(data)
    if length > I2C_SMBUS_BLOCK_MAX:
        raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
    )
    msg.data.contents.block[0] = length
    msg.data.contents.block[1:length + 1] = data
    ioctl(self.fd, I2C_SMBUS, msg)
Write a block of byte data to a given register. :param i2c_addr: i2c address :type i2c_addr: int :param register: Start register :type register: int :param data: List of bytes :type data: list :param force: :type force: Boolean :rtype: None
6,166
def compare(self, textOrFingerprint1, textOrFingerprint2):
    compareList = [self._createDictionary(textOrFingerprint1),
                   self._createDictionary(textOrFingerprint2)]
    metric = self._fullClient.compare(json.dumps(compareList))
    return metric.cosineSimilarity
Returns the semantic similarity of texts or fingerprints. Each argument can be either a text or a fingerprint. Args: textOrFingerprint1, str OR list of integers textOrFingerprint2, str OR list of integers Returns: float: the semantic similarity in the range [0;1] Raises: CorticalioException: if the request was not successful
6,167
def sign(self, signer: Signer):
    message_data = self._data_to_sign()
    self.signature = signer.sign(data=message_data)
Sign message using signer.
6,168
def difference(self, boolean_switches):
    ops = SimStateOptions(self)
    for key in boolean_switches:
        ops.discard(key)
    return ops
[COMPATIBILITY] Make a copy of the current instance, and then discard all options that are in boolean_switches. :param set boolean_switches: A collection of Boolean switches to disable. :return: A new SimStateOptions instance.
6,169
def open_macros(self, filepath):
    try:
        wx.BeginBusyCursor()
        self.main_window.grid.Disable()
        with open(filepath) as macro_infile:
            self.main_window.grid.actions.enter_safe_mode()
            post_command_event(self.main_window, self.SafeModeEntryMsg)
            post_command_event(self.main_window, self.ContentChangedMsg)
            macrocode = macro_infile.read()
            self.grid.code_array.macros += "\n" + macrocode.strip("\n")
            self.grid.main_window.macro_panel.codetext_ctrl.SetText(
                self.grid.code_array.macros)
    except IOError:
        msg = _("Error opening file {filepath}.").format(filepath=filepath)
        post_command_event(self.main_window, self.StatusBarMsg, text=msg)
        return False
    finally:
        self.main_window.grid.Enable()
        wx.EndBusyCursor()
    try:
        post_command_event(self.main_window, self.ContentChangedMsg)
    except TypeError:
        pass
Loads macros from file and marks grid as changed Parameters ---------- filepath: String \tPath to macro file
6,170
def fetch_path(self, name):
    # NOTE: the encoding literal was lost in extraction; UTF-8 is assumed.
    with codecs.open(self.lookup_path(name), encoding='utf-8') as fd:
        return fd.read()
Fetch contents from the path retrieved via lookup_path. No caching will be done.
6,171
def begin(self):
    resp = ws.ws2811_init(self._leds)
    if resp != 0:
        str_resp = ws.ws2811_get_return_t_str(resp)
        # The message literal was lost in extraction; the usual rpi_ws281x
        # wording is assumed.
        raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, str_resp))
Initialize library, must be called once before other functions are called.
6,172
def resolver(schema):
    name = schema.__name__
    if name.endswith("Schema"):
        return name[:-6] or name
    return name
Default implementation of a schema name resolver function
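The suffix-stripping behavior in doctest form (note the `name[:-6] or name` fallback when nothing is left after stripping):

>>> class PetSchema: pass
>>> resolver(PetSchema)
'Pet'
>>> class Schema: pass
>>> resolver(Schema)
'Schema'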
6,173
def add_worksheet(self, name=None):
    # NOTE: the endpoint-key and payload-key literals were lost in
    # extraction; the names below are placeholders.
    url = self.build_url(self._endpoints.get('get_worksheets'))
    response = self.session.post(url, data={'name': name} if name else None)
    if not response:
        return None
    data = response.json()
    return self.worksheet_constructor(parent=self, **{self._cloud_data_key: data})
Adds a new worksheet
6,174
def restore(self, remotepath):
    # NOTE: the request-parameter and URL-suffix literals were lost in
    # extraction; the names below are placeholders.
    rpath = get_pcs_path(remotepath)
    pars = {'method': 'search'}
    self.pd("Searching for fs_id to restore")
    return self.__get(pcsurl + 'file', pars, self.__restore_search_act, rpath)
Usage: restore <remotepath> - \ restore a file from the recycle bin remotepath - the remote path to restore
6,175
def Default(self, *statements):
    assert self.parentStm is None
    self.rank += 1
    self.default = []
    self._register_stements(statements, self.default)
    return self
c-like default of switch statement
6,176
def mvn(*args, **kwargs):
    return tfd.Independent(tfd.Normal(*args, **kwargs),
                           reinterpreted_batch_ndims=1)
Convenience function to efficiently construct a MultivariateNormalDiag.
6,177
async def deaths(self, root):
    # NOTE: the XML tag/attribute literals were lost in extraction; the
    # NationStates API's DEATHS element with per-cause 'type' attributes is
    # assumed.
    return {
        elem.get('type'): float(elem.text)
        for elem in root.find('DEATHS')
    }
Causes of death in the nation, as percentages. Returns ------- an :class:`ApiQuery` of dict with keys of str and values of float
6,178
def scan_cnproxy(self):
    # NOTE: the string literals were lost in extraction; the URL comes from
    # the docstring, while the log message, parser name, CSS selectors and
    # queue keys are assumptions.
    self.logger.info('Scanning http://cn-proxy.com')
    response = requests.get('http://cn-proxy.com')
    soup = BeautifulSoup(response.content, 'lxml')
    tables = soup.find_all('table', class_='sortable')
    for table in tables:
        for tr in table.tbody.find_all('tr'):
            info = tr.find_all('td')
            addr = '{}:{}'.format(info[0].string, info[1].string)
            self.proxy_queue.put({'addr': addr, 'type': 'http'})
Scan candidate (mainland) proxies from http://cn-proxy.com
6,179
def get_instantiated_service(self, name):
    if name not in self.instantiated_services:
        raise UninstantiatedServiceException
    return self.instantiated_services[name]
Get instantiated service by name
6,180
def close(self):
    for c in self.cursors:
        c.close()
    self.cursors = []
    self.impl = None
Close the connection and all associated cursors. This will implicitly roll back any uncommitted operations.
6,181
def manual_configure(): print("Manual configuring jackal") mapping = { : , : } config = Config() host = input_with_default("What is the Elasticsearch host?", config.get(, )) config.set(, , host) if input_with_default("Use SSL?", mapping[config.get(, )]) == : config.set(, , ) if input_with_default("Setup custom server cert?", ) == : ca_certs = input_with_default("Server certificate location?", config.get(, )) config.set(, , ca_certs) else: config.set(, , ) else: config.set(, , ) if input_with_default("Setup client certificates?", mapping[config.get(, )]) == : config.set(, , ) client_cert = input_with_default("Client cert location?", config.get(, )) config.set(, , client_cert) client_key = input_with_default("Client key location?", config.get(, )) config.set(, , client_key) else: config.set(, , ) index = input_with_default("What index prefix should jackal use?", config.get(, )) config.set(, , index) initialize_indices = (input_with_default("Do you want to initialize the indices?", ).lower() == ) nmap_dir = input_with_default("What directory do you want to place the nmap results in?", config.get(, )) if not os.path.exists(nmap_dir): os.makedirs(nmap_dir) config.set(, , nmap_dir) nmap_options = input_with_default("What nmap options do you want to set for (for example )?", config.get(, )) config.set(, , nmap_options) configure_nessus = (input_with_default("Do you want to setup nessus?", ).lower() == ) if configure_nessus: nessus_host = input_with_default("What is the nessus host?", config.get(, )) nessus_template = input_with_default("What template should jackal use?", config.get(, )) nessus_access = input_with_default("What api access key should jackal use?", config.get(, )) nessus_secret = input_with_default("What api secret key should jackal use?", config.get(, )) config.set(, , nessus_host) config.set(, , nessus_template) config.set(, , nessus_access) config.set(, , nessus_secret) configure_pipes = (input_with_default("Do you want to setup named pipes?", ).lower() == ) if configure_pipes: directory = input_with_default("What directory do you want to place the named pipes in?", config.get(, )) config.set(, , directory) config_file = input_with_default("What is the name of the named pipe config?", config.get(, )) config.set(, , config_file) if not os.path.exists(directory): create = (input_with_default("Do you want to create the directory?", ).lower() == ) if create: os.makedirs(directory) if not os.path.exists(os.path.join(config.config_dir, config_file)): f = open(os.path.join(config.config_dir, config_file), ) f.close() config.write_config(initialize_indices)
Function to manually configure jackal.
6,182
def on_message(self, *args, accept_query=False, matcher=None, **kwargs):
    if accept_query:
        def new_matcher(msg: Message):
            ret = True
            if matcher:
                ret = matcher(msg)
            if ret is None or ret is False:
                return ret
            if msg.recipient is not self and not isinstance(msg.sender, User):
                return False
            return ret
    else:
        kwargs.setdefault("channel", self.name)
        new_matcher = matcher
    return self.client.on_message(*args, matcher=new_matcher, **kwargs)
Convenience wrapper of `Client.on_message` pre-bound with `channel=self.name`.
6,183
def t_prepro_ID(self, t):
    r'[_a-zA-Z][_a-zA-Z0-9]*'
    # NOTE: the token-type and lexer-state literals were lost in
    # extraction; 'ID' as the default and the two state switches below are
    # placeholders.
    t.type = reserved_directives.get(t.value.lower(), 'ID')
    if t.type == 'DEFINE':
        t.lexer.begin('define')
    elif t.type == 'PRAGMA':
        t.lexer.begin('pragma')
    return t
r'[_a-zA-Z][_a-zA-Z0-9]*
6,184
def unlink(self):
    if self._closed:
        self._raise_closed()
    self._accessor.unlink(self)
Remove this file or link. If the path is a directory, use rmdir() instead.
6,185
def __create_canvas(self, dimension, pairs, position, **kwargs):
    # The keyword-name literals were lost in extraction; restored from the
    # docstring's keyword list and defaults.
    visible_grid = kwargs.get('visible_grid', True)
    visible_labels = kwargs.get('visible_labels', True)
    visible_axis = kwargs.get('visible_axis', False)
    ax = self.__figure.add_subplot(self.__grid_spec[position])
    if dimension > 1:
        if visible_labels:
            ax.set_xlabel("x%d" % pairs[position][0])
            ax.set_ylabel("x%d" % pairs[position][1])
    else:
        ax.set_ylim(-0.5, 0.5)
        ax.set_yticklabels([])
    if visible_grid:
        ax.grid(True)
    if not visible_axis:
        ax.set_yticklabels([])
        ax.set_xticklabels([])
    return ax
! @brief Create new canvas with user defined parameters to display cluster or chunk of cluster on it. @param[in] dimension (uint): Data-space dimension. @param[in] pairs (list): Pair of coordinates that will be displayed on the canvas. If empty than label will not be displayed on the canvas. @param[in] position (uint): Index position of canvas on a grid. @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis' 'visible_labels', 'visible_grid'). <b>Keyword Args:</b><br> - visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible. By default axis are not displayed. - visible_labels (bool): Defines visibility of labels on each canvas, if True - labels is displayed. By default labels are displayed. - visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed. By default grid is displayed. @return (matplotlib.Axis) Canvas to display cluster of chuck of cluster.
6,186
def _find_bounds_1d(data, x):
    idx = np.searchsorted(data, x)
    if idx == 0:
        idx0 = 0
    elif idx == len(data):
        idx0 = idx - 2
    else:
        idx0 = idx - 1
    return idx0
Find the index of the lower bound where ``x`` should be inserted into ``a`` to maintain order. The index of the upper bound is the index of the lower bound plus 2. Both bound indices must be within the array. Parameters ---------- data : 1D `~numpy.ndarray` The 1D array to search. x : float The value to insert. Returns ------- index : int The index of the lower bound.
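A quick worked example of the clamping at both ends:

import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])
assert _find_bounds_1d(data, 2.5) == 1   # interior value; bounds at indices 1 and 3
assert _find_bounds_1d(data, 0.5) == 0   # clamped at the low end
assert _find_bounds_1d(data, 5.0) == 2   # clamped so idx0 + 2 stays in the array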
6,187
def _list_dir(self, path):
    try:
        elements = [os.path.join(path, x) for x in os.listdir(path)] \
            if os.path.isdir(path) else []
        elements.sort()
    except OSError:
        elements = None
    return elements
returns absolute paths for all entries in a directory
6,188
def load_all_methods(self):
    methods_P = [IDEAL]
    if all((self.Tc, self.Pc, self.omega)):
        methods_P.extend([TSONOPOULOS_EXTENDED, TSONOPOULOS, ABBOTT,
                          PITZER_CURL])
        if self.eos:
            methods_P.append(EOS)
    if self.CASRN in CRC_virial_data.index:
        methods_P.append(CRC_VIRIAL)
        self.CRC_VIRIAL_coeffs = _CRC_virial_data_values[
            CRC_virial_data.index.get_loc(self.CASRN)].tolist()[1:]
    if has_CoolProp and self.CASRN in coolprop_dict:
        methods_P.append(COOLPROP)
        self.CP_f = coolprop_fluids[self.CASRN]
    self.all_methods_P = set(methods_P)
Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`all_methods_P` as a set of methods for which data exists. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters.
6,189
def metric(self, name, count, elapsed):
    if name is None:
        warnings.warn("Ignoring unnamed metric", stacklevel=3)
        return
    with self.lock:
        self.writer.writerow((name, count, "%f" % elapsed))
A metric function that writes a single CSV file :arg str name: name of the metric :arg int count: number of items :arg float elapsed: time in seconds
6,190
def gen_search_gzh_url(keyword, page=1):
    # NOTE: the query-string keys and base URL were lost in extraction;
    # the Sogou WeChat search conventions below are assumptions.
    assert isinstance(page, int) and page > 0
    qs_dict = OrderedDict()
    qs_dict['type'] = _search_type_gzh
    qs_dict['page'] = page
    qs_dict['ie'] = 'utf8'
    qs_dict['query'] = keyword
    return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict))
拼接搜索 公众号 URL Parameters ---------- keyword : str or unicode 搜索文字 page : int, optional 页数 the default is 1 Returns ------- str search_gzh_url
6,191
def rollback(self, revision=None, annotations=None):
    # NOTE: the URL format, HTTP method and response-key literals were lost
    # in extraction; the names below are placeholders.
    rollback = DeploymentRollback()
    rollback.name = self.name
    rollback_config = RollbackConfig()
    if revision is not None:
        rollback_config.revision = revision
    else:
        current_revision = int(self.get_annotation(self.REVISION_ANNOTATION))
        rev = max(current_revision - 1, 0)
        rollback_config.revision = rev
    rollback.rollback_to = rollback_config
    if annotations is not None:
        rollback.updated_annotations = annotations
    url = '{base}/{name}/rollback'.format(base=self.base_url, name=self.name)
    state = self.request(method='POST', url=url, data=rollback.serialize())
    if not state.get('success'):
        status = state.get('status', '')
        reason = state.get('data', dict()).get('message', None)
        message = 'Rollback failed: {0}: {1}'.format(status, reason)
        raise BadRequestException(message)
    time.sleep(0.2)
    self._wait_for_desired_replicas()
    self.get()
    return self
Performs a rollback of the Deployment. If the 'revision' parameter is omitted, we fetch the Deployment's system-generated annotation containing the current revision, and revert to the version immediately preceding the current version. :param revision: The revision to rollback to. :param annotations: Annotations we'd like to update. :return: self
6,192
def stem(self, word):
    # The letter-set literals were lost in extraction; restored per
    # Harman's S-stemmer rules, consistent with the doctests below.
    lowered = word.lower()
    if lowered[-3:] == 'ies' and lowered[-4:-3] not in {'e', 'a'}:
        return word[:-3] + ('Y' if word[-1:].isupper() else 'y')
    if lowered[-2:] == 'es' and lowered[-3:-2] not in {'a', 'o', 'e'}:
        return word[:-1]
    if lowered[-1:] == 's' and lowered[-2:-1] not in {'u', 's'}:
        return word[:-1]
    return word
Return the S-stemmed form of a word. Parameters ---------- word : str The word to stem Returns ------- str Word stem Examples -------- >>> stmr = SStemmer() >>> stmr.stem('summaries') 'summary' >>> stmr.stem('summary') 'summary' >>> stmr.stem('towers') 'tower' >>> stmr.stem('reading') 'reading' >>> stmr.stem('census') 'census'
6,193
def vecs_to_datmesh(x, y):
    x, y = meshgrid(x, y)
    out = zeros(x.shape + (2,), dtype=float)
    out[:, :, 0] = x
    out[:, :, 1] = y
    return out
Converts input arguments x and y to a 2d meshgrid, suitable for calling Means, Covariances and Realizations.
6,194
def handle_GET(self):
    # The response-body literal was lost in extraction; restored from the
    # docstring below.
    self.send_response(404)
    self.end_headers()
    self.wfile.write('error 404 (not found)'.encode())
Overwrite this method to handle a GET request. The default action is to respond with "error 404 (not found)".
6,195
def commit(self, snapshot: Tuple[Hash32, UUID]) -> None:
    _, account_snapshot = snapshot
    self._account_db.commit(account_snapshot)
Commit the journal to the point where the snapshot was taken. This will merge in any changesets that were recorded *after* the snapshot changeset.
6,196
def get_authentication_statement(self, subject, ticket):
    # NOTE: the SAML element and attribute name literals were lost in
    # extraction; the standard SAML 1.1 names are assumed.
    authentication_statement = etree.Element('AuthenticationStatement')
    authentication_statement.set('AuthenticationInstant',
                                 self.instant(instant=ticket.consumed))
    authentication_statement.set('AuthenticationMethod',
                                 self.authn_method_password)
    authentication_statement.append(subject)
    return authentication_statement
Build an AuthenticationStatement XML block for a SAML 1.1 Assertion.
6,197
def _on_state(self, state, client):
    def cb(outputs):
        try:
            distrib, value = outputs.result()
        except CancelledError:
            logger.info("Client {} cancelled.".format(client.ident))
            return
        assert np.all(np.isfinite(distrib)), distrib
        action = np.random.choice(len(distrib), p=distrib)
        client.memory.append(TransitionExperience(
            state, action, reward=None, value=value, prob=distrib[action]))
        self.send_queue.put([client.ident, dumps(action)])
    self.async_predictor.put_task([state], cb)
Launch forward prediction for the new state given by some client.
6,198
def setup(cli):
    if not cli.global_config.loaded:
        setup_dotcloud_account(cli)
    discover_satellite(cli)
    cli.success("Skypipe is ready for action")
Everything to make skypipe ready to use
6,199
def update_playlist_song(self, playlist_id, song_id, op):
    # NOTE: the API action string, payload keys and result-path keys were
    # lost in extraction; the names below are placeholders.
    action = 'playlist.{}_song'.format('add' if op == 'add' else 'del')
    payload = {
        'pid': playlist_id,
        'ids': [song_id]
    }
    code, msg, rv = self.request(action, payload)
    return rv['data']['data']['success'] == 'true'
Delete a song from, or add a song to, a playlist. When deleting, returns True if the song was not in the playlist; when adding, returns True as well if the song is already in the playlist.