def get_identity_provider(provider_id):
    """
    Get Identity Provider with given id.

    Return:
        Instance of ProviderConfig or None.
    """
    try:
        from third_party_auth.provider import Registry  # pylint: disable=redefined-outer-name
    except ImportError as exception:
        LOGGER.warning("Could not import Registry from third_party_auth.provider")
        LOGGER.warning(exception)
        Registry = None  # pylint: disable=redefined-outer-name
    try:
        return Registry and Registry.get(provider_id)
    except ValueError:
        return None

def windowed_iter(src, size):
    """Returns tuples with length *size* which represent a sliding
    window over iterable *src*.

    >>> list(windowed_iter(range(7), 3))
    [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]

    If the iterable is too short to make a window of length *size*,
    then no window tuples are returned.

    >>> list(windowed_iter(range(3), 5))
    []
    """
    # TODO: lists? (for consistency)
    tees = itertools.tee(src, size)
    try:
        # Stagger the tee'd iterators: advance the i-th copy by i positions.
        for i, t in enumerate(tees):
            for _ in range(i):
                next(t)
    except StopIteration:
        return zip([])
    return zip(*tees)

def _parse_value(self, html_data, field):
    """
    Parse the HTML table to find the requested field's value.

    All of the values are passed in an HTML table row instead of as
    individual items. The values need to be parsed by matching the
    requested attribute with a parsing scheme that sports-reference uses
    to differentiate stats. This function returns a single value for the
    given attribute.

    Parameters
    ----------
    html_data : string
        A string containing all of the rows of stats for a given team. If
        multiple tables are being referenced, this will be comprised of
        multiple rows in a single string.
    field : string
        The name of the attribute to match. Field must be a key in the
        PLAYER_SCHEME dictionary.

    Returns
    -------
    list
        A list of all values that match the requested field. If no value
        could be found, returns None.
    """
    scheme = PLAYER_SCHEME[field]
    items = [i.text() for i in html_data(scheme).items()]
    # Stats can be added and removed on a yearly basis. If no stats are
    # found, return None and have that be the value.
    if len(items) == 0:
        return None
    return items

def send(self,
         send,
         expect=None,
         shutit_pexpect_child=None,
         timeout=None,
         check_exit=None,
         fail_on_empty_before=True,
         record_command=True,
         exit_values=None,
         echo=None,
         escape=False,
         retry=3,
         note=None,
         assume_gnu=True,
         follow_on_commands=None,
         searchwindowsize=None,
         maxread=None,
         delaybeforesend=None,
         secret=False,
         nonewline=False,
         background=False,
         wait=True,
         block_other_commands=True,
         loglevel=logging.INFO):
    """Send string as a shell command, and wait until the expected output
    is seen (either a string or any from a list of strings) before
    returning. The expected string will default to the currently-set
    default expected string (see get_default_shutit_pexpect_session_expect).

    Returns the pexpect return value (ie which expected string in the list
    matched).

    @param send:                 See shutit.ShutItSendSpec
    @param expect:               See shutit.ShutItSendSpec
    @param shutit_pexpect_child: See shutit.ShutItSendSpec
    @param timeout:              See shutit.ShutItSendSpec
    @param check_exit:           See shutit.ShutItSendSpec
    @param fail_on_empty_before: See shutit.ShutItSendSpec
    @param record_command:       See shutit.ShutItSendSpec
    @param exit_values:          See shutit.ShutItSendSpec
    @param echo:                 See shutit.ShutItSendSpec
    @param escape:               See shutit.ShutItSendSpec
    @param retry:                See shutit.ShutItSendSpec
    @param note:                 See shutit.ShutItSendSpec
    @param assume_gnu:           See shutit.ShutItSendSpec
    @param wait:                 See shutit.ShutItSendSpec
    @param block_other_commands: See shutit.ShutItSendSpec.block_other_commands
    @return: The pexpect return value (ie which expected string in the list matched)
    @rtype: string
    """
    shutit_global.shutit_global_object.yield_to_draw()
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    ignore_background = not wait
    return shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
                                                      send,
                                                      expect=expect,
                                                      timeout=timeout,
                                                      check_exit=check_exit,
                                                      fail_on_empty_before=fail_on_empty_before,
                                                      record_command=record_command,
                                                      exit_values=exit_values,
                                                      echo=echo,
                                                      escape=escape,
                                                      retry=retry,
                                                      note=note,
                                                      assume_gnu=assume_gnu,
                                                      loglevel=loglevel,
                                                      follow_on_commands=follow_on_commands,
                                                      searchwindowsize=searchwindowsize,
                                                      maxread=maxread,
                                                      delaybeforesend=delaybeforesend,
                                                      secret=secret,
                                                      nonewline=nonewline,
                                                      run_in_background=background,
                                                      ignore_background=ignore_background,
                                                      block_other_commands=block_other_commands))

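# Hedged usage sketch (not from the source): assuming an existing ShutIt
# instance named `shutit`; the command and prompt strings are illustrative.
# `send` returns the index of the expect string that matched.
res = shutit.send('uname -a', expect=['\\$ ', '# '], timeout=10, check_exit=False)
if res == 0:
    shutit.send('echo matched-first-prompt')
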
def masked(name, runtime=False):
    '''
    .. versionadded:: 2017.7.0

    .. note::
        This state is only available on minions which use systemd_.

    Ensures that the named service is masked (i.e. prevented from being
    started).

    name
        Name of the service to mask

    runtime : False
        By default, this state will manage an indefinite mask for the named
        service. Set this argument to ``True`` to runtime mask the service.

        .. note::
            It is possible for a service to have both indefinite and runtime masks
            set for it. Therefore, this state will manage a runtime or indefinite
            mask independently of each other. This means that if the service is
            already indefinitely masked, running this state with ``runtime`` set to
            ``True`` will _not_ remove the indefinite mask before setting a runtime
            mask. In these cases, if it is desirable to ensure that the service is
            runtime masked and not indefinitely masked, pair this state with a
            :py:func:`service.unmasked <salt.states.service.unmasked>` state, like
            so:

            .. code-block:: yaml

                mask_runtime_foo:
                  service.masked:
                    - name: foo
                    - runtime: True

                unmask_indefinite_foo:
                  service.unmasked:
                    - name: foo
                    - runtime: False

    .. _systemd: https://freedesktop.org/wiki/Software/systemd/
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    if 'service.masked' not in __salt__:
        ret['comment'] = 'Service masking not available on this minion'
        ret['result'] = False
        return ret

    mask_type = 'runtime masked' if runtime else 'masked'
    expected_changes = {mask_type: {'old': False, 'new': True}}

    try:
        if __salt__['service.masked'](name, runtime):
            ret['comment'] = 'Service {0} is already {1}'.format(
                name,
                mask_type,
            )
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = expected_changes
            ret['comment'] = 'Service {0} would be {1}'.format(name, mask_type)
            return ret

        __salt__['service.mask'](name, runtime)

        if __salt__['service.masked'](name, runtime):
            ret['changes'] = expected_changes
            ret['comment'] = 'Service {0} was {1}'.format(name, mask_type)
        else:
            ret['comment'] = 'Failed to mask service {0}'.format(name)
        return ret

    except CommandExecutionError as exc:
        ret['result'] = False
        ret['comment'] = exc.strerror
        return ret

def run(fn, blocksize, seed, c, delta):
    """Run the encoder until the channel is broken, signalling that the
    receiver has successfully reconstructed the file
    """
    with open(fn, 'rb') as f:
        for block in encode.encoder(f, blocksize, seed, c, delta):
            sys.stdout.buffer.write(block)

def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               config_file):
    '''
    Execute LDAP searches and return the aggregated data
    '''
    config_template = None
    try:
        config_template = _render_template(config_file)
    except jinja2.exceptions.TemplateNotFound:
        log.debug('pillar_ldap: missing configuration file %s', config_file)
    except Exception:
        log.debug('pillar_ldap: failed to render template for %s',
                  config_file, exc_info=True)

    if not config_template:
        # We don't have a config file
        return {}

    import salt.utils.yaml
    try:
        opts = salt.utils.yaml.safe_load(config_template) or {}
        opts['conf_file'] = config_file
    except Exception as err:
        import salt.log
        msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format(
            config_file, err
        )
        if salt.log.is_console_configured():
            log.warning(msg)
        else:
            print(msg)
        return {}
    else:
        if not isinstance(opts, dict):
            log.warning(
                'pillar_ldap: %s is invalidly formatted, must be a YAML '
                'dictionary. See the documentation for more information.',
                config_file
            )
            return {}

    if 'search_order' not in opts:
        log.warning(
            'pillar_ldap: search_order missing from configuration. See the '
            'documentation for more information.'
        )
        return {}

    data = {}
    for source in opts['search_order']:
        config = opts[source]
        result = _do_search(config)
        log.debug('source %s got result %s', source, result)
        if result:
            data = _result_to_dict(data, result, config, source)
    return data

async def cancel_handler(message: types.Message, state: FSMContext, raw_state: Optional[str] = None):
    """
    Allow user to cancel any action
    """
    if raw_state is None:
        return

    # Cancel state and inform user about it
    await state.finish()
    # And remove keyboard (just in case)
    await message.reply('Canceled.', reply_markup=types.ReplyKeyboardRemove())

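# Hedged wiring sketch (an assumption, not from the source): with aiogram 2.x
# a handler like the one above is typically registered on a Dispatcher so that
# /cancel aborts any FSM state. Bot token and storage choice are placeholders.
from aiogram import Bot, Dispatcher
from aiogram.contrib.fsm_storage.memory import MemoryStorage

bot = Bot(token='BOT_TOKEN')  # placeholder token
dp = Dispatcher(bot, storage=MemoryStorage())
dp.register_message_handler(cancel_handler, commands=['cancel'], state='*')
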
def run(self, tokens):
    """Runs the current list of functions that make up the pipeline against
    the passed tokens."""
    for fn in self._stack:
        results = []
        for i, token in enumerate(tokens):
            # JS ignores additional arguments to the functions but we
            # force pipeline functions to declare (token, i, tokens)
            # or *args
            result = fn(token, i, tokens)
            if not result:
                continue
            if isinstance(result, (list, tuple)):  # simulate Array.concat
                results.extend(result)
            else:
                results.append(result)
        tokens = results

    return tokens

def render(self, context):
    """Render markdown."""
    import markdown

    content = self.get_content_from_context(context)
    return markdown.markdown(content)

def _getSyntaxByXmlFileName(self, xmlFileName):
    """Get syntax by its xml file name
    """
    import qutepart.syntax.loader  # delayed import for avoid cross-imports problem

    with self._loadedSyntaxesLock:
        if not xmlFileName in self._loadedSyntaxes:
            xmlFilePath = os.path.join(os.path.dirname(__file__), "data", "xml", xmlFileName)
            syntax = Syntax(self)
            self._loadedSyntaxes[xmlFileName] = syntax
            qutepart.syntax.loader.loadSyntax(syntax, xmlFilePath)

        return self._loadedSyntaxes[xmlFileName]

def lstring_as_obj(true_or_false=None):
    """Toggles whether lstrings should be treated as strings or as objects.

    When FieldArrays is first loaded, the default is True.

    Parameters
    ----------
    true_or_false : {None|bool}
        Pass True to map lstrings to objects; False otherwise. If None
        provided, just returns the current state.

    Return
    ------
    current_stat : bool
        The current state of lstring_as_obj.

    Examples
    --------
    >>> from pycbc.io import FieldArray
    >>> FieldArray.lstring_as_obj()
    True
    >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
    FieldArray([(0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,),
           (0.0,), (0.0,)],
          dtype=[('foo', 'O')])
    >>> FieldArray.lstring_as_obj(False)
    False
    >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
    FieldArray([('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',),
           ('0.0',), ('0.0',), ('0.0',), ('0.0',)],
          dtype=[('foo', 'S50')])
    """
    if true_or_false is not None:
        _default_types_status['lstring_as_obj'] = true_or_false
        # update the typeDict
        numpy.typeDict[u'lstring'] = numpy.object_ \
            if _default_types_status['lstring_as_obj'] \
            else 'S%i' % _default_types_status['default_strlen']
    return _default_types_status['lstring_as_obj']

def LogNormSpheres(q, A, mu, sigma, N=1000):
    """Scattering of a population of non-correlated spheres (radii from a log-normal distribution)

    Inputs:
    -------
        ``q``: independent variable
        ``A``: scaling factor
        ``mu``: expectation of ``ln(R)``
        ``sigma``: hwhm of ``ln(R)``

    Non-fittable inputs:
    --------------------
        ``N``: the (integer) number of spheres

    Formula:
    --------
        The integral of ``F_sphere^2(q,R) * P(R)`` where ``P(R)`` is a
        log-normal distribution of the radii.
    """
    Rmin = 0
    Rmax = np.exp(mu + 3 * sigma)
    R = np.linspace(Rmin, Rmax, N + 1)[1:]
    # Log-normal probability density evaluated on the radius grid
    P = 1 / np.sqrt(2 * np.pi * sigma ** 2 * R ** 2) * np.exp(-(np.log(R) - mu) ** 2 / (2 * sigma ** 2))

    def Fsphere_outer(q, R):
        qR = np.outer(q, R)
        q1 = np.outer(q, np.ones_like(R))
        return 4 * np.pi / q1 ** 3 * (np.sin(qR) - qR * np.cos(qR))

    I = (Fsphere_outer(q, R) ** 2 * np.outer(np.ones_like(q), P))
    return A * I.sum(1) / P.sum()

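# Illustrative call (parameter values invented; assumes numpy imported as np,
# as in the function body above): intensity for ~50-unit radii on a q-grid.
q = np.linspace(0.01, 1.0, 200)
intensity = LogNormSpheres(q, A=1.0, mu=np.log(50.0), sigma=0.2, N=500)
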
def print_loading(self, wait, message):
    """
    print loading message on screen

    .. note::
        loading message only write to `sys.stdout`

    :param int wait: seconds to wait
    :param str message: message to print
    :return: None
    """
    tags = ['\\', '|', '/', '-']
    for i in range(wait):
        time.sleep(0.25)
        sys.stdout.write("%(message)s... %(tag)s\r" % {
            'message': message,
            'tag': tags[i % 4]
        })
        sys.stdout.flush()
    sys.stdout.write("%s... Done...\n" % message)
    sys.stdout.flush()

def from_json(cls, json, image_config=None):
    """Create a model instance

    Arguments:
        json (:py:class:`dict`): The parsed JSON data.
        image_config (:py:class:`dict`): The API image configuration
            data.

    Returns:
        :py:class:`BaseModel`: The model instance.
    """
    cls.image_config = image_config
    return cls(**{
        attr: json.get(attr if key is None else key)
        for attr, key in cls.JSON_MAPPING.items()
    })

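# Hedged sketch (hypothetical subclass, assuming from_json is a classmethod on
# BaseModel): JSON_MAPPING maps attribute name -> JSON key (None = same name).
class Movie(BaseModel):
    JSON_MAPPING = {'title': None, 'year': 'releaseYear'}

    def __init__(self, title=None, year=None):
        self.title = title
        self.year = year

movie = Movie.from_json({'title': 'Heat', 'releaseYear': 1995})
assert movie.year == 1995
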
def generate_delete_user_command(username=None, manage_home=None):
    """Generate command to delete a user.

    args:
        username (str): user name
        manage_home (bool): manage home directory

    returns:
        list: The user delete command string split into shell-like syntax
    """
    command = None
    remove_home = '-r' if manage_home else ''

    if get_platform() in ('Linux', 'OpenBSD'):
        command = '{0} {1} {2} {3}'.format(sudo_check(), LINUX_CMD_USERDEL, remove_home, username)
    elif get_platform() == 'FreeBSD':  # pragma: FreeBSD
        command = '{0} {1} userdel {2} -n {3}'.format(sudo_check(), FREEBSD_CMD_PW, remove_home, username)

    if command:
        return shlex.split(str(command))

def parse_known_chained(self, args=None):
    """
    Parse the argument directly to the function used for setup

    This function parses the command line arguments to the function that
    has been used for the :meth:`setup_args` method.

    Parameters
    ----------
    args: list
        The arguments parsed to the :meth:`parse_args` function

    Returns
    -------
    argparse.Namespace
        The namespace with mapping from command name to the function
        return
    list
        The remaining arguments that could not be interpreted

    See also
    --------
    parse_known
    """
    ns, remainder = self.parse_known_args(args)
    kws = vars(ns)
    return self._parse2subparser_funcs(kws), remainder

def main():
    """
    Entry point.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    for arg in ARGUMENTS:
        if "action" in arg:
            if arg["short"] is not None:
                parser.add_argument(arg["short"], arg["long"], action=arg["action"], help=arg["help"])
            else:
                parser.add_argument(arg["long"], action=arg["action"], help=arg["help"])
        else:
            if arg["short"] is not None:
                parser.add_argument(arg["short"], arg["long"], nargs=arg["nargs"], type=arg["type"], default=arg["default"], help=arg["help"])
            else:
                parser.add_argument(arg["long"], nargs=arg["nargs"], type=arg["type"], default=arg["default"], help=arg["help"])
    vargs = vars(parser.parse_args())
    command = vargs["command"]
    string = to_unicode_string(vargs["string"])
    if command not in COMMAND_MAP:
        parser.print_help()
        sys.exit(2)
    COMMAND_MAP[command](string, vargs)
    sys.exit(0)

def inten(function):
    "Decorator. Attempts to convert return value to int"
    def wrapper(*args, **kwargs):
        return coerce_to_int(function(*args, **kwargs))
    return wrapper

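# Self-contained demo (assumption: coerce_to_int is an external helper in the
# source module; a bare int() cast stands in for it so the snippet runs alone).
coerce_to_int = int

@inten
def read_port():
    return '8080'

assert read_port() == 8080
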
def NEW_DEBUG_FRAME(self, requestHeader):
    """
    Initialize a debug frame with requestHeader
    Frame count is updated and will be attached to response header
    The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
    Some of them may be None
    """
    if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
        new_frame = [requestHeader, None, None, None]
        if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1:  # pragma no branch (Should be covered)
            self._frameBuffer.append(new_frame)
        else:
            self._frameBuffer[0] = new_frame  # pragma no cover (Should be covered)

        self._frameCount = len(self._frameBuffer) - 1

def jwt_verify_token(headers):
    """Verify the JWT token.

    :param dict headers: The request headers.
    :returns: The token data.
    :rtype: dict
    """
    # Get the token from headers
    token = headers.get(
        current_app.config['OAUTH2SERVER_JWT_AUTH_HEADER']
    )
    if token is None:
        raise JWTInvalidHeaderError

    # Get authentication type
    authentication_type = \
        current_app.config['OAUTH2SERVER_JWT_AUTH_HEADER_TYPE']
    # Check if the type should be checked
    if authentication_type is not None:
        # Get the prefix and the token
        prefix, token = token.split()
        # Check if the type matches
        if prefix != authentication_type:
            raise JWTInvalidHeaderError

    try:
        # Get the token data
        decode = jwt_decode_token(token)
        # Check the integrity of the user
        if current_user.get_id() != decode.get('sub'):
            raise JWTInvalidIssuer
        return decode
    except _JWTDecodeError as exc:
        raise_from(JWTDecodeError(), exc)
    except _JWTExpiredToken as exc:
        raise_from(JWTExpiredToken(), exc)

def new_instance(cls, classname):
    """
    Creates a new object from the given classname using the default constructor, None in case of error.

    :param classname: the classname in Java notation (eg "weka.core.DenseInstance")
    :type classname: str
    :return: the Java object
    :rtype: JB_Object
    """
    try:
        return javabridge.static_call(
            "Lweka/core/Utils;", "forName",
            "(Ljava/lang/Class;Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/Object;",
            javabridge.class_for_name("java.lang.Object"), classname, [])
    except JavaException as e:
        print("Failed to instantiate " + classname + ": " + str(e))
        return None

def _iter_avro_blocks(fo, header, codec, writer_schema, reader_schema):
    """Return iterator over avro blocks."""
    sync_marker = header['sync']

    read_block = BLOCK_READERS.get(codec)
    if not read_block:
        raise ValueError('Unrecognized codec: %r' % codec)

    while True:
        offset = fo.tell()
        try:
            num_block_records = read_long(fo)
        except StopIteration:
            return

        block_bytes = read_block(fo)
        skip_sync(fo, sync_marker)

        size = fo.tell() - offset

        yield Block(
            block_bytes, num_block_records, codec, reader_schema,
            writer_schema, offset, size
        )

def moving_average_bias_ratio(self, date1, date2):
    """ Compute the bias ratio between two moving averages (by price):
    date1 - date2

    :param int date1: n-day window
    :param int date2: m-day window
    :rtype: tuple (series, oldest to newest; number of consecutive days)
    """
    data1 = self.moving_average(date1)[0]
    data2 = self.moving_average(date2)[0]
    cal_list = []
    for i in range(1, min(len(data1), len(data2)) + 1):
        cal_list.append(data1[-i] - data2[-i])
    cal_list.reverse()
    cont = self.__cal_continue(cal_list)
    return cal_list, cont

def normalize_keys(suspect, snake_case=True):
    """
    take a dict and turn all of its type string keys into snake_case
    """
    if not isinstance(suspect, dict):
        raise TypeError('you must pass a dict.')

    for key in list(suspect):
        if not isinstance(key, six.string_types):
            continue
        if snake_case:
            s1 = first_cap_re.sub(r'\1_\2', key)
            new_key = all_cap_re.sub(r'\1_\2', s1).lower()  # .replace('-', '_')
        else:
            new_key = key.lower()

        value = suspect.pop(key)
        if isinstance(value, dict):
            suspect[new_key] = normalize_keys(value, snake_case)
        elif isinstance(value, list):
            for i in range(0, len(value)):
                if isinstance(value[i], dict):
                    normalize_keys(value[i], snake_case)
            suspect[new_key] = value
        else:
            suspect[new_key] = value

    return suspect

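# Runnable sketch: the two regexes below are assumed definitions standing in
# for the module-level first_cap_re / all_cap_re the function relies on (the
# common camelCase-splitting idiom); six must be installed.
import re
first_cap_re = re.compile(r'(.)([A-Z][a-z]+)')
all_cap_re = re.compile(r'([a-z0-9])([A-Z])')

payload = {'userId': 1, 'profileData': {'firstName': 'Ada'}}
assert normalize_keys(payload) == {'user_id': 1, 'profile_data': {'first_name': 'Ada'}}
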
def _Struct_set_Poly(Poly, pos=None, extent=None, arrayorder='C',
                     Type='Tor', Clock=False):
    """ Compute geometrical attributes of a Struct object """
    # Make Poly closed, counter-clockwise, with '(cc,N)' layout and arrayorder
    Poly = _GG.Poly_Order(Poly, order='C', Clock=False,
                          close=True, layout='(cc,N)', Test=True)
    assert Poly.shape[0] == 2, "Arg Poly must be a 2D polygon !"
    fPfmt = np.ascontiguousarray if arrayorder == 'C' else np.asfortranarray

    # Get all remarkable points and moments
    NP = Poly.shape[1] - 1
    P1Max = Poly[:, np.argmax(Poly[0, :])]
    P1Min = Poly[:, np.argmin(Poly[0, :])]
    P2Max = Poly[:, np.argmax(Poly[1, :])]
    P2Min = Poly[:, np.argmin(Poly[1, :])]
    BaryP = np.sum(Poly[:, :-1], axis=1, keepdims=False) / (Poly.shape[1] - 1)
    BaryL = np.array([(P1Max[0] + P1Min[0]) / 2., (P2Max[1] + P2Min[1]) / 2.])
    TorP = plg.Polygon(Poly.T)
    Surf = TorP.area()
    BaryS = np.array(TorP.center()).flatten()

    # Get lim-related indicators
    noccur = int(pos.size)
    Multi = noccur > 1

    # Get Tor-related quantities
    if Type.lower() == 'lin':
        Vol, BaryV = None, None
    else:
        Vol, BaryV = _GG.Poly_VolAngTor(Poly)
        msg = "Pb. with volume computation for Ves object of type 'Tor' !"
        assert Vol > 0., msg

    # Compute the non-normalized vector of each side of the Poly
    Vect = np.diff(Poly, n=1, axis=1)
    Vect = fPfmt(Vect)

    # Compute the normalised vectors directed inwards
    Vin = np.array([Vect[1, :], -Vect[0, :]])
    if not _GG.Poly_isClockwise(Poly):
        Vin = -Vin
    Vin = Vin / np.hypot(Vin[0, :], Vin[1, :])[np.newaxis, :]
    Vin = fPfmt(Vin)

    poly = _GG.Poly_Order(Poly, order=arrayorder, Clock=Clock,
                          close=False, layout='(cc,N)', Test=True)

    # Get bounding circle
    circC = BaryS
    r = np.sqrt(np.sum((poly - circC[:, np.newaxis]) ** 2, axis=0))
    circr = np.max(r)

    dout = {'Poly': poly, 'pos': pos, 'extent': extent,
            'noccur': noccur, 'Multi': Multi, 'nP': NP,
            'P1Max': P1Max, 'P1Min': P1Min, 'P2Max': P2Max, 'P2Min': P2Min,
            'BaryP': BaryP, 'BaryL': BaryL, 'BaryS': BaryS, 'BaryV': BaryV,
            'Surf': Surf, 'VolAng': Vol, 'Vect': Vect, 'VIn': Vin,
            'circ-C': circC, 'circ-r': circr, 'Clock': Clock}
    return dout

def get_group_gn(dim, dim_per_gp, num_groups):
    """get number of groups used by GroupNorm, based on number of channels."""
    assert dim_per_gp == -1 or num_groups == -1, \
        "GroupNorm: can only specify G or C/G."
    if dim_per_gp > 0:
        assert dim % dim_per_gp == 0, \
            "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
        group_gn = dim // dim_per_gp
    else:
        assert dim % num_groups == 0, \
            "dim: {}, num_groups: {}".format(dim, num_groups)
        group_gn = num_groups
    return group_gn

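# Worked examples: 256 channels at 32 channels per group gives 8 groups;
# alternatively the group count can be fixed directly.
assert get_group_gn(256, dim_per_gp=32, num_groups=-1) == 8
assert get_group_gn(256, dim_per_gp=-1, num_groups=32) == 32
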
def get_device_mac(self) -> str:
    '''Show device MAC.'''
    output, _ = self._execute(
        '-s', self.device_sn, 'shell', 'cat', '/sys/class/net/wlan0/address')
    return output.strip()

def run_once(func):
    """
    Simple decorator to ensure a function is run only once
    """
    def _inner(*args, **kwargs):
        if func.__name__ in CTX.run_once:
            LOGGER.info('skipping %s', func.__name__)
            return CTX.run_once[func.__name__]
        LOGGER.info('running: %s', func.__name__)
        result = func(*args, **kwargs)
        CTX.run_once[func.__name__] = result
        return result
    return _inner

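# Hedged demo: CTX and LOGGER are external module objects, so stand-ins are
# defined here (assumptions) to make the snippet self-contained.
import logging
import types

LOGGER = logging.getLogger(__name__)
CTX = types.SimpleNamespace(run_once={})

@run_once
def expensive_setup():
    return 42

assert expensive_setup() == 42  # runs and caches the result
assert expensive_setup() == 42  # second call is skipped, cached value returned
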
def _init_vocab(self, analyzed_docs):
    """Create vocabulary
    """
    class SetAccum(AccumulatorParam):

        def zero(self, initialValue):
            return set(initialValue)

        def addInPlace(self, v1, v2):
            v1 |= v2
            return v1

    if not self.fixed_vocabulary_:
        # Collect the distinct tokens across the cluster via a set accumulator.
        accum = analyzed_docs._rdd.context.accumulator(set(), SetAccum())
        analyzed_docs.foreach(
            lambda x: accum.add(set(chain.from_iterable(x))))
        vocabulary = {t: i for i, t in enumerate(accum.value)}
    else:
        vocabulary = self.vocabulary_

    if not vocabulary:
        raise ValueError("empty vocabulary; perhaps the documents only"
                         " contain stop words")

    return vocabulary

def stalta_pick(stream, stalen, ltalen, trig_on, trig_off, freqmin=False,
                freqmax=False, debug=0, show=False):
    """
    Basic sta/lta picker, suggest using alternative in obspy.

    Simple sta/lta (short-term average/long-term average) picker, using
    obspy's :func:`obspy.signal.trigger.classic_sta_lta` routine to generate
    the characteristic function.

    Currently very basic quick wrapper, there are many other (better) options
    in obspy in the :mod:`obspy.signal.trigger` module.

    :type stream: obspy.core.stream.Stream
    :param stream: The stream to pick on, can be any number of channels.
    :type stalen: float
    :param stalen: Length of the short-term average window in seconds.
    :type ltalen: float
    :param ltalen: Length of the long-term average window in seconds.
    :type trig_on: float
    :param trig_on: sta/lta ratio to trigger a detection/pick
    :type trig_off: float
    :param trig_off: sta/lta ratio to turn the trigger off - no further picks\
        will be made between exceeding trig_on until trig_off is reached.
    :type freqmin: float
    :param freqmin: Low-cut frequency in Hz for bandpass filter
    :type freqmax: float
    :param freqmax: High-cut frequency in Hz for bandpass filter
    :type debug: int
    :param debug: Debug output level from 0-5.
    :type show: bool
    :param show: Show picks on waveform.

    :returns: :class:`obspy.core.event.event.Event`

    .. rubric:: Example

    >>> from obspy import read
    >>> from eqcorrscan.utils.picker import stalta_pick
    >>> st = read()
    >>> event = stalta_pick(st, stalen=0.2, ltalen=4, trig_on=10,
    ...                     trig_off=1, freqmin=3.0, freqmax=20.0)
    >>> print(event.creation_info.author)
    EQcorrscan

    .. warning::
        This function is not designed for accurate picking, rather it can give
        a first idea of whether picks may be possible. Proceed with caution.
    """
    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='stalta'))
    picks = []
    for tr in stream:
        # We are going to assume, for now, that if the pick is made on the
        # horizontal channel then it is an S, otherwise we will assume it is
        # a P-phase: obviously a bad assumption...
        if tr.stats.channel[-1] == 'Z':
            phase = 'P'
        else:
            phase = 'S'
        if freqmin and freqmax:
            tr.detrend('simple')
            tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax,
                      corners=3, zerophase=True)
        df = tr.stats.sampling_rate
        cft = classic_sta_lta(tr.data, int(stalen * df), int(ltalen * df))
        if debug > 3:
            plot_trigger(tr, cft, trig_on, trig_off)
        triggers = trigger_onset(cft, trig_on, trig_off)
        for trigger in triggers:
            on = tr.stats.starttime + (trigger[0] / df)
            # off = tr.stats.starttime + (trigger[1] / df)
            wav_id = WaveformStreamID(station_code=tr.stats.station,
                                      channel_code=tr.stats.channel,
                                      network_code=tr.stats.network)
            p = Pick(waveform_id=wav_id, phase_hint=phase, time=on)
            if debug > 2:
                print('Pick made:')
                print(p)
            picks.append(p)
    # QC picks
    pick_stations = list(set([pick.waveform_id.station_code
                              for pick in picks]))
    for pick_station in pick_stations:
        station_picks = [pick for pick in picks if
                         pick.waveform_id.station_code == pick_station]
        # If P-pick is after S-picks, remove it.
        p_time = [pick.time for pick in station_picks
                  if pick.phase_hint == 'P']
        s_time = [pick.time for pick in station_picks
                  if pick.phase_hint == 'S']
        if p_time > s_time:
            p_pick = [pick for pick in station_picks if pick.phase_hint == 'P']
            for pick in p_pick:
                print('P pick after S pick, removing P pick')
                picks.remove(pick)
    if show:
        plotting.pretty_template_plot(stream, picks=picks, title='Autopicks',
                                      size=(8, 9))
    event.picks = picks
    if len(event.picks) > 0:
        event.origins[0].time = min([pick.time for pick in event.picks]) - 1
        # event.origins[0].latitude = float('nan')
        # event.origins[0].longitude = float('nan')
    # Set arbitrary origin time
    return event

def get_panels(config):
    """Execute the panels phase

    :param config: a Mordred config object
    """
    task = TaskPanels(config)
    task.execute()

    task = TaskPanelsMenu(config)
    task.execute()

    logging.info("Panels creation finished!")

def create_xz(archive, compression, cmd, verbosity, interactive, filenames):
    """Create an XZ archive with the lzma Python module."""
    return _create(archive, compression, cmd, 'xz', verbosity, filenames)

def register(self, model, **attr):
    """Register a model or a table with this mapper

    :param model: a table or a :class:`.BaseModel` class
    :return: a Model class or a table
    """
    metadata = self.metadata

    if not isinstance(model, Table):
        model_name = self._create_model(model, **attr)
        if not model_name:
            return
        model, name = model_name
        table = model.__table__
        self._declarative_register[name] = model
        if name in self._bases:
            for model in self._bases.pop(name):
                self.register(model)
    else:
        table = model.tometadata(metadata)
        model = table

    # Register engine
    engine = None
    label = table.info.get('bind_label')
    keys = ('%s.%s' % (label, table.key),
            label, None) if label else (None,)
    #
    # Find the engine for this table
    for key in keys:
        engine = self.get_engine(key)
        if engine:
            break
    assert engine

    self.binds[table] = engine
    return model

def incrementSub(self, amount=1):
    """
    Increments the sub-progress bar by amount.
    """
    self._subProgressBar.setValue(self.subValue() + amount)
    QApplication.instance().processEvents()

def nth_combination(iterable, r, index):
    """Equivalent to ``list(combinations(iterable, r))[index]``.

    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`nth_combination` computes the subsequence at
    sort position *index* directly, without computing the previous
    subsequences.
    """
    pool = tuple(iterable)
    n = len(pool)
    if (r < 0) or (r > n):
        raise ValueError

    # c = number of r-combinations of n items, computed incrementally.
    c = 1
    k = min(r, n - r)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i

    if index < 0:
        index += c

    if (index < 0) or (index >= c):
        raise IndexError

    result = []
    while r:
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])

    return tuple(result)

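# Sanity check against the naive construction (illustrative):
from itertools import combinations
assert nth_combination(range(5), 3, 4) == list(combinations(range(5), 3))[4]
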
def nl_socket_add_memberships(sk, *group):
    """Join groups.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L417

    Joins the specified groups using the modern socket option. The list of groups has to be terminated by 0.

    Make sure to use the correct group definitions as the older bitmask definitions for nl_join_groups() are likely to
    still be present for backward compatibility reasons.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    group -- group identifier (integer).

    Returns:
    0 on success or a negative error code.
    """
    if sk.s_fd == -1:
        return -NLE_BAD_SOCK

    for grp in group:
        if not grp:
            break
        if grp < 0:
            return -NLE_INVAL
        try:
            sk.socket_instance.setsockopt(SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, grp)
        except OSError as exc:
            return -nl_syserr2nlerr(exc.errno)

    return 0

def set_inputs(self, inputs):
    """Assign input voltages."""
    if len(inputs) != len(self.inputs):
        raise RuntimeError(
            "Number of inputs {0:d} does not match number of input nodes {1:d}".format(
                len(inputs), len(self.inputs)))
    for i, v in zip(self.inputs, inputs):
        self.input_values[i] = v

def ping(self, destination, length=20):
    """ send ICMPv6 echo request with a given length to a unicast destination
    address

    Args:
        destination: the unicast destination address of ICMPv6 echo request
        length: the size of ICMPv6 echo request payload
    """
    print('%s call ping' % self.port)
    print('destination: %s' % destination)
    try:
        cmd = 'ping %s %s' % (destination, str(length))
        print(cmd)
        self._sendline(cmd)
        self._expect(cmd)
        # wait echo reply
        time.sleep(1)
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger("ping() Error: " + str(e))

def create_node(participant_id):
    """Send a POST request to the node table.

    This makes a new node for the participant, it calls:
    1. exp.get_network_for_participant
    2. exp.create_node
    3. exp.add_node_to_network
    4. exp.node_post_request
    """
    exp = experiment(session)

    # Get the participant.
    try:
        participant = models.Participant.\
            query.filter_by(id=participant_id).one()
    except NoResultFound:
        return error_response(error_type="/node POST no participant found",
                              status=403)

    # replace any duplicate assignments
    check_for_duplicate_assignments(participant)

    # Make sure the participant status is working
    if participant.status != "working":
        error_type = "/node POST, status = {}".format(participant.status)
        return error_response(error_type=error_type,
                              participant=participant)

    try:
        # execute the request
        network = exp.get_network_for_participant(participant=participant)
        if network is None:
            return Response(dumps({"status": "error"}), status=403)

        node = exp.create_node(
            participant=participant,
            network=network)
        assign_properties(node)
        exp.add_node_to_network(
            node=node,
            network=network)
        session.commit()

        # ping the experiment
        exp.node_post_request(participant=participant, node=node)
        session.commit()
    except:
        return error_response(error_type="/node POST server error",
                              status=403,
                              participant=participant)

    # return the data
    return success_response(field="node",
                            data=node.__json__(),
                            request_type="/node POST")

def to_delete(datetimes,
              years=0, months=0, weeks=0, days=0,
              hours=0, minutes=0, seconds=0,
              firstweekday=SATURDAY, now=None):
    """
    Return a set of datetimes that should be deleted, out of ``datetimes``.

    See ``to_keep`` for a description of arguments.
    """
    datetimes = set(datetimes)
    return datetimes - to_keep(datetimes,
                               years=years, months=months,
                               weeks=weeks, days=days,
                               hours=hours, minutes=minutes, seconds=seconds,
                               firstweekday=firstweekday, now=now)

def pfunc(func):
    """
    pf = pfunc(func)

    Returns a function that can be called just like func; however its arguments may be
    PyMC objects or containers of PyMC objects, and its return value will be a deterministic.

    Example:

    >>> A = pymc.Normal('A',0,1,size=10)
    >>> pprod = pymc.pfunc(numpy.prod)
    >>> B = pprod(A, axis=0)
    >>> B
    <pymc.PyMCObjects.Deterministic 'prod(A_0)' at 0x3ce49b0>
    >>> B.value
    -0.0049333289649554912
    >>> numpy.prod(A.value)
    -0.0049333289649554912
    """
    if isinstance(func, np.ufunc):
        return pufunc(func)
    elif not inspect.isfunction(func):
        if func.__name__ == '__call__':
            raise ValueError(
                'Cannot get argspec of call method. Is it builtin?')
        try:
            return pfunc(func.__call__)
        except:
            cls, inst, tb = sys.exc_info()
            inst = cls(
                'Failed to create pfunc wrapper from object %s. Original error message:\n\n%s' %
                (func, inst.message))
            six.reraise(cls, inst, tb)

    fargs, fdefaults = get_signature(func)
    n_fargs = len(fargs)

    def dtrm_generator(*args, **kwds):
        name = func.__name__ + '(' + '_'.join([str(arg)
                                               for arg in list(args) +
                                               list(kwds.values())]) + ')'
        doc_str = 'A deterministic returning %s(%s, %s)' % (
            func.__name__,
            ', '.join([str(arg) for arg in args]),
            ', '.join(['%s=%s' % (key,
                                  str(val)) for key,
                       val in six.iteritems(kwds)]))
        parents = {}
        varargs = []
        for kwd, val in six.iteritems(kwds):
            parents[kwd] = val
        for i in range(len(args)):
            if i < n_fargs:
                parents[fargs[i]] = args[i]
            else:
                varargs.append(args[i])

        if len(varargs) == 0:
            eval_fun = func
        else:
            parents['varargs'] = varargs

            def wrapper(**wkwds_in):
                wkwds = copy(wkwds_in)
                wargs = []
                for arg in fargs:
                    wargs.append(wkwds.pop(arg))
                wargs.extend(wkwds.pop('varargs'))
                return func(*wargs, **wkwds)

            eval_fun = wrapper

        return pm.Deterministic(
            eval_fun, doc_str, name, parents, trace=False, plot=False)

    dtrm_generator.__name__ = func.__name__ + '_deterministic_generator'
    dtrm_generator.__doc__ = """
    Deterministic-generating wrapper for %s. Original docstring:
    %s
    %s
    """ % (func.__name__, '_' * 60, func.__doc__)

    return dtrm_generator

def fuller_scaling(target, DABo, To, Po, temperature='pore.temperature',
                   pressure='pore.pressure'):
    r"""
    Uses Fuller model to adjust a diffusion coefficient for gases from
    reference conditions to conditions of interest

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    DABo : float, array_like
        Diffusion coefficient at reference conditions
    Po, To : float, array_like
        Pressure & temperature at reference conditions, respectively
    pressure : string
        The dictionary key containing the pressure values in Pascals (Pa)
    temperature : string
        The dictionary key containing the temperature values in Kelvin (K)
    """
    Ti = target[temperature]
    Pi = target[pressure]
    value = DABo*(Ti/To)**1.75*(Po/Pi)
    return value

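# Numeric illustration of the scaling law D = DABo * (T/To)**1.75 * (Po/P),
# with invented reference values (298 K, 1 atm) rescaled to 350 K:
DABo, To, Po = 2.0e-5, 298.0, 101325.0
Ti, Pi = 350.0, 101325.0
DAB = DABo * (Ti / To) ** 1.75 * (Po / Pi)  # ~2.65e-5 m^2/s
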
def _termination_callback(self, returncode):
    """
    Called when the process has stopped.

    :param returncode: Process returncode
    """
    if self.started:
        log.info("QEMU process has stopped, return code: %d", returncode)
        yield from self.stop()
        # A return code of 1 seems fine on Windows
        if returncode != 0 and (returncode != 1 or not sys.platform.startswith("win")):
            self.project.emit("log.error", {"message": "QEMU process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())})

def get_between_ngrams(c, attrib="words", n_min=1, n_max=1, lower=True):
    """Return the ngrams *between* two unary Mentions of a binary-Mention Candidate.

    Get the ngrams *between* two unary Mentions of a binary-Mention Candidate,
    where both share the same sentence Context.

    :param c: The binary-Mention Candidate to evaluate.
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If 'True', all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    if len(c) != 2:
        raise ValueError("Only applicable to binary Candidates")
    span0 = _to_span(c[0])
    span1 = _to_span(c[1])
    if span0.sentence != span1.sentence:
        raise ValueError(
            "Only applicable to Candidates where both spans are "
            "from the same immediate Context."
        )
    distance = abs(span0.get_word_start_index() - span1.get_word_start_index())
    if span0.get_word_start_index() < span1.get_word_start_index():
        for ngram in get_right_ngrams(
            span0,
            window=distance - 1,
            attrib=attrib,
            n_min=n_min,
            n_max=n_max,
            lower=lower,
        ):
            yield ngram
    else:  # span0.get_word_start_index() > span1.get_word_start_index()
        for ngram in get_right_ngrams(
            span1,
            window=distance - 1,
            attrib=attrib,
            n_min=n_min,
            n_max=n_max,
            lower=lower,
        ):
            yield ngram

def _generate_comparator(cls, field_names):
    """
    Construct a comparator function based on the field names. The comparator
    returns the first non-zero comparison value.

    Inputs:
        field_names (iterable of strings): The field names to sort on.

    Returns:
        A comparator function.
    """
    # Ensure that field names is a list and not a tuple.
    field_names = list(field_names)

    # For fields that start with a '-', reverse the ordering of the
    # comparison.
    reverses = [1] * len(field_names)
    for i, field_name in enumerate(field_names):
        if field_name[0] == '-':
            reverses[i] = -1
            field_names[i] = field_name[1:]

    field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names]

    def comparator(i1, i2):
        # Get a tuple of values for comparison.
        v1 = attrgetter(*field_names)(i1)
        v2 = attrgetter(*field_names)(i2)

        # If there's only one arg supplied, attrgetter returns a single
        # item, directly return the result in this case.
        if len(field_names) == 1:
            return cls._cmp(v1, v2) * reverses[0]

        # Compare each field for the two items, reversing if necessary.
        order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses)

        try:
            # The first non-zero element.
            return next(dropwhile(__not__, order))
        except StopIteration:
            # Everything was equivalent.
            return 0

    return comparator

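# Hedged usage sketch: a comparator like the one returned above plugs into
# sorted() through functools.cmp_to_key. The owning class (SomeManager) and
# the `records` sequence are hypothetical names, not from the source.
from functools import cmp_to_key

comparator = SomeManager._generate_comparator(['rank', '-name'])
records_sorted = sorted(records, key=cmp_to_key(comparator))
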
def kline_echarts(self, code=None):
    """plot the market_data"""

    def kline_formater(param):
        return param.name + ':' + vars(param)

    if code is None:
        path_name = '.' + os.sep + 'QA_' + self.type + \
            '_codepackage_' + self.if_fq + '.html'
        kline = Kline(
            'CodePackage_' + self.if_fq + '_' + self.type,
            width=1360,
            height=700,
            page_title='QUANTAXIS'
        )
        bar = Bar()
        data_splits = self.splits()
        for ds in data_splits:
            data = []
            axis = []
            if ds.type[-3:] == 'day':
                datetime = np.array(ds.date.map(str))
            else:
                datetime = np.array(ds.datetime.map(str))
            ohlc = np.array(
                ds.data.loc[:,
                            ['open',
                             'close',
                             'low',
                             'high']]
            )
            kline.add(
                ds.code[0],
                datetime,
                ohlc,
                mark_point=["max",
                            "min"],
                is_datazoom_show=True,
                datazoom_orient='horizontal'
            )
        return kline
    else:
        data = []
        axis = []
        ds = self.select_code(code)
        data = []
        # axis = []
        if self.type[-3:] == 'day':
            datetime = np.array(ds.date.map(str))
        else:
            datetime = np.array(ds.datetime.map(str))
        ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']])
        vol = np.array(ds.volume)
        kline = Kline(
            '{}__{}__{}'.format(code,
                                self.if_fq,
                                self.type),
            width=1360,
            height=700,
            page_title='QUANTAXIS'
        )
        bar = Bar()
        kline.add(self.code, datetime, ohlc,
                  mark_point=["max", "min"],
                  # is_label_show=True,
                  is_datazoom_show=True,
                  is_xaxis_show=False,
                  # is_toolbox_show=True,
                  tooltip_formatter='{b}:{c}',  # kline_formater,
                  # is_more_utils=True,
                  datazoom_orient='horizontal')
        bar.add(
            self.code,
            datetime,
            vol,
            is_datazoom_show=True,
            datazoom_xaxis_index=[0,
                                  1]
        )
        grid = Grid(width=1360, height=700, page_title='QUANTAXIS')
        grid.add(bar, grid_top="80%")
        grid.add(kline, grid_bottom="30%")
        return grid

def pprint(self):
    """Print tag key=value pairs."""
    strings = []
    for key in sorted(self.keys()):
        values = self[key]
        for value in values:
            strings.append("%s=%s" % (key, value))
    return "\n".join(strings)

def post_process(self, tagnum2name):
    """Map the tag name instead of tag number to the tag value.
    """
    for tag, value in self.raw_ifd.items():
        try:
            tag_name = tagnum2name[tag]
        except KeyError:
            # Ok, we don't recognize this tag. Just use the numeric id.
            msg = 'Unrecognized Exif tag ({tag}).'.format(tag=tag)
            warnings.warn(msg, UserWarning)
            tag_name = tag
        self.processed_ifd[tag_name] = value

def write_source_description(
        self, capability_lists=None, outfile=None, links=None):
    """Write a ResourceSync Description document to outfile or STDOUT."""
    rsd = SourceDescription(ln=links)
    rsd.pretty_xml = self.pretty_xml
    if (capability_lists is not None):
        for uri in capability_lists:
            rsd.add_capability_list(uri)
    if (outfile is None):
        print(rsd.as_xml())
    else:
        rsd.write(basename=outfile)

def parseFASTACommandLineOptions(args):
    """
    Examine parsed command-line options and return a Reads instance.

    @param args: An argparse namespace, as returned by the argparse
        C{parse_args} function.
    @return: A C{Reads} subclass instance, depending on the type of FASTA file
        given.
    """
    # Set default FASTA type.
    if not (args.fasta or args.fastq or args.fasta_ss):
        args.fasta = True

    readClass = readClassNameToClass[args.readClass]

    if args.fasta:
        from dark.fasta import FastaReads
        return FastaReads(args.fastaFile, readClass=readClass)
    elif args.fastq:
        from dark.fastq import FastqReads
        return FastqReads(args.fastaFile, readClass=readClass)
    else:
        from dark.fasta_ss import SSFastaReads
        return SSFastaReads(args.fastaFile, readClass=readClass)

def _set_default_cfg_profile(self):
    """Set default network config profile.

    Check whether the default_cfg_profile value exists in the current
    version of DCNM. If not, set it to a new default value which is supported
    by the latest version.
    """
    try:
        cfgplist = self.config_profile_list()
        if self.default_cfg_profile not in cfgplist:
            self.default_cfg_profile = ('defaultNetworkUniversalEfProfile'
                                        if self._is_iplus else
                                        'defaultNetworkIpv4EfProfile')
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to send request to DCNM.")
        self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'

def _get_samples_to_process(fn, out_dir, config, force_single, separators):
    """parse csv file with one line per file. It will merge
    all files that have the same description name"""
    out_dir = os.path.abspath(out_dir)
    samples = defaultdict(list)
    with open(fn) as handle:
        for l in handle:
            if l.find("description") > 0:
                logger.info("Skipping header.")
                continue
            cols = l.strip().split(",")
            if len(cols) > 0:
                if len(cols) < 2:
                    raise ValueError("Line needs 2 values: file and name.")
                if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]):
                    if cols[0].find(" ") > -1:
                        new_name = os.path.abspath(cols[0].replace(" ", "_"))
                        logger.warning("Spaces found in %s. Linked to %s." % (cols[0], new_name))
                        logger.warning("Please, avoid names with spaces in the future.")
                        utils.symlink_plus(os.path.abspath(cols[0]), new_name)
                        cols[0] = new_name
                    samples[cols[1]].append(cols)
                else:
                    logger.info("skipping %s, File doesn't exist." % cols[0])
    for sample, items in samples.items():
        if is_fastq(items[0][0], True):
            fn = "fq_merge"
            ext = ".fastq.gz"
        elif is_bam(items[0][0]):
            fn = "bam_merge"
            ext = ".bam"
        elif is_gsm(items[0][0]):
            fn = "query_gsm"
            ext = ".fastq.gz"
        elif is_srr(items[0][0]):
            fn = "query_srr"
            ext = ".fastq.gz"
        files = [os.path.abspath(fn_file[0]) if utils.file_exists(fn_file[0]) else fn_file[0] for fn_file in items]
        samples[sample] = [{'files': _check_paired(files, force_single, separators),
                            'out_file': os.path.join(out_dir, sample + ext),
                            'fn': fn, 'anno': items[0][2:], 'config': config,
                            'name': sample, 'out_dir': out_dir}]
    return [samples[sample] for sample in samples]

def force_hashable(obj, recursive=True):
"""Force frozenset() command to freeze the order and contents of mutables and iterables like lists, dicts, generators
Useful for memoization and constructing dicts or hashtables where keys must be immutable.
FIXME: Rename function because "hashable" is misleading.
A better name might be `force_immutable`.
because some hashable objects (generators) are tuplized by this function
`tuplized` is probably a better name, but strings are left alone, so not quite right
>>> force_hashable([1,2.,['3','four'],'five', {'s': 'ix'}])
(1, 2.0, ('3', 'four'), 'five', (('s', 'ix'),))
>>> force_hashable(i for i in range(4))
(0, 1, 2, 3)
>>> sorted(force_hashable(Counter('abbccc'))) == [('a', 1), ('b', 2), ('c', 3)]
True
"""
# if it's already hashable, and isn't a generator (which are also hashable, but also mutable?)
if hasattr(obj, '__hash__') and not hasattr(obj, 'next') and not hasattr(obj, '__next__'):
try:
hash(obj)
return obj
except (IndexError, ValueError, AttributeError, TypeError):
pass
if hasattr(obj, '__iter__'):
# looks like a Mapping if it has .get() and .items(), so should treat it like one
if hasattr(obj, 'get') and hasattr(obj, 'items'):
# FIXME: prevent infinite recursion:
# tuples don't have 'items' method so this will recurse forever
# if elements within new tuple aren't hashable and recurse has not been set!
return force_hashable(tuple(obj.items()))
if recursive:
return tuple(force_hashable(item) for item in obj)
return tuple(obj)
# strings are hashable so this ends the recursion for any object without an __iter__ method (strings do not)
    return str(obj) | Freeze the order and contents of mutables and iterables (lists, dicts, generators) into tuples.
Useful for memoization and for constructing dicts or hashtables where keys must be immutable.
FIXME: Rename this function, because "hashable" is misleading:
a better name might be `force_immutable`,
because some already-hashable objects (generators) are tuplized by this function.
`tuplized` is probably a better name, but strings are left alone, so it is not quite right either.
>>> force_hashable([1,2.,['3','four'],'five', {'s': 'ix'}])
(1, 2.0, ('3', 'four'), 'five', (('s', 'ix'),))
>>> force_hashable(i for i in range(4))
(0, 1, 2, 3)
>>> sorted(force_hashable(Counter('abbccc'))) == [('a', 1), ('b', 2), ('c', 3)]
True |
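A minimal usage sketch (assuming force_hashable is in scope): freezing mutable arguments so they can serve as cache keys in a memoizer.
def memoize(func):
    cache = {}
    def wrapper(*args):
        key = force_hashable(args)  # lists/dicts inside args become tuples
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]
    return wrapper

@memoize
def total(values):
    return sum(values)

total([1, 2, 3])  # computed once; [1, 2, 3] is frozen to (1, 2, 3)
total([1, 2, 3])  # answered from the cache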
def _legal_operations(self, model, tabu_list=[], max_indegree=None):
"""Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
        see Koller & Friedman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered."""
local_score = self.scoring_method.local_score
nodes = self.state_names.keys()
potential_new_edges = (set(permutations(nodes, 2)) -
set(model.edges()) -
set([(Y, X) for (X, Y) in model.edges()]))
for (X, Y) in potential_new_edges: # (1) add single edge
if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):
operation = ('+', (X, Y))
if operation not in tabu_list:
old_parents = model.get_parents(Y)
new_parents = old_parents + [X]
if max_indegree is None or len(new_parents) <= max_indegree:
score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
yield(operation, score_delta)
for (X, Y) in model.edges(): # (2) remove single edge
operation = ('-', (X, Y))
if operation not in tabu_list:
old_parents = model.get_parents(Y)
new_parents = old_parents[:]
new_parents.remove(X)
score_delta = local_score(Y, new_parents) - local_score(Y, old_parents)
yield(operation, score_delta)
for (X, Y) in model.edges(): # (3) flip single edge
new_edges = list(model.edges()) + [(Y, X)]
new_edges.remove((X, Y))
if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)):
operation = ('flip', (X, Y))
if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list:
old_X_parents = model.get_parents(X)
old_Y_parents = model.get_parents(Y)
new_X_parents = old_X_parents + [Y]
new_Y_parents = old_Y_parents[:]
new_Y_parents.remove(X)
if max_indegree is None or len(new_X_parents) <= max_indegree:
score_delta = (local_score(X, new_X_parents) +
local_score(Y, new_Y_parents) -
local_score(X, old_X_parents) -
local_score(Y, old_Y_parents))
yield(operation, score_delta) | Generates a list of legal (= not in tabu_list) graph modifications
for a given model, together with their score changes. Possible graph modifications:
(1) add, (2) remove, or (3) flip a single edge. For details on scoring
see Koller & Friedman, Probabilistic Graphical Models, Section 18.4.3.3 (page 818).
If a number `max_indegree` is provided, only modifications that keep the number
of parents for each node below `max_indegree` are considered. |
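A standalone sketch of the three move types on a toy DAG, using networkx only; the score deltas are omitted here, since the real method computes them with its decomposable local score:
import networkx as nx
from itertools import permutations

g = nx.DiGraph([('A', 'B'), ('B', 'C')])
edges = set(g.edges())
# (1) additions: skip existing edges and their reverses, keep the graph acyclic
adds = [(x, y) for x, y in permutations(g.nodes(), 2)
        if (x, y) not in edges and (y, x) not in edges
        and nx.is_directed_acyclic_graph(nx.DiGraph(list(edges) + [(x, y)]))]
# (2) removals: any existing edge can always be removed
removes = list(edges)
# (3) flips: reverse an edge only if the result is still a DAG
flips = [(x, y) for x, y in edges
         if nx.is_directed_acyclic_graph(
             nx.DiGraph([e for e in edges if e != (x, y)] + [(y, x)]))]
print(adds)     # [('A', 'C')] -- the only acyclic addition
print(removes)  # every existing edge
print(flips)    # both edges can be flipped without creating a cycle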
def prov(self):
"""
Provenance stored for this document as :py:class:`prov.model.ProvDocument`
"""
if self._prov:
return self._prov
elif not self.abstract:
return self.read_prov()
raise EmptyDocumentException() | Provenance stored for this document as :py:class:`prov.model.ProvDocument` |
def read_math_env(src, expr):
r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
parsed content to the expression automatically.
:param Buffer src: a buffer of tokens
:param TexExpr expr: expression for the environment
:rtype: TexExpr
"""
content = src.forward_until(lambda s: s == expr.end)
if not src.startswith(expr.end):
end = src.peek()
explanation = 'Instead got %s' % end if end else 'Reached end of file.'
raise EOFError('Expecting %s. %s' % (expr.end, explanation))
else:
src.forward(1)
expr.append(content)
return expr | r"""Read the environment from buffer.
Advances the buffer until right after the end of the environment. Adds
parsed content to the expression automatically.
:param Buffer src: a buffer of tokens
:param TexExpr expr: expression for the environment
:rtype: TexExpr |
def _change_sample_name(in_file, sample_name, data=None):
"""Fix name in feature counts log file to get the same
name in multiqc report.
"""
out_file = append_stem(in_file, "_fixed")
with file_transaction(data, out_file) as tx_out:
with open(tx_out, "w") as out_handle:
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("Status"):
line = "Status\t%s.bam" % sample_name
out_handle.write("%s\n" % line.strip())
    return out_file | Fix the sample name in a featureCounts log file so that the
MultiQC report shows the same name.
def fullname(self):
"""Return the object's fullname.
A fullname is an object's kind mapping like `t3` followed by an
underscore and the object's base36 id, e.g., `t1_c5s96e0`.
"""
by_object = self.reddit_session.config.by_object
return '{0}_{1}'.format(by_object[self.__class__], self.id) | Return the object's fullname.
A fullname is an object's kind mapping like `t3` followed by an
underscore and the object's base36 id, e.g., `t1_c5s96e0`. |
def safe_process_files(path, files, args, state):
"""
Process a number of files in a directory. Catches any exception from the
processing and checks if we should fail directly or keep going.
"""
for fn in files:
full_fn = os.path.join(path, fn)
try:
if not process_file(path, fn, args, state):
return False
        except Exception:
sys.stderr.write("error: %s\n%s\n" % (os.path.join(path, fn), traceback.format_exc()))
state.log_failed(full_fn)
if state.should_quit():
return False
return True | Process a number of files in a directory. Catches any exception from the
processing and checks if we should fail directly or keep going. |
def add_commands(self):
""" You can override this method in order to add your command line
arguments to the argparse parser. The configuration file was
reloaded at this time."""
self.parser.add_argument(
'-d',
action="count",
            **self.config.default.debug.get_arg_parse_arguments()) | You can override this method in order to add your own command-line
arguments to the argparse parser. The configuration file has already
been reloaded by this point.
def flatten(iterable):
    '''This function allows a simple way to iterate over a "complex" iterable; for example,
    if the input is [12, [23], (4, 3), "lkjasddf"], this will return an Iterable that yields
    12, 23, 4, 3 and "lkjasddf".
    Args:
        iterable (Iterable) - A complex iterable that will be flattened
    Returns:
        (Iterable): An Iterable that flattens multiple iterables'''
    return itertools.chain.from_iterable(
        a if isinstance(a, Iterable) and not isinstance(a, str) else [a]
        for a in iterable) | This function allows a simple way to iterate over a "complex" iterable; for example,
if the input is [12, [23], (4, 3), "lkjasddf"], this will return an Iterable that yields
12, 23, 4, 3 and "lkjasddf".
Args:
    iterable (Iterable) - A complex iterable that will be flattened
Returns:
    (Iterable): An Iterable that flattens multiple iterables
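A self-contained run of the docstring example; the function itself assumes itertools and an Iterable ABC are in scope (collections.abc on Python 3):
import itertools
from collections.abc import Iterable

list(flatten([12, [23], (4, 3), "lkjasddf"]))
# -> [12, 23, 4, 3, 'lkjasddf']  (strings are kept whole, not split)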
def outputs_of(self, partition_index):
"""The outputs of the partition at ``partition_index``.
Note that this returns a tuple of element indices, since coarse-
grained blackboxes may have multiple outputs.
"""
partition = self.partition[partition_index]
outputs = set(partition).intersection(self.output_indices)
return tuple(sorted(outputs)) | The outputs of the partition at ``partition_index``.
Note that this returns a tuple of element indices, since coarse-
grained blackboxes may have multiple outputs. |
def euclideanDistance(instance1, instance2, considerDimensions):
"""
Calculate Euclidean Distance between two samples
Example use:
data1 = [2, 2, 2, 'class_a']
data2 = [4, 4, 4, 'class_b']
    distance = euclideanDistance(data1, data2, [0, 1, 2])
:param instance1: list of attributes
:param instance2: list of attributes
:param considerDimensions: a list of dimensions to consider
:return: float euclidean distance between data1 & 2
"""
distance = 0
for x in considerDimensions:
distance += pow((instance1[x] - instance2[x]), 2)
return math.sqrt(distance) | Calculate Euclidean Distance between two samples
Example use:
data1 = [2, 2, 2, 'class_a']
data2 = [4, 4, 4, 'class_b']
distance = euclideanDistance(data1, data2, [0, 1, 2])
:param instance1: list of attributes
:param instance2: list of attributes
:param considerDimensions: a list of dimensions to consider
:return: float euclidean distance between data1 & 2 |
def build_catalog(site, datasets, format=None):
'''Build the DCAT catalog for this site'''
site_url = url_for('site.home_redirect', _external=True)
catalog_url = url_for('site.rdf_catalog', _external=True)
graph = Graph(namespace_manager=namespace_manager)
catalog = graph.resource(URIRef(catalog_url))
catalog.set(RDF.type, DCAT.Catalog)
catalog.set(DCT.title, Literal(site.title))
catalog.set(DCT.language,
Literal(current_app.config['DEFAULT_LANGUAGE']))
catalog.set(FOAF.homepage, URIRef(site_url))
publisher = graph.resource(BNode())
publisher.set(RDF.type, FOAF.Organization)
publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR']))
catalog.set(DCT.publisher, publisher)
for dataset in datasets:
catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph))
if isinstance(datasets, Paginable):
if not format:
raise ValueError('Pagination requires format')
catalog.add(RDF.type, HYDRA.Collection)
catalog.set(HYDRA.totalItems, Literal(datasets.total))
kwargs = {
'format': format,
'page_size': datasets.page_size,
'_external': True,
}
first_url = url_for('site.rdf_catalog_format', page=1, **kwargs)
page_url = url_for('site.rdf_catalog_format',
page=datasets.page, **kwargs)
last_url = url_for('site.rdf_catalog_format',
page=datasets.pages, **kwargs)
pagination = graph.resource(URIRef(page_url))
pagination.set(RDF.type, HYDRA.PartialCollectionView)
pagination.set(HYDRA.first, URIRef(first_url))
pagination.set(HYDRA.last, URIRef(last_url))
if datasets.has_next:
next_url = url_for('site.rdf_catalog_format',
page=datasets.page + 1, **kwargs)
pagination.set(HYDRA.next, URIRef(next_url))
if datasets.has_prev:
prev_url = url_for('site.rdf_catalog_format',
page=datasets.page - 1, **kwargs)
pagination.set(HYDRA.previous, URIRef(prev_url))
catalog.set(HYDRA.view, pagination)
return catalog | Build the DCAT catalog for this site |
def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg) | Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None |
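A hedged usage sketch with the smbus2 package; the bus number, device address, and register below are hypothetical placeholders for real hardware:
from smbus2 import SMBus

with SMBus(1) as bus:  # /dev/i2c-1 (hypothetical bus number)
    # Write three bytes starting at register 0x00 of device 0x48.
    bus.write_block_data(0x48, 0x00, [0x01, 0x02, 0x03])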
def compare(self, textOrFingerprint1, textOrFingerprint2):
"""Returns the semantic similarity of texts or fingerprints. Each argument can be eiter a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful
"""
compareList = [self._createDictionary(textOrFingerprint1), self._createDictionary(textOrFingerprint2)]
metric = self._fullClient.compare(json.dumps(compareList))
        return metric.cosineSimilarity | Returns the semantic similarity of texts or fingerprints. Each argument can be either a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful |
def sign(self, signer: Signer):
""" Sign message using signer. """
message_data = self._data_to_sign()
self.signature = signer.sign(data=message_data) | Sign message using signer. |
def difference(self, boolean_switches):
"""
[COMPATIBILITY]
Make a copy of the current instance, and then discard all options that are in boolean_switches.
:param set boolean_switches: A collection of Boolean switches to disable.
:return: A new SimStateOptions instance.
"""
ops = SimStateOptions(self)
for key in boolean_switches:
ops.discard(key)
return ops | [COMPATIBILITY]
Make a copy of the current instance, and then discard all options that are in boolean_switches.
:param set boolean_switches: A collection of Boolean switches to disable.
:return: A new SimStateOptions instance. |
def open_macros(self, filepath):
"""Loads macros from file and marks grid as changed
Parameters
----------
filepath: String
\tPath to macro file
"""
try:
wx.BeginBusyCursor()
self.main_window.grid.Disable()
with open(filepath) as macro_infile:
# Enter safe mode
self.main_window.grid.actions.enter_safe_mode()
post_command_event(self.main_window, self.SafeModeEntryMsg)
# Mark content as changed
post_command_event(self.main_window, self.ContentChangedMsg)
macrocode = macro_infile.read()
self.grid.code_array.macros += "\n" + macrocode.strip("\n")
self.grid.main_window.macro_panel.codetext_ctrl.SetText(
self.grid.code_array.macros)
except IOError:
msg = _("Error opening file {filepath}.").format(filepath=filepath)
post_command_event(self.main_window, self.StatusBarMsg, text=msg)
return False
finally:
self.main_window.grid.Enable()
wx.EndBusyCursor()
# Mark content as changed
try:
post_command_event(self.main_window, self.ContentChangedMsg)
except TypeError:
# The main window does not exist any more
pass | Loads macros from file and marks grid as changed
Parameters
----------
filepath: String
\tPath to macro file |
def fetch_path(self, name):
"""
Fetch contents from the path retrieved via lookup_path.
No caching will be done.
"""
with codecs.open(self.lookup_path(name), encoding='utf-8') as fd:
return fd.read() | Fetch contents from the path retrieved via lookup_path.
No caching will be done. |
def begin(self):
"""Initialize library, must be called once before other functions are
called.
"""
resp = ws.ws2811_init(self._leds)
if resp != 0:
str_resp = ws.ws2811_get_return_t_str(resp)
raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, str_resp)) | Initialize library, must be called once before other functions are
called. |
def resolver(schema):
"""Default implementation of a schema name resolver function
"""
name = schema.__name__
if name.endswith("Schema"):
return name[:-6] or name
return name | Default implementation of a schema name resolver function |
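A quick illustration of the naming rule (the classes are hypothetical):
class PetSchema: pass
class Pet: pass
class Schema: pass

resolver(PetSchema)  # -> 'Pet'     ("Schema" suffix stripped)
resolver(Pet)        # -> 'Pet'     (no suffix to strip)
resolver(Schema)     # -> 'Schema'  (stripping would leave an empty name)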
def add_worksheet(self, name=None):
""" Adds a new worksheet """
url = self.build_url(self._endpoints.get('get_worksheets'))
response = self.session.post(url, data={'name': name} if name else None)
if not response:
return None
data = response.json()
return self.worksheet_constructor(parent=self, **{self._cloud_data_key: data}) | Adds a new worksheet |
def restore(self, remotepath):
''' Usage: restore <remotepath> - \
restore a file from the recycle bin
remotepath - the remote path to restore
'''
rpath = get_pcs_path(remotepath)
# by default, only 1000 items, more than that sounds a bit crazy
pars = {
'method' : 'listrecycle' }
self.pd("Searching for fs_id to restore")
return self.__get(pcsurl + 'file', pars, self.__restore_search_act, rpath) | Usage: restore <remotepath> - \
restore a file from the recycle bin
remotepath - the remote path to restore |
def Default(self, *statements):
"""c-like default of switch statement
"""
assert self.parentStm is None
self.rank += 1
self.default = []
self._register_stements(statements, self.default)
        return self | C-like default branch of a switch statement
def mvn(*args, **kwargs):
"""Convenience function to efficiently construct a MultivariateNormalDiag."""
# Faster than using `tfd.MultivariateNormalDiag`.
return tfd.Independent(tfd.Normal(*args, **kwargs),
reinterpreted_batch_ndims=1) | Convenience function to efficiently construct a MultivariateNormalDiag. |
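A hedged usage sketch, assuming TensorFlow Probability is installed with tfd = tfp.distributions as above: a batch of three independent Normals is reinterpreted as one 3-dimensional event.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

dist = mvn(loc=tf.zeros(3), scale=tf.ones(3))
dist.event_shape            # [3] -- one 3-D event, not a batch of scalars
dist.log_prob(tf.zeros(3))  # scalar: per-dimension log-probs summed over the last axis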
async def deaths(self, root):
"""Causes of death in the nation, as percentages.
Returns
-------
an :class:`ApiQuery` of dict with keys of str and values of float
"""
return {
elem.get('type'): float(elem.text)
for elem in root.find('DEATHS')
} | Causes of death in the nation, as percentages.
Returns
-------
an :class:`ApiQuery` of dict with keys of str and values of float |
def scan_cnproxy(self):
"""Scan candidate (mainland) proxies from http://cn-proxy.com"""
self.logger.info(
'start scanning http://cn-proxy.com for proxy list...')
response = requests.get('http://cn-proxy.com')
soup = BeautifulSoup(response.content, 'lxml')
tables = soup.find_all('table', class_='sortable')
for table in tables:
for tr in table.tbody.find_all('tr'):
info = tr.find_all('td')
addr = '{}:{}'.format(info[0].string, info[1].string)
self.proxy_queue.put({'addr': addr, 'protocol': 'http'}) | Scan candidate (mainland) proxies from http://cn-proxy.com |
def get_instantiated_service(self, name):
""" Get instantiated service by name """
if name not in self.instantiated_services:
raise UninstantiatedServiceException
return self.instantiated_services[name] | Get instantiated service by name |
def close(self):
"""
Close the connection and all associated cursors. This will implicitly
roll back any uncommitted operations.
"""
for c in self.cursors:
c.close()
self.cursors = []
self.impl = None | Close the connection and all associated cursors. This will implicitly
roll back any uncommitted operations. |
def manual_configure():
"""
Function to manually configure jackal.
"""
print("Manual configuring jackal")
mapping = { '1': 'y', '0': 'n'}
config = Config()
# Host
host = input_with_default("What is the Elasticsearch host?", config.get('jackal', 'host'))
config.set('jackal', 'host', host)
# SSL
if input_with_default("Use SSL?", mapping[config.get('jackal', 'use_ssl')]) == 'y':
config.set('jackal', 'use_ssl', '1')
if input_with_default("Setup custom server cert?", 'y') == 'y':
ca_certs = input_with_default("Server certificate location?", config.get('jackal', 'ca_certs'))
config.set('jackal', 'ca_certs', ca_certs)
else:
config.set('jackal', 'ca_certs', '')
else:
config.set('jackal', 'use_ssl', '0')
if input_with_default("Setup client certificates?", mapping[config.get('jackal', 'client_certs')]) == 'y':
config.set('jackal', 'client_certs', '1')
client_cert = input_with_default("Client cert location?", config.get('jackal', 'client_cert'))
config.set('jackal', 'client_cert', client_cert)
client_key = input_with_default("Client key location?", config.get('jackal', 'client_key'))
config.set('jackal', 'client_key', client_key)
else:
config.set('jackal', 'client_certs', '0')
# Index
index = input_with_default("What index prefix should jackal use?", config.get('jackal', 'index'))
config.set('jackal', 'index', index)
initialize_indices = (input_with_default("Do you want to initialize the indices?", 'y').lower() == 'y')
# Nmap
nmap_dir = input_with_default("What directory do you want to place the nmap results in?", config.get('nmap', 'directory'))
if not os.path.exists(nmap_dir):
os.makedirs(nmap_dir)
config.set('nmap', 'directory', nmap_dir)
nmap_options = input_with_default("What nmap options do you want to set for 'custom' (for example '-p 22,445')?", config.get('nmap', 'options'))
config.set('nmap', 'options', nmap_options)
# Nessus
configure_nessus = (input_with_default("Do you want to setup nessus?", 'n').lower() == 'y')
if configure_nessus:
nessus_host = input_with_default("What is the nessus host?", config.get('nessus', 'host'))
nessus_template = input_with_default("What template should jackal use?", config.get('nessus', 'template_name'))
nessus_access = input_with_default("What api access key should jackal use?", config.get('nessus', 'access_key'))
nessus_secret = input_with_default("What api secret key should jackal use?", config.get('nessus', 'secret_key'))
config.set('nessus', 'host', nessus_host)
config.set('nessus', 'template_name', nessus_template)
config.set('nessus', 'access_key', nessus_access)
config.set('nessus', 'secret_key', nessus_secret)
# Named pipes
configure_pipes = (input_with_default("Do you want to setup named pipes?", 'n').lower() == 'y')
if configure_pipes:
directory = input_with_default("What directory do you want to place the named pipes in?", config.get('pipes', 'directory'))
config.set('pipes', 'directory', directory)
config_file = input_with_default("What is the name of the named pipe config?", config.get('pipes', 'config_file'))
config.set('pipes', 'config_file', config_file)
if not os.path.exists(directory):
create = (input_with_default("Do you want to create the directory?", 'n').lower() == 'y')
if create:
os.makedirs(directory)
if not os.path.exists(os.path.join(config.config_dir, config_file)):
f = open(os.path.join(config.config_dir, config_file), 'a')
f.close()
config.write_config(initialize_indices) | Function to manually configure jackal. |
def on_message(self, *args, accept_query=False, matcher=None, **kwargs):
"""
Convenience wrapper of `Client.on_message` pre-bound with `channel=self.name`.
"""
if accept_query:
def new_matcher(msg: Message):
ret = True
if matcher:
ret = matcher(msg)
if ret is None or ret is False:
return ret
if msg.recipient is not self and not isinstance(msg.sender, User):
return False
return ret
else:
kwargs.setdefault("channel", self.name)
new_matcher = matcher
return self.client.on_message(*args, matcher=new_matcher, **kwargs) | Convenience wrapper of `Client.on_message` pre-bound with `channel=self.name`. |
def t_prepro_ID(self, t):
r'[_a-zA-Z][_a-zA-Z0-9]*' # preprocessor directives
t.type = reserved_directives.get(t.value.lower(), 'ID')
if t.type == 'DEFINE':
t.lexer.begin('define')
elif t.type == 'PRAGMA':
t.lexer.begin('pragma')
return t | r'[_a-zA-Z][_a-zA-Z0-9]* |
def unlink(self):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
if self._closed:
self._raise_closed()
self._accessor.unlink(self) | Remove this file or link.
If the path is a directory, use rmdir() instead. |
def __create_canvas(self, dimension, pairs, position, **kwargs):
"""!
        @brief Create a new canvas with user-defined parameters to display a cluster or a chunk of a cluster on it.
@param[in] dimension (uint): Data-space dimension.
        @param[in] pairs (list): Pairs of coordinates that will be displayed on the canvas. If empty then labels will not
be displayed on the canvas.
@param[in] position (uint): Index position of canvas on a grid.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis' 'visible_labels', 'visible_grid').
<b>Keyword Args:</b><br>
- visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.
By default axis are not displayed.
- visible_labels (bool): Defines visibility of labels on each canvas, if True - labels is displayed.
By default labels are displayed.
- visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.
By default grid is displayed.
        @return (matplotlib.Axis) Canvas to display a cluster or a chunk of a cluster.
"""
visible_grid = kwargs.get('visible_grid', True)
visible_labels = kwargs.get('visible_labels', True)
visible_axis = kwargs.get('visible_axis', False)
ax = self.__figure.add_subplot(self.__grid_spec[position])
if dimension > 1:
if visible_labels:
ax.set_xlabel("x%d" % pairs[position][0])
ax.set_ylabel("x%d" % pairs[position][1])
else:
ax.set_ylim(-0.5, 0.5)
ax.set_yticklabels([])
if visible_grid:
ax.grid(True)
if not visible_axis:
ax.set_yticklabels([])
ax.set_xticklabels([])
return ax | !
@brief Create a new canvas with user-defined parameters to display a cluster or a chunk of a cluster on it.
@param[in] dimension (uint): Data-space dimension.
@param[in] pairs (list): Pairs of coordinates that will be displayed on the canvas. If empty then labels will not
be displayed on the canvas.
@param[in] position (uint): Index position of canvas on a grid.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis' 'visible_labels', 'visible_grid').
<b>Keyword Args:</b><br>
- visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.
By default axis are not displayed.
- visible_labels (bool): Defines visibility of labels on each canvas, if True - labels is displayed.
By default labels are displayed.
- visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.
By default grid is displayed.
@return (matplotlib.Axis) Canvas to display a cluster or a chunk of a cluster.
def _find_bounds_1d(data, x):
"""
Find the index of the lower bound where ``x`` should be inserted
    into ``data`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound.
"""
idx = np.searchsorted(data, x)
if idx == 0:
idx0 = 0
elif idx == len(data): # pragma: no cover
idx0 = idx - 2
else:
idx0 = idx - 1
return idx0 | Find the index of the lower bound where ``x`` should be inserted
into ``data`` to maintain order.
The index of the upper bound is the index of the lower bound
plus 2. Both bound indices must be within the array.
Parameters
----------
data : 1D `~numpy.ndarray`
The 1D array to search.
x : float
The value to insert.
Returns
-------
index : int
The index of the lower bound. |
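A self-contained check of the clipping behaviour at the array edges:
import numpy as np

data = np.array([0.0, 1.0, 2.0, 3.0])
_find_bounds_1d(data, 1.5)   # -> 1   (x falls between data[1] and data[2])
_find_bounds_1d(data, -5.0)  # -> 0   (clipped at the left edge)
_find_bounds_1d(data, 99.0)  # -> 2   (clipped so the bounds stay in range)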
def _list_dir(self, path):
"""returns absolute paths for all entries in a directory"""
try:
elements = [
os.path.join(path, x) for x in os.listdir(path)
] if os.path.isdir(path) else []
elements.sort()
except OSError:
elements = None
return elements | returns absolute paths for all entries in a directory |
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
    stored as attributes. This method also sets :obj:`all_methods_P` as the
    set of methods for which data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods_P = [IDEAL]
# no point in getting Tmin, Tmax
if all((self.Tc, self.Pc, self.omega)):
methods_P.extend([TSONOPOULOS_EXTENDED, TSONOPOULOS, ABBOTT,
PITZER_CURL])
if self.eos:
methods_P.append(EOS)
if self.CASRN in CRC_virial_data.index:
methods_P.append(CRC_VIRIAL)
self.CRC_VIRIAL_coeffs = _CRC_virial_data_values[CRC_virial_data.index.get_loc(self.CASRN)].tolist()[1:]
if has_CoolProp and self.CASRN in coolprop_dict:
methods_P.append(COOLPROP)
self.CP_f = coolprop_fluids[self.CASRN]
self.all_methods_P = set(methods_P) | r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`all_methods_P` as the
set of methods for which data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters. |
def metric(self, name, count, elapsed):
"""A metric function that writes a single CSV file
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds
"""
if name is None:
warnings.warn("Ignoring unnamed metric", stacklevel=3)
return
with self.lock:
self.writer.writerow((name, count, "%f"%elapsed)) | A metric function that writes a single CSV file
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds |
def gen_search_gzh_url(keyword, page=1):
"""拼接搜索 公众号 URL
Parameters
----------
keyword : str or unicode
        The search text
page : int, optional
        Page number; the default is 1
Returns
-------
str
search_gzh_url
"""
assert isinstance(page, int) and page > 0
qs_dict = OrderedDict()
qs_dict['type'] = _search_type_gzh
qs_dict['page'] = page
qs_dict['ie'] = 'utf8'
qs_dict['query'] = keyword
    return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(qs_dict)) | Build the search URL for an official account (公众号) query
Parameters
----------
keyword : str or unicode
    The search text
page : int, optional
    Page number; the default is 1
Returns
-------
str
search_gzh_url |
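A hedged sketch of the generated URL, assuming the module-level _search_type_gzh constant equals 1; the key order is fixed by the OrderedDict:
gen_search_gzh_url('python', page=2)
# -> 'http://weixin.sogou.com/weixin?type=1&page=2&ie=utf8&query=python'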
def rollback(self, revision=None, annotations=None):
"""
Performs a rollback of the Deployment.
If the 'revision' parameter is omitted, we fetch the Deployment's system-generated
annotation containing the current revision, and revert to the version immediately
preceding the current version.
:param revision: The revision to rollback to.
:param annotations: Annotations we'd like to update.
:return: self
"""
rollback = DeploymentRollback()
rollback.name = self.name
rollback_config = RollbackConfig()
# to the specified revision
if revision is not None:
rollback_config.revision = revision
# to the revision immediately preceding the current revision
else:
current_revision = int(self.get_annotation(self.REVISION_ANNOTATION))
rev = max(current_revision - 1, 0)
rollback_config.revision = rev
rollback.rollback_to = rollback_config
if annotations is not None:
rollback.updated_annotations = annotations
url = '{base}/{name}/rollback'.format(base=self.base_url, name=self.name)
state = self.request(
method='POST',
url=url,
data=rollback.serialize())
if not state.get('success'):
status = state.get('status', '')
reason = state.get('data', dict()).get('message', None)
message = 'K8sDeployment: ROLLBACK failed : HTTP {0} : {1}'.format(status, reason)
raise BadRequestException(message)
time.sleep(0.2)
self._wait_for_desired_replicas()
self.get()
return self | Performs a rollback of the Deployment.
If the 'revision' parameter is omitted, we fetch the Deployment's system-generated
annotation containing the current revision, and revert to the version immediately
preceding the current version.
:param revision: The revision to rollback to.
:param annotations: Annotations we'd like to update.
:return: self |
def stem(self, word):
"""Return the S-stemmed form of a word.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = SStemmer()
>>> stmr.stem('summaries')
'summary'
>>> stmr.stem('summary')
'summary'
>>> stmr.stem('towers')
'tower'
>>> stmr.stem('reading')
'reading'
>>> stmr.stem('census')
'census'
"""
lowered = word.lower()
if lowered[-3:] == 'ies' and lowered[-4:-3] not in {'e', 'a'}:
return word[:-3] + ('Y' if word[-1:].isupper() else 'y')
if lowered[-2:] == 'es' and lowered[-3:-2] not in {'a', 'e', 'o'}:
return word[:-1]
if lowered[-1:] == 's' and lowered[-2:-1] not in {'u', 's'}:
return word[:-1]
return word | Return the S-stemmed form of a word.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = SStemmer()
>>> stmr.stem('summaries')
'summary'
>>> stmr.stem('summary')
'summary'
>>> stmr.stem('towers')
'tower'
>>> stmr.stem('reading')
'reading'
>>> stmr.stem('census')
'census' |
def vecs_to_datmesh(x, y):
"""
Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations.
"""
x, y = meshgrid(x, y)
out = zeros(x.shape + (2,), dtype=float)
out[:, :, 0] = x
out[:, :, 1] = y
return out | Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations. |
def handle_GET(self):
"""
        Override this method to handle a GET request. The default
action is to respond with "error 404 (not found)".
"""
self.send_response(404)
self.end_headers()
        self.wfile.write('not found'.encode('utf8')) | Override this method to handle a GET request. The default
action is to respond with "error 404 (not found)". |
def commit(self, snapshot: Tuple[Hash32, UUID]) -> None:
"""
Commit the journal to the point where the snapshot was taken. This
will merge in any changesets that were recorded *after* the snapshot changeset.
"""
_, account_snapshot = snapshot
self._account_db.commit(account_snapshot) | Commit the journal to the point where the snapshot was taken. This
will merge in any changesets that were recorded *after* the snapshot changeset. |
def get_authentication_statement(self, subject, ticket):
"""
Build an AuthenticationStatement XML block for a SAML 1.1
Assertion.
"""
authentication_statement = etree.Element('AuthenticationStatement')
authentication_statement.set('AuthenticationInstant',
self.instant(instant=ticket.consumed))
authentication_statement.set('AuthenticationMethod',
self.authn_method_password)
authentication_statement.append(subject)
return authentication_statement | Build an AuthenticationStatement XML block for a SAML 1.1
Assertion. |
def _on_state(self, state, client):
"""
Launch forward prediction for the new state given by some client.
"""
def cb(outputs):
try:
distrib, value = outputs.result()
except CancelledError:
logger.info("Client {} cancelled.".format(client.ident))
return
assert np.all(np.isfinite(distrib)), distrib
action = np.random.choice(len(distrib), p=distrib)
client.memory.append(TransitionExperience(
state, action, reward=None, value=value, prob=distrib[action]))
self.send_queue.put([client.ident, dumps(action)])
self.async_predictor.put_task([state], cb) | Launch forward prediction for the new state given by some client. |
def setup(cli):
"""Everything to make skypipe ready to use"""
if not cli.global_config.loaded:
setup_dotcloud_account(cli)
discover_satellite(cli)
cli.success("Skypipe is ready for action") | Everything to make skypipe ready to use |
def update_playlist_song(self, playlist_id, song_id, op):
"""从播放列表删除或者增加一首歌曲
如果歌曲不存在与歌单中,删除时返回 True;如果歌曲已经存在于
歌单,添加时也返回 True。
"""
action = 'mtop.alimusic.music.list.collectservice.{}songs'.format(
'delete' if op == 'del' else 'add')
payload = {
'listId': playlist_id,
'songIds': [song_id]
}
code, msg, rv = self.request(action, payload)
        return rv['data']['data']['success'] == 'true' | Delete a song from, or add a song to, a playlist.
If the song is not in the playlist, deleting it returns True;
if the song is already in the playlist, adding it also returns True.
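A hedged usage sketch; the client object and IDs below are hypothetical:
api.update_playlist_song('123456789', '987654321', 'add')  # -> True on success
api.update_playlist_song('123456789', '987654321', 'del')  # -> True even if the song was absent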