def get_connection(self, host, port):
'''Open a socket connection to a given host and port and write the Hadoop header.
The Hadoop RPC protocol looks like this when creating a connection:
+---------------------------------------------------------------------+
| Header, 4 bytes ("hrpc") |
+---------------------------------------------------------------------+
| Version, 1 byte (default version 9) |
+---------------------------------------------------------------------+
| RPC service class, 1 byte (0x00) |
+---------------------------------------------------------------------+
| Auth protocol, 1 byte (Auth method None = 0) |
+---------------------------------------------------------------------+
| Length of the RpcRequestHeaderProto + length of the |
| of the IpcConnectionContextProto (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Serialized delimited RpcRequestHeaderProto |
+---------------------------------------------------------------------+
| Serialized delimited IpcConnectionContextProto |
+---------------------------------------------------------------------+
'''
log.debug("############## CONNECTING ##############")
# Open socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.settimeout(self.sock_connect_timeout / 1000)
# Connect socket to server - defined by host and port arguments
self.sock.connect((host, port))
self.sock.settimeout(self.sock_request_timeout / 1000)
# Send RPC headers
self.write(self.RPC_HEADER) # header
self.write(struct.pack('B', self.version)) # version
self.write(struct.pack('B', self.RPC_SERVICE_CLASS)) # RPC service class
if self.use_sasl:
self.write(struct.pack('B', self.AUTH_PROTOCOL_SASL)) # auth protocol (SASL = 0xDF)
else:
self.write(struct.pack('B', self.AUTH_PROTOCOL_NONE)) # auth protocol (None = 0x00)
if self.use_sasl:
sasl = SaslRpcClient(self, hdfs_namenode_principal=self.hdfs_namenode_principal)
sasl_connected = sasl.connect()
if not sasl_connected:
raise TransientException("SASL is configured, but cannot get connected")
rpc_header = self.create_rpc_request_header()
context = self.create_connection_context()
header_length = len(rpc_header) + encoder._VarintSize(len(rpc_header)) + len(context) + encoder._VarintSize(len(context))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Header length: %s (%s)" % (header_length, format_bytes(struct.pack('!I', header_length))))
self.write(struct.pack('!I', header_length))
self.write_delimited(rpc_header)
self.write_delimited(context)
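# Illustrative sketch (added, not from the original source): the fixed 7-byte
# connection preamble described in the docstring above can be assembled like
# this; the constant values are assumptions taken from that docstring.
import struct
RPC_HEADER = b"hrpc"            # 4-byte magic
VERSION = 9                     # default protocol version
RPC_SERVICE_CLASS = 0x00
AUTH_PROTOCOL_NONE = 0x00       # auth method None
preamble = RPC_HEADER + struct.pack('BBB', VERSION, RPC_SERVICE_CLASS, AUTH_PROTOCOL_NONE)
assert preamble == b'hrpc\x09\x00\x00'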
def get_name_DID_info(self, name):
"""
Get a name's DID info
Returns a dict with 'error' and 'http_status' keys if the name is not found
"""
db = get_db_state(self.working_dir)
did_info = db.get_name_DID_info(name)
if did_info is None:
return {'error': 'No such name', 'http_status': 404}
return did_info
def ColorWithLightness(self, lightness):
'''Create a new instance based on this one with a new lightness value.
Parameters:
:lightness:
The lightness of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25)
(0.5, 0.25, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25).hsl
(30, 1, 0.25)
'''
h, s, l = self.__hsl
return Color((h, s, lightness), 'hsl', self.__a, self.__wref)
def _cast_float(temp_dt):
"""returns utc timestamp"""
if type(temp_dt) == str:
fmt = '%Y-%m-%dT%H:%M:00'
base_dt = temp_dt[0:19]
tz_offset = eval(temp_dt[19:22])
temp_dt = datetime.datetime.strptime(base_dt, fmt) - \
datetime.timedelta(hours=tz_offset)
return (temp_dt - datetime.datetime(1970, 1, 1)).total_seconds()
def unique_str(self):
""" A string that (ideally) uniquely represents this GC object. This
helps with naming files for caching. 'Unique' is defined as 'If
GC1 != GC2, then GC1.unique_str() != GC2.unique_str()'; conversely,
'If GC1 == GC2, then GC1.unique_str() == GC2.unique_str()'.
The string should be filename-safe (no \/:*?"<>|).
..note::Because of length/readability restrictions, this fxn ignores
wkt.
Example output:
"-180.000_0.250_0.000_90.000_0.000_-0.251_512_612_2013-05-21_12_32_52.945000"
"""
unique_str = "_".join(["%.3f" % f for f in self.geotransform] +
["%d" % d for d in self.x_size, self.y_size]
)
if self.date is not None:
unique_str += '_' + str(self.date)
if self.time is not None:
unique_str += '_' + str(self.time)
return unique_str.replace(':', '_')
def idf(self, term, transform=None):
r"""Calculate the Inverse Document Frequency of a term in the corpus.
Parameters
----------
term : str
The term to calculate the IDF of
transform : function
A function to apply to each document term before checking for the
presence of term
Returns
-------
float
The IDF
Examples
--------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n\n'
>>> tqbf += 'And then it slept.\n\n And the dog ran off.'
>>> corp = Corpus(tqbf)
>>> print(corp.docs())
[[['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',
'dog.']],
[['And', 'then', 'it', 'slept.']],
[['And', 'the', 'dog', 'ran', 'off.']]]
>>> round(corp.idf('dog'), 10)
0.4771212547
>>> round(corp.idf('the'), 10)
0.1760912591
"""
docs_with_term = 0
docs = self.docs_of_words()
for doc in docs:
doc_set = set(doc)
if transform:
transformed_doc = []
for word in doc_set:
transformed_doc.append(transform(word))
doc_set = set(transformed_doc)
if term in doc_set:
docs_with_term += 1
if docs_with_term == 0:
return float('inf')
return log10(len(docs) / docs_with_term)
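# Worked example (added for clarity): the IDF values in the doctest above follow
# directly from idf(term) = log10(number_of_docs / number_of_docs_containing_term).
from math import log10
num_docs = 3
print(round(log10(num_docs / 1), 10))  # 'dog' (without the trailing period) is in 1 of 3 docs -> 0.4771212547
print(round(log10(num_docs / 2), 10))  # 'the' is in 2 of 3 docs -> 0.1760912591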
def ev_to_s(offset_us, source_to_detector_m, array):
# the typical detector delay is 2.99 us when no MCP delay settings are applied
"""convert energy (eV) to time (s)
Parameters:
===========
array: array (in eV)
offset_us: float. Delay of detector in us
source_to_detector_m: float. Distance source to detector in m
Returns:
========
time: array in s
"""
# 1000 is used to convert eV to meV
time_s = np.sqrt(81.787 / (array * 1000.)) * source_to_detector_m / 3956.
time_record_s = time_s - offset_us * 1e-6
return time_record_s
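# Standalone illustration (added) of the conversion used above: the neutron
# wavelength is sqrt(81.787 / E[meV]) Angstrom and t[s] = wavelength * L / 3956.
# The flight path and delay below are hypothetical values.
import numpy as np
energy_ev = np.array([1.0, 10.0, 100.0])
source_to_detector_m = 16.45
offset_us = 2.99
time_record_s = np.sqrt(81.787 / (energy_ev * 1000.)) * source_to_detector_m / 3956. - offset_us * 1e-6
print(time_record_s)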
def step1(self, username, password):
"""First authentication step."""
self._check_initialized()
context = AtvSRPContext(
str(username), str(password),
prime=constants.PRIME_2048,
generator=constants.PRIME_2048_GEN)
self.session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode())
def mass_tot(self, rho0, Rs):
"""
total mass within the profile
:param rho0:
:param Rs:
:return:
"""
m_tot = 2*np.pi*rho0*Rs**3
return m_tot
def list_commands(self, ctx):
"""List commands from folder."""
rv = []
files = [_ for _ in next(os.walk(self.folder))[2] if not _.startswith("_") and _.endswith(".py")]
for filename in files:
rv.append(filename[:-3])
rv.sort()
return rv
def query_string(cls,
query,
default_field=None,
default_operator=None,
analyzer=None,
allow_leading_wildcard=None,
lowercase_expanded_terms=None,
enable_position_increments=None,
fuzzy_prefix_length=None,
fuzzy_min_sim=None,
phrase_slop=None,
boost=None,
analyze_wildcard=None,
auto_generate_phrase_queries=None,
minimum_should_match=None):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/query-string-query.html
A query that uses a query parser in order to parse its content.
> query = ElasticQuery().query_string('this AND that OR thus', default_field='content')
'''
instance = cls(query_string={'query': query})
if default_field is not None:
instance['query_string']['default_field'] = default_field
if default_operator is not None:
instance['query_string']['default_operator'] = default_operator
if analyzer is not None:
instance['query_string']['analyzer'] = analyzer
if allow_leading_wildcard is not None:
instance['query_string']['allow_leading_wildcard'] = allow_leading_wildcard
if lowercase_expanded_terms is not None:
instance['query_string']['lowercase_expanded_terms'] = lowercase_expanded_terms
if enable_position_increments is not None:
instance['query_string']['enable_position_increments'] = enable_position_increments
if fuzzy_prefix_length is not None:
instance['query_string']['fuzzy_prefix_length'] = fuzzy_prefix_length
if fuzzy_min_sim is not None:
instance['query_string']['fuzzy_min_sim'] = fuzzy_min_sim
if phrase_slop is not None:
instance['query_string']['phrase_slop'] = phrase_slop
if boost is not None:
instance['query_string']['boost'] = boost
if analyze_wildcard is not None:
instance['query_string']['analyze_wildcard'] = analyze_wildcard
if auto_generate_phrase_queries is not None:
instance['query_string']['auto_generate_phrase_queries'] = auto_generate_phrase_queries
if minimum_should_match is not None:
instance['query_string']['minimum_should_match'] = minimum_should_match
return instance
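# For reference (added, not from the source): with the usage shown in the
# docstring, the helper assembles a plain query_string clause equivalent to:
example_query = {
    'query_string': {
        'query': 'this AND that OR thus',
        'default_field': 'content',
    }
}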
def get(self, fragment_info):
"""
Get the value associated with the given key.
:param fragment_info: the text key
:type fragment_info: tuple of str ``(language, text)``
:raises: KeyError if the key is not present in the cache
"""
if not self.is_cached(fragment_info):
raise KeyError(u"Attempt to get text not cached")
return self.cache[fragment_info]
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
"""
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
"""
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def parse_spec(spec, relative_to=None, subproject_roots=None):
"""Parses a target address spec and returns the path from the root of the repo to this Target
and Target name.
:API: public
:param string spec: Target address spec.
:param string relative_to: path to use for sibling specs, ie: ':another_in_same_build_family',
interprets the missing spec_path part as `relative_to`.
:param list subproject_roots: Paths that correspond with embedded build roots under
the current build root.
For Example::
some_target(name='mytarget',
dependencies=['path/to/buildfile:targetname']
)
Where ``path/to/buildfile:targetname`` is the dependent target address spec
In case the target name is empty it returns the last component of the path as target name, ie::
spec_path, target_name = parse_spec('path/to/buildfile/foo')
Will return spec_path as 'path/to/buildfile/foo' and target_name as 'foo'.
Optionally, specs can be prefixed with '//' to denote an absolute spec path. This is normally
not significant except when a spec referring to a root level target is needed from deeper in
the tree. For example, in ``path/to/buildfile/BUILD``::
some_target(name='mytarget',
dependencies=[':targetname']
)
The ``targetname`` spec refers to a target defined in ``path/to/buildfile/BUILD*``. If instead
you want to reference ``targetname`` in a root level BUILD file, use the absolute form.
For example::
some_target(name='mytarget',
dependencies=['//:targetname']
)
"""
def normalize_absolute_refs(ref):
return strip_prefix(ref, '//')
subproject = longest_dir_prefix(relative_to, subproject_roots) if subproject_roots else None
def prefix_subproject(spec_path):
if not subproject:
return spec_path
elif spec_path:
return os.path.join(subproject, spec_path)
else:
return os.path.normpath(subproject)
spec_parts = spec.rsplit(':', 1)
if len(spec_parts) == 1:
default_target_spec = spec_parts[0]
spec_path = prefix_subproject(normalize_absolute_refs(default_target_spec))
target_name = os.path.basename(spec_path)
else:
spec_path, target_name = spec_parts
if not spec_path and relative_to:
spec_path = fast_relpath(relative_to, subproject) if subproject else relative_to
spec_path = prefix_subproject(normalize_absolute_refs(spec_path))
return spec_path, target_name
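# Worked examples (added for clarity) of the behaviour the docstring describes,
# assuming no relative_to or subproject_roots are given:
#   parse_spec('path/to/buildfile:targetname')  -> ('path/to/buildfile', 'targetname')
#   parse_spec('path/to/buildfile/foo')         -> ('path/to/buildfile/foo', 'foo')
#   parse_spec('//:targetname')                 -> ('', 'targetname')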
def validate_hier_intervals(intervals_hier):
'''Validate a hierarchical segment annotation.
Parameters
----------
intervals_hier : ordered list of segmentations
Raises
------
ValueError
If any segmentation does not span the full duration of the top-level
segmentation.
If any segmentation does not start at 0.
'''
# Synthesize a label array for the top layer.
label_top = util.generate_labels(intervals_hier[0])
boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))
for level, intervals in enumerate(intervals_hier[1:], 1):
# Make sure this level is consistent with the root
label_current = util.generate_labels(intervals)
validate_structure(intervals_hier[0], label_top,
intervals, label_current)
# Make sure all previous boundaries are accounted for
new_bounds = set(util.intervals_to_boundaries(intervals))
if boundaries - new_bounds:
warnings.warn('Segment hierarchy is inconsistent '
'at level {:d}'.format(level))
boundaries |= new_bounds
def _parse_caption(self, node, state):
"""Parse a Caption of the node.
:param node: The lxml node to parse
:param state: The global state necessary to place the node in context
of the document as a whole.
"""
if node.tag not in ["caption", "figcaption"]: # captions used in Tables
return state
# Add a Caption
parent = state["parent"][node]
stable_id = (
f"{state['document'].name}"
f"::"
f"{'caption'}"
f":"
f"{state['caption']['idx']}"
)
# Set name for Section
name = node.attrib["name"] if "name" in node.attrib else None
if isinstance(parent, Table):
state["context"][node] = Caption(
document=state["document"],
table=parent,
figure=None,
stable_id=stable_id,
name=name,
position=state["caption"]["idx"],
)
elif isinstance(parent, Figure):
state["context"][node] = Caption(
document=state["document"],
table=None,
figure=parent,
stable_id=stable_id,
name=name,
position=state["caption"]["idx"],
)
else:
raise NotImplementedError("Caption must be a child of Table or Figure.")
state["caption"]["idx"] += 1
return state
def guest_inspect_stats(self, userid_list):
"""Get the statistics including cpu and mem of the guests
:param userid_list: a single userid string or a list of guest userids
:returns: dictionary describing the cpu statistics of the vm
in the form {'UID1':
{
'guest_cpus': xx,
'used_cpu_time_us': xx,
'elapsed_cpu_time_us': xx,
'min_cpu_count': xx,
'max_cpu_limit': xx,
'samples_cpu_in_use': xx,
'samples_cpu_delay': xx,
'used_mem_kb': xx,
'max_mem_kb': xx,
'min_mem_kb': xx,
'shared_mem_kb': xx
},
'UID2':
{
'guest_cpus': xx,
'used_cpu_time_us': xx,
'elapsed_cpu_time_us': xx,
'min_cpu_count': xx,
'max_cpu_limit': xx,
'samples_cpu_in_use': xx,
'samples_cpu_delay': xx,
'used_mem_kb': xx,
'max_mem_kb': xx,
'min_mem_kb': xx,
'shared_mem_kb': xx
}
}
for the guests that are shutdown or not exist, no data
returned in the dictionary
"""
if not isinstance(userid_list, list):
userid_list = [userid_list]
action = "get the statistics of guest '%s'" % str(userid_list)
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._monitor.inspect_stats(userid_list)
def read(self, size=-1):  # File-like object.
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return self._empty_buffer
if size < 0:
self.expect (self.delimiter) # delimiter default is EOF
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
if self._buffer_type is bytes:
pat = (u'.{%d}' % size).encode('ascii')
else:
pat = u'.{%d}' % size
cre = re.compile(pat, re.DOTALL)
index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.after ### self.before should be ''. Should I assert this?
return self.before
async def event_wait(event: asyncio.Event, timeout=None):
'''
Wait on an asyncio event with an optional timeout
Returns:
True if the event was set, False if the wait timed out
'''
if timeout is None:
await event.wait()
return True
try:
await asyncio.wait_for(event.wait(), timeout)
except asyncio.TimeoutError:
return False
return True
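# Minimal usage sketch (added): waiting on an event both before and after it is set.
import asyncio
async def _demo():
    evt = asyncio.Event()
    result1 = await event_wait(evt, timeout=0.1)   # not set in time -> False
    evt.set()
    result2 = await event_wait(evt, timeout=0.1)   # already set -> True
    print(result1, result2)
asyncio.run(_demo())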
def parse_peddy_csv(self, f, pattern):
""" Parse csv output from peddy """
parsed_data = dict()
headers = None
s_name_idx = None
for l in f['f'].splitlines():
s = l.split(",")
if headers is None:
headers = s
try:
s_name_idx = [headers.index("sample_id")]
except ValueError:
try:
s_name_idx = [headers.index("sample_a"), headers.index("sample_b")]
except ValueError:
log.warn("Could not find sample name in Peddy output: {}".format(f['fn']))
return None
else:
s_name = '-'.join([s[idx] for idx in s_name_idx])
parsed_data[s_name] = dict()
for i, v in enumerate(s):
if i not in s_name_idx:
if headers[i] == "error" and pattern == "sex_check":
v = "True" if v == "False" else "False"
try:
# add the pattern as a suffix to key
parsed_data[s_name][headers[i] + "_" + pattern] = float(v)
except ValueError:
# add the pattern as a suffix to key
parsed_data[s_name][headers[i] + "_" + pattern] = v
if len(parsed_data) == 0:
return None
return parsed_data
def __generate(self):
"""Generates dates patterns"""
base = []
texted = []
for pat in ALL_PATTERNS:
data = pat.copy()
data['pattern'] = data['pattern']
data['right'] = True
data['basekey'] = data['key']
base.append(data)
data = pat.copy()
data['basekey'] = data['key']
data['key'] += ':time_1'
data['right'] = True
data['pattern'] = data['pattern'] + Optional(Literal(",")).suppress() + BASE_TIME_PATTERNS['pat:time:minutes']
data['time_format'] = '%H:%M'
data['length'] = {'min' : data['length']['min'] + 5, 'max' : data['length']['max'] + 8}
base.append(data)
data = pat.copy()
data['basekey'] = data['key']
data['right'] = True
data['key'] += ':time_2'
data['pattern'] = data['pattern'] + Optional(oneOf([',', '|'])).suppress() + BASE_TIME_PATTERNS['pat:time:full']
data['time_format'] = '%H:%M:%S'
data['length'] = {'min' : data['length']['min'] + 9, 'max' : data['length']['max'] + 9}
base.append(data)
for pat in base:
# Right
data = pat.copy()
data['key'] += ':t_right'
data['pattern'] = lineStart + data['pattern'] + Optional(oneOf([',', '|', ':', ')'])).suppress() + restOfLine.suppress()
data['length'] = {'min' : data['length']['min'] + 1, 'max' : data['length']['max'] + 90}
texted.append(data)
base.extend(texted)
self.patterns = base
def fmtval(value, colorstr=None, precision=None, spacing=True, trunc=True,
end=' '):
''' Formats and returns a given number according to specifications. '''
colwidth = opts.colwidth
# get precision
if precision is None:
precision = opts.precision
fmt = '%%.%sf' % precision
# format with decimal mark, separators
result = locale.format(fmt, value, True)
if spacing:
result = '%%%ss' % colwidth % result
if trunc:
if len(result) > colwidth: # truncate w/ellipsis
result = truncstr(result, colwidth)
# Add color if needed
if opts.incolor and colorstr:
return colorstr % result + end
else:
return result + end
def _combine_ranges_on_length(self, data_len, first, second):
'''
Combines a first range with a second range, where the second
range is considered within the scope of the first.
'''
first = get_true_slice(first, data_len)
second = get_true_slice(second, data_len)
final_start, final_step, final_stop = (None, None, None)
# Get our start
if first.start == None and second.start == None:
final_start = None
else:
final_start = (first.start if first.start else 0)+(second.start if second.start else 0)
# Get our stop
if second.stop == None:
final_stop = first.stop
elif first.stop == None:
final_stop = (first.start if first.start else 0) + second.stop
else:
final_stop = min(first.stop, (first.start if first.start else 0) + second.stop)
# Get our step
if first.step == None and second.step == None:
final_step = None
else:
final_step = (first.step if first.step else 1)*(second.step if second.step else 1)
# If we have a start above our stop, set them to be equal
if final_start is not None and final_stop is not None and final_start > final_stop:
final_start = final_stop
return slice(final_start, final_stop, final_step)
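# Illustration (added): composing the outer range 2:10 with the inner range 1:3
# yields 3:5, i.e. data[2:10][1:3] == data[3:5].
data = list(range(20))
assert data[2:10][1:3] == data[3:5] == [3, 4]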
def handle(self, message):
"""
Dispatches messages to appropriate handler based on opcode
Args:
message (dict): Full message from Discord websocket connection
"""
opcode = message['op']
if opcode == 10:
self.on_hello(message)
elif opcode == 11:
self.on_heartbeat(message)
elif opcode == 0:
self.on_message(message)
else:
logger.debug("Not a message we handle: OPCODE {}".format(opcode))
return
def usable_id(cls, id):
""" Retrieve id from input which can be num or id."""
try:
qry_id = int(id)
except Exception:
qry_id = None
if not qry_id:
msg = 'unknown identifier %s' % id
cls.error(msg)
return qry_id
def fit_tomography_data(tomo_data, method='wizard', options=None):
"""
Reconstruct a density matrix or process-matrix from tomography data.
If the input data is state_tomography_data the returned operator will
be a density matrix. If the input data is process_tomography_data the
returned operator will be a Choi-matrix in the column-vectorization
convention.
Args:
tomo_data (dict): process tomography measurement data.
method (str): the fitting method to use.
Available methods:
- 'wizard' (default)
- 'leastsq'
options (dict or None): additional options for fitting method.
Returns:
numpy.array: The fitted operator.
Available methods:
- 'wizard' (Default): The returned operator will be constrained to be
positive-semidefinite.
Options:
- 'trace': the trace of the returned operator.
The default value is 1.
- 'beta': hedging parameter for computing frequencies from
zero-count data. The default value is 0.50922.
- 'epsilon: threshold for truncating small eigenvalues to zero.
The default value is 0
- 'leastsq': Fitting without positive-semidefinite constraint.
Options:
- 'trace': Same as for 'wizard' method.
- 'beta': Same as for 'wizard' method.
Raises:
Exception: if the `method` parameter is not valid.
"""
if isinstance(method, str) and method.lower() in ['wizard', 'leastsq']:
# get options
trace = __get_option('trace', options)
beta = __get_option('beta', options)
# fit state
rho = __leastsq_fit(tomo_data, trace=trace, beta=beta)
if method == 'wizard':
# Use wizard method to constrain positivity
epsilon = __get_option('epsilon', options)
rho = __wizard(rho, epsilon=epsilon)
return rho
else:
raise Exception('Invalid reconstruction method "%s"' % method)
def respond_via_request(self, task):
"""
Handle the response after 55 seconds.
:param task:
:return:
"""
warn(f"Detected slow response into webhook. "
f"(Greater than {RESPONSE_TIMEOUT} seconds)\n"
f"Recommended to use 'async_task' decorator from Dispatcher for handler with long timeouts.",
TimeoutWarning)
dispatcher = self.get_dispatcher()
loop = dispatcher.loop
try:
results = task.result()
except Exception as e:
loop.create_task(
dispatcher.errors_handlers.notify(dispatcher, types.Update.get_current(), e))
else:
response = self.get_response(results)
if response is not None:
asyncio.ensure_future(response.execute_response(dispatcher.bot), loop=loop)
def to_python(self, value):
"""
Validates that the value is in self.choices and can be coerced to the right type.
"""
if value == self.emptyValue or value in EMPTY_VALUES:
return self.emptyValue
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
def default_exchange_proposed_fn(prob_exchange):
"""Default exchange proposal function, for replica exchange MC.
With probability `prob_exchange` propose combinations of replica for exchange.
When exchanging, create combinations of adjacent replicas in
[Replica Exchange Monte Carlo](
https://en.wikipedia.org/wiki/Parallel_tempering)
```
exchange_fn = default_exchange_proposed_fn(prob_exchange=0.5)
exchange_proposed = exchange_fn(num_replica=3)
exchange_proposed.eval()
==> [[0, 1]] # 1 exchange, 0 <--> 1
exchange_proposed.eval()
==> [] # 0 exchanges
```
Args:
prob_exchange: Scalar `Tensor` giving probability that any exchanges will
be generated.
Returns:
default_exchange_proposed_fn_: Python callable which take a number of
replicas (a Python integer), and return combinations of replicas for
exchange as an [n, 2] integer `Tensor`, `0 <= n <= num_replica // 2`,
with *unique* values in the set `{0, ..., num_replica}`.
"""
def default_exchange_proposed_fn_(num_replica, seed=None):
"""Default function for `exchange_proposed_fn` of `kernel`."""
seed_stream = distributions.SeedStream(seed, 'default_exchange_proposed_fn')
zero_start = tf.random.uniform([], seed=seed_stream()) > 0.5
if num_replica % 2 == 0:
def _exchange():
flat_exchange = tf.range(num_replica)
if num_replica > 2:
start = tf.cast(~zero_start, dtype=tf.int32)
end = num_replica - start
flat_exchange = flat_exchange[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
else:
def _exchange():
start = tf.cast(zero_start, dtype=tf.int32)
end = num_replica - tf.cast(~zero_start, dtype=tf.int32)
flat_exchange = tf.range(num_replica)[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
def _null_exchange():
return tf.reshape(tf.cast([], dtype=tf.int32), shape=[0, 2])
return tf.cond(
pred=tf.random.uniform([], seed=seed_stream()) < prob_exchange,
true_fn=_exchange,
false_fn=_null_exchange)
return default_exchange_proposed_fn_
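# Simplified NumPy analogue (added; not the library function) of the adjacent-pair
# proposal logic described above. Names and structure here are illustrative only.
import numpy as np
def propose_adjacent_pairs(num_replica, prob_exchange, rng=np.random):
    if rng.uniform() >= prob_exchange:
        return np.zeros((0, 2), dtype=int)             # no exchanges proposed
    start = rng.randint(2) if num_replica > 2 else 0   # randomly start at replica 0 or 1
    idx = np.arange(start, num_replica - (num_replica - start) % 2)
    return idx.reshape(-1, 2)                          # adjacent pairs, e.g. [[0, 1], [2, 3]]
print(propose_adjacent_pairs(5, prob_exchange=1.0))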
def get_relationship_admin_session_for_family(self, family_id):
"""Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``*
"""
if not self.supports_relationship_admin():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.RelationshipAdminSession(family_id, runtime=self._runtime)
def topk(timestep: int,
batch_size: int,
beam_size: int,
inactive: mx.nd.NDArray,
scores: mx.nd.NDArray,
hypotheses: List[ConstrainedHypothesis],
best_ids: mx.nd.NDArray,
best_word_ids: mx.nd.NDArray,
seq_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array, List[ConstrainedHypothesis], mx.nd.NDArray]:
"""
Builds a new topk list such that the beam contains hypotheses having completed different numbers of constraints.
These items are built from three different types: (1) the best items across the whole
scores matrix, (2) the set of words that must follow existing constraints, and (3) k-best items from each row.
:param timestep: The current decoder timestep.
:param batch_size: The number of segments in the batch.
:param beam_size: The length of the beam for each segment.
:param inactive: Array listing inactive rows (shape: (beam_size,)).
:param scores: The scores array (shape: (batch_size if t==1 else beam_size, target_vocab_size)).
:param hypotheses: The list of hypothesis objects.
:param best_ids: The current list of best hypotheses (shape: (beam_size,)).
:param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
:param seq_scores: (shape: (beam_size, 1)).
:return: A tuple containing the best hypothesis rows, the best hypothesis words, the scores,
the updated constrained hypotheses, and the updated set of inactive hypotheses.
"""
for sentno in range(batch_size):
rows = slice(sentno * beam_size, sentno * beam_size + beam_size)
if hypotheses[rows.start] is not None and hypotheses[rows.start].size() > 0:
best_ids[rows], best_word_ids[rows], seq_scores[rows], \
hypotheses[rows], inactive[rows] = _sequential_topk(timestep,
beam_size,
inactive[rows],
scores[rows],
hypotheses[rows],
best_ids[rows] - rows.start,
best_word_ids[rows],
seq_scores[rows])
# offsetting since the returned smallest_k() indices were slice-relative
best_ids[rows] += rows.start
else:
# If there are no constraints for this sentence in the batch, everything stays
# the same, except we need to mark all hypotheses as active
inactive[rows] = 0
return best_ids, best_word_ids, seq_scores, hypotheses, inactive
def expool(name):
"""
Confirm the existence of a kernel variable in the kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/expool_c.html
:param name: Name of the variable whose value is to be returned.
:type name: str
:return: True when the variable is in the pool.
:rtype: bool
"""
name = stypes.stringToCharP(name)
found = ctypes.c_int()
libspice.expool_c(name, ctypes.byref(found))
return bool(found.value)
def parse_date(self, value):
"""A lazy method to parse anything to date.
If input data type is:
- string: parse date from it
- integer: use from ordinal
- datetime: use date part
- date: just return it
"""
if value is None:
raise Exception("Unable to parse date from %r" % value)
elif isinstance(value, string_types):
return self.str2date(value)
elif isinstance(value, int):
return date.fromordinal(value)
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
else:
raise Exception("Unable to parse date from %r" % value) | A lazy method to parse anything to date.
If input data type is:
- string: parse date from it
- integer: use from ordinal
- datetime: use date part
- date: just return it |
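# Usage sketch (added) covering the four accepted input types; `p` is assumed to
# be an object exposing parse_date, with a str2date that understands ISO dates.
#   p.parse_date("2015-10-21")                    -> date(2015, 10, 21)
#   p.parse_date(date(2015, 10, 21).toordinal())  -> date(2015, 10, 21)
#   p.parse_date(datetime(2015, 10, 21, 8, 30))   -> date(2015, 10, 21)
#   p.parse_date(date(2015, 10, 21))              -> date(2015, 10, 21)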
def _update_armed_status(self, message=None, status=None, status_stay=None):
"""
Uses the provided message to update the armed state.
:param message: message to use to update
:type message: :py:class:`~alarmdecoder.messages.Message`
:param status: armed status, overrides message bits
:type status: bool
:param status_stay: armed stay status, overrides message bits
:type status_stay: bool
:returns: bool indicating the new status
"""
arm_status = status
stay_status = status_stay
if isinstance(message, Message):
arm_status = message.armed_away
stay_status = message.armed_home
if arm_status is None or stay_status is None:
return
self._armed_status, old_status = arm_status, self._armed_status
self._armed_stay, old_stay = stay_status, self._armed_stay
if arm_status != old_status or stay_status != old_stay:
if old_status is not None or message is None:
if self._armed_status or self._armed_stay:
self.on_arm(stay=stay_status)
else:
self.on_disarm()
return self._armed_status or self._armed_stay
def play(quiet, session_file, shell, speed, prompt, commentecho):
"""Play a session file."""
run(
session_file.readlines(),
shell=shell,
speed=speed,
quiet=quiet,
test_mode=TESTING,
prompt_template=prompt,
commentecho=commentecho,
)
def get_log(db, job_id):
"""
Extract the logs as a list of formatted strings
:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_id: a job ID
"""
logs = db('SELECT * FROM log WHERE job_id=?x ORDER BY id', job_id)
out = []
for log in logs:
time = str(log.timestamp)[:-4] # strip decimals
out.append('[%s #%d %s] %s' % (time, job_id, log.level, log.message))
return out
def list(self, request, *args, **kwargs):
"""
Each customer is associated with a group of users that represent customer owners. The link is maintained
through **api/customer-permissions/** endpoint.
To list all visible links, run a **GET** query against a list.
Response will contain a list of customer owners and their brief data.
To add a new user to the customer, **POST** a new relationship to **customer-permissions** endpoint:
.. code-block:: http
POST /api/customer-permissions/ HTTP/1.1
Accept: application/json
Authorization: Token 95a688962bf68678fd4c8cec4d138ddd9493c93b
Host: example.com
{
"customer": "http://example.com/api/customers/6c9b01c251c24174a6691a1f894fae31/",
"role": "owner",
"user": "http://example.com/api/users/82cec6c8e0484e0ab1429412fe4194b7/"
}
"""
return super(CustomerPermissionViewSet, self).list(request, *args, **kwargs)
def authorization_middleware(get_response):
""" Django middleware to parse incoming access tokens, validate them and
set an authorization function on the request.
The decision to use a generic middleware rather than an
AuthenticationMiddleware is explicitly made, because inctances of the
latter come with a number of assumptions (i.e. that user.is_authorized()
exists, or that request.user uses the User model).
Example usage:
::
request.is_authorized_for()
:param get_response: callable that creates the response object
:return: response
:todo:
Two things needs to be done when we can completely remove the Django
JWT plugin:
- Nested function 'middleware' allows both 'JWT' (not IANA-registered)
and 'Bearer' as Authorization header prefix; JWT should not be
accepted.
- The Django JWT middleware does not include the authz claim, so this
plugin does not fail if it is not present; this behavior is wrong
when we no longer use the Django JWT plugin.
"""
middleware_settings = settings()
logger = _create_logger(middleware_settings)
min_scope = middleware_settings['MIN_SCOPE']
def get_token_subject(sub):
return sub
def always_ok(*args, **kwargs):
return True
def authorize_function(scopes, token_signature, x_unique_id=None):
""" Creates a partial around :func:`levels.is_authorized`
that wraps the current user's scopes.
:return func:
"""
log_msg_scopes = 'Granted access (needed: {}, granted: {}, token: {})'
def is_authorized(*needed_scopes):
granted_scopes = set(scopes)
needed_scopes = set(needed_scopes)
result = needed_scopes.issubset(granted_scopes)
if result:
msg = log_msg_scopes.format(needed_scopes, granted_scopes, token_signature)
if x_unique_id:
msg += ' X-Unique-ID: {}'.format(x_unique_id)
logger.info(msg)
return result
return is_authorized
def authorize_forced_anonymous(_):
""" Authorize function for routes that are forced anonymous"""
raise Exception(
'Should not call is_authorized_for in anonymous routes')
def insufficient_scope():
"""Returns an HttpResponse object with a 401."""
msg = 'Bearer realm="datapunt", error="insufficient_scope"'
response = http.HttpResponse('Unauthorized', status=401)
response['WWW-Authenticate'] = msg
return response
def expired_token():
""" Returns an HttpResponse object with a 401
"""
msg = 'Bearer realm="datapunt", error="expired_token"'
response = http.HttpResponse('Unauthorized', status=401)
response['WWW-Authenticate'] = msg
return response
def invalid_token():
""" Returns an HttpResponse object with a 401
"""
msg = 'Bearer realm="datapunt", error="invalid_token"'
response = http.HttpResponse('Unauthorized', status=401)
response['WWW-Authenticate'] = msg
return response
def invalid_request():
""" Returns an HttpResponse object with a 400
"""
msg = (
"Bearer realm=\"datapunt\", error=\"invalid_request\", "
"error_description=\"Invalid Authorization header format; "
"should be: 'Bearer [token]'\"")
response = http.HttpResponse('Bad Request', status=400)
response['WWW-Authenticate'] = msg
return response
def token_data(authorization):
""" Get the token data present in the given authorization header.
"""
try:
prefix, token = authorization.split()
except ValueError:
logger.warning(
'Invalid Authorization header: {}'.format(authorization))
raise _AuthorizationHeaderError(invalid_request())
if prefix.lower() != 'bearer':
logger.warning(
'Invalid Authorization header: {}'.format(authorization))
raise _AuthorizationHeaderError(invalid_request())
try:
header = jwt.get_unverified_header(token)
except jwt.ExpiredSignatureError:
logger.info("Expired token")
raise _AuthorizationHeaderError(expired_token())
except (jwt.InvalidTokenError, jwt.DecodeError):
logger.exception("API authz problem: JWT decode error while reading header")
raise _AuthorizationHeaderError(invalid_token())
if 'kid' not in header:
logger.exception("Did not get a valid key identifier")
raise _AuthorizationHeaderError(invalid_token())
keys = middleware_settings['JWKS'].verifiers
if header['kid'] not in keys:
logger.exception("Unknown key identifier: {}".format(header['kid']))
raise _AuthorizationHeaderError(invalid_token())
key = keys[header['kid']]
try:
decoded = jwt.decode(token, key=key.key, algorithms=(key.alg,))
except jwt.InvalidTokenError:
logger.exception('API authz problem: could not decode access '
'token {}'.format(token))
raise _AuthorizationHeaderError(invalid_token())
if 'scopes' not in decoded:
logger.warning('API authz problem: access token misses '
'authz and scopes claim: {}'.format(token))
raise _AuthorizationHeaderError(invalid_token())
else:
scopes = decoded['scopes']
if 'sub' in decoded:
sub = decoded['sub']
else:
sub = None
token_signature = token.split('.')[2]
return scopes, token_signature, sub
def middleware(request):
""" Parses the Authorization header, decodes and validates the JWT and
adds the is_authorized_for function to the request.
"""
request_path = request.path
forced_anonymous = any(
request_path.startswith(route)
for route in middleware_settings['FORCED_ANONYMOUS_ROUTES'])
if middleware_settings['ALWAYS_OK']:
logger.warning('API authz DISABLED')
request.is_authorized_for = always_ok
request.get_token_subject = 'ALWAYS_OK'
return get_response(request)
is_options = request.method == 'OPTIONS'
if forced_anonymous or is_options:
authz_func = authorize_forced_anonymous
subject = None
else:
authorization = request.META.get('HTTP_AUTHORIZATION')
token_signature = ''
sub = None
if authorization:
try:
scopes, token_signature, sub = token_data(authorization)
except _AuthorizationHeaderError as e:
return e.response
else:
scopes = []
x_unique_id = request.META.get('HTTP_X_UNIQUE_ID')
authz_func = authorize_function(scopes, token_signature, x_unique_id)
subject = get_token_subject(sub)
if len(min_scope) > 0 and not authz_func(min_scope):
return insufficient_scope()
request.is_authorized_for = authz_func
request.get_token_subject = subject
response = get_response(request)
return response
return middleware
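# Simplified sketch (added; not the original implementation) of the scope check
# that authorize_function builds: a closure over the granted scopes. The scope
# names below are placeholders.
def make_is_authorized(granted_scopes):
    granted = set(granted_scopes)
    def is_authorized(*needed_scopes):
        return set(needed_scopes).issubset(granted)
    return is_authorized
check = make_is_authorized(['example/read', 'example/write'])
assert check('example/read') is True
assert check('example/read', 'other/admin') is False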
def _parse(jsonOutput):
'''
Parses JSON response from Tika REST API server
:param jsonOutput: JSON output from Tika Server
:return: a dictionary having 'metadata' and 'content' values
'''
parsed={}
if not jsonOutput:
return parsed
parsed["status"] = jsonOutput[0]
if jsonOutput[1] == None or jsonOutput[1] == "":
return parsed
realJson = json.loads(jsonOutput[1])
content = ""
for js in realJson:
if "X-TIKA:content" in js:
content += js["X-TIKA:content"]
if content == "":
content = None
parsed["content"] = content
parsed["metadata"] = {}
for js in realJson:
for n in js:
if n != "X-TIKA:content":
if n in parsed["metadata"]:
if not isinstance(parsed["metadata"][n], list):
parsed["metadata"][n] = [parsed["metadata"][n]]
parsed["metadata"][n].append(js[n])
else:
parsed["metadata"][n] = js[n]
return parsed
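# Illustration (added): the (status, json_string) tuple this helper expects and
# the dictionary it produces for it.
jsonOutput = (200, '[{"X-TIKA:content": "Hello", "Content-Type": "text/plain"}]')
# _parse(jsonOutput) returns:
# {'status': 200, 'content': 'Hello', 'metadata': {'Content-Type': 'text/plain'}}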
def _set_lsp_reoptimize_timer(self, v, load=False):
"""
Setter method for lsp_reoptimize_timer, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_reoptimize_timer (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_reoptimize_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_reoptimize_timer() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'300..65535']}), is_leaf=True, yang_name="lsp-reoptimize-timer", rest_name="reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Reoptimization timer', u'cli-full-no': None, u'alt-name': u'reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_reoptimize_timer must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'300..65535']}), is_leaf=True, yang_name="lsp-reoptimize-timer", rest_name="reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Reoptimization timer', u'cli-full-no': None, u'alt-name': u'reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__lsp_reoptimize_timer = t
if hasattr(self, '_set'):
self._set()
def is_Linear(self):
"""Returns True if expression is linear (a polynomial with degree 1 or 0) (read-only)."""
coeff_dict = self.expression.as_coefficients_dict()
for key in coeff_dict.keys():
if len(key.free_symbols) < 2 and (key.is_Add or key.is_Mul or key.is_Atom):
pass
else:
return False
if key.is_Pow and key.args[1] != 1:
return False
else:
return True
def send(message, request_context=None, binary=False):
"""Sends a message to websocket.
:param str message: data to send
:param request_context:
:raises IOError: If unable to send a message.
"""
if binary:
return uwsgi.websocket_send_binary(message, request_context)
return uwsgi.websocket_send(message, request_context) | Sends a message to websocket.
:param str message: data to send
:param request_context:
:raises IOError: If unable to send a message. |
def interact_GxG(pheno,snps1,snps2=None,K=None,covs=None):
"""
Epistasis test between two sets of SNPs
Args:
pheno: [N x 1] SP.array of 1 phenotype for N individuals
snps1: [N x S1] SP.array of S1 SNPs for N individuals
snps2: [N x S2] SP.array of S2 SNPs for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
Returns:
pv: [S2 x S1] SP.array of P values for epistasis tests between all SNPs in
snps1 and snps2
"""
N=snps1.shape[0]
if K is None:
    K=SP.eye(N)
if snps2 is None:
snps2 = snps1
return interact_GxE(snps=snps1,pheno=pheno,env=snps2,covs=covs,K=K) | Epistasis test between two sets of SNPs
Args:
pheno: [N x 1] SP.array of 1 phenotype for N individuals
snps1: [N x S1] SP.array of S1 SNPs for N individuals
snps2: [N x S2] SP.array of S2 SNPs for N individuals
K: [N x N] SP.array of LMM-covariance/kinship coefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
Returns:
pv: [S2 x S1] SP.array of P values for epistasis tests between all SNPs in
snps1 and snps2 |
def is_identity(self):
"""If `self` is I, returns True, otherwise False."""
if not self.terms:
return True
return len(self.terms) == 1 and not self.terms[0].ops and self.terms[0].coeff == 1.0 | If `self` is I, returns True, otherwise False. |
def send_data(self):
"""Send data packets from the local file to the server"""
if not self.connection._sock:
raise err.InterfaceError("(0, '')")
conn = self.connection
try:
with open(self.filename, 'rb') as open_file:
packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename))
finally:
# send the empty packet to signify we are done sending data
conn.write_packet(b'') | Send data packets from the local file to the server |
def doc_files(self):
"""Returns list of doc files that should be used for %doc in specfile.
Returns:
List of doc files from the archive - only basenames, not full
paths.
"""
doc_files = []
for doc_file_re in settings.DOC_FILES_RE:
doc_files.extend(
self.archive.get_files_re(doc_file_re, ignorecase=True))
return ['/'.join(x.split('/')[1:]) for x in doc_files] | Returns list of doc files that should be used for %doc in specfile.
Returns:
List of doc files from the archive - only basenames, not full
paths. |
def build_dir():
'''
Build the directory used for templates.
'''
tag_arr = ['add', 'edit', 'view', 'list', 'infolist']
path_arr = [os.path.join(CRUD_PATH, x) for x in tag_arr]
for wpath in path_arr:
if os.path.exists(wpath):
continue
os.makedirs(wpath) | Build the directory used for templates. |
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind) | We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None |
def _replaces(self):
"""tge"""
return {concat(a, c, b[1:])
for a, b in self.slices[:-1]
for c in ALPHABET} | tge |
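The method above reads like the replacement step of a Norvig-style spelling corrector, where `self.slices` holds every (head, tail) split of a word and `ALPHABET` is the candidate character set. A self-contained sketch of the same idea, with hypothetical standalone names:

ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

def replaces(word):
    # every split of the word with a non-empty tail, so there is a character to replace
    slices = [(word[:i], word[i:]) for i in range(len(word))]
    return {head + c + tail[1:] for head, tail in slices for c in ALPHABET}

print(len(replaces('cat')))  # at most 26 * 3 candidates, minus duplicates such as 'cat' itself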
def _parse_row(self, i):
"""Parses row
:param i: index of row to parse
"""
row = self.data[i]
for j in range(len(row)):
self.data[i][j] = self._parse_value(self.data[i][j]) | Parses row
:param i: index of row to parse |
def add_project(self, path):
"""
Adds a project.
:param path: Project path.
:type path: unicode
:return: Method success.
:rtype: bool
"""
if not foundations.common.path_exists(path):
return False
path = os.path.normpath(path)
if self.__model.get_project_nodes(path):
self.__engine.notifications_manager.warnify(
"{0} | '{1}' project is already opened!".format(self.__class__.__name__, path))
return False
LOGGER.info("{0} | Adding '{1}' project!".format(self.__class__.__name__, path))
project_node = self.__model.register_project(path)
if not project_node:
return False
self.__model.set_project_nodes(project_node)
return True | Adds a project.
:param path: Project path.
:type path: unicode
:return: Method success.
:rtype: bool |
def check_for_lime(self, pattern):
"""
Check to see if LiME has loaded on the remote system
:type pattern: str
:param pattern: pattern to check output against
:type listen_port: int
:param listen_port: port LiME is listening for connections on
"""
check = self.commands.lime_check.value
lime_loaded = False
result = self.shell.execute(check)
stdout = self.shell.decode(result['stdout'])
connections = self.net_parser.parse(stdout)
for conn in connections:
local_addr, remote_addr = conn
if local_addr == pattern:
lime_loaded = True
break
return lime_loaded | Check to see if LiME has loaded on the remote system
:type pattern: str
:param pattern: pattern to check output against
:type listen_port: int
:param listen_port: port LiME is listening for connections on |
def pick_peaks(nc, L=16):
"""Obtain peaks from a novelty curve using an adaptive threshold."""
offset = nc.mean() / 20.
nc = filters.gaussian_filter1d(nc, sigma=4) # Smooth out nc
th = filters.median_filter(nc, size=L) + offset
#th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
peaks = []
for i in range(1, nc.shape[0] - 1):
# is it a peak?
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
# is it above the threshold?
if nc[i] > th[i]:
peaks.append(i)
#plt.plot(nc)
#plt.plot(th)
#for peak in peaks:
#plt.axvline(peak)
#plt.show()
return peaks | Obtain peaks from a novelty curve using an adaptive threshold. |
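A small usage sketch for pick_peaks, assuming scipy is importable and that `filters` in the function above refers to `scipy.ndimage` (these names are assumptions, not taken from the original module):

import numpy as np
from scipy.ndimage import filters  # provides gaussian_filter1d / median_filter used above

# synthetic novelty curve: a few bumps plus a little noise
nc = np.abs(np.sin(np.linspace(0, 6 * np.pi, 300))) + 0.02 * np.random.rand(300)
peaks = pick_peaks(nc, L=16)
print("found %d peaks" % len(peaks))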
def blank_dc(self, n_coarse_chan):
""" Blank DC bins in coarse channels.
Note: currently only works if entire file is read
"""
if n_coarse_chan < 1:
logger.warning('Coarse channel number < 1, unable to blank DC bin.')
return None
if not n_coarse_chan % int(n_coarse_chan) == 0:
logger.warning('Selection does not contain an integer number of coarse channels, unable to blank DC bin.')
return None
n_coarse_chan = int(n_coarse_chan)
n_chan = self.data.shape[-1]
n_chan_per_coarse = int(n_chan / n_coarse_chan)
mid_chan = int(n_chan_per_coarse / 2)
for ii in range(n_coarse_chan):
ss = ii*n_chan_per_coarse
self.data[..., ss+mid_chan] = np.median(self.data[..., ss+mid_chan+5:ss+mid_chan+10]) | Blank DC bins in coarse channels.
Note: currently only works if entire file is read |
def bam_needs_processing(data):
"""Check if a work input needs processing for parallelization.
"""
return ((data.get("work_bam") or data.get("align_bam")) and
(any(tz.get_in(["config", "algorithm", x], data) for x in
["variantcaller", "mark_duplicates", "recalibrate", "realign", "svcaller",
"jointcaller", "variant_regions"])
or any(k in data for k in ["cwl_keys", "output_cwl_keys"]))) | Check if a work input needs processing for parallelization. |
def timing(name, delta, rate=1, tags=None):
"""Sends new timing information. `delta` is in milliseconds.
>>> import statsdecor
>>> statsdecor.timing('my.metric', 314159265359)
"""
return client().timing(name, delta, rate=rate, tags=tags) | Sends new timing information. `delta` is in milliseconds.
>>> import statsdecor
>>> statsdecor.timing('my.metric', 314159265359) |
def get_subset(self, subset):
"""
Retrieve a subset of items
Accepts a single argument: a list of item IDs
"""
if len(subset) > 50:
raise ze.TooManyItems("You may only retrieve 50 items per call")
# remember any url parameters that have been set
params = self.url_params
retr = []
for itm in subset:
retr.extend(self.item(itm))
self.url_params = params
# clean up URL params when we're finished
self.url_params = None
return retr | Retrieve a subset of items
Accepts a single argument: a list of item IDs |
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, _= SubAccPush.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_sub_acc_push(ret_code, msg)
return ret_code, msg | receive response callback function |
def _keplerian_to_keplerian_mean(cls, coord, center):
"""Conversion from Keplerian to Keplerian Mean
The difference is the use of Mean anomaly instead of True anomaly
"""
a, e, i, Ω, ω, ν = coord
if e < 1:
# Elliptic case
cos_E = (e + cos(ν)) / (1 + e * cos(ν))
sin_E = (sin(ν) * sqrt(1 - e ** 2)) / (1 + e * cos(ν))
E = arctan2(sin_E, cos_E) % (2 * np.pi)
M = E - e * sin(E) # Mean anomaly
else:
# Hyperbolic case
H = arccosh((e + cos(ν)) / (1 + e * cos(ν)))
M = e * sinh(H) - H
return np.array([a, e, i, Ω, ω, M], dtype=float) | Conversion from Keplerian to Keplerian Mean
The difference is the use of Mean anomaly instead of True anomaly |
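The elliptic branch is the closed-form conversion ν → E → M, i.e. Kepler's equation evaluated forwards. A standalone numeric check, independent of the class above:

import numpy as np

def true_to_mean_anomaly(nu, e):
    # eccentric anomaly from the true anomaly
    cos_E = (e + np.cos(nu)) / (1 + e * np.cos(nu))
    sin_E = (np.sin(nu) * np.sqrt(1 - e ** 2)) / (1 + e * np.cos(nu))
    E = np.arctan2(sin_E, cos_E) % (2 * np.pi)
    return E - e * np.sin(E)  # Kepler's equation: M = E - e*sin(E)

# for 0 < nu < pi the mean anomaly lags the true anomaly
print(true_to_mean_anomaly(np.pi / 3, 0.1))  # ~0.88 rad, less than pi/3 ~ 1.047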
def stmt_star_handler(
self,
stmts,
prev_node_to_avoid=None
):
"""Handle stmt* expressions in an AST node.
Links all statements together in a list of statements, accounting for statements with multiple last nodes.
"""
break_nodes = list()
cfg_statements = list()
self.prev_nodes_to_avoid.append(prev_node_to_avoid)
self.last_control_flow_nodes.append(None)
first_node = None
node_not_to_step_past = self.nodes[-1]
for stmt in stmts:
node = self.visit(stmt)
if isinstance(node, ControlFlowNode) and not isinstance(node.test, TryNode):
self.last_control_flow_nodes.append(node.test)
else:
self.last_control_flow_nodes.append(None)
if isinstance(node, ControlFlowNode):
break_nodes.extend(node.break_statements)
elif isinstance(node, BreakNode):
break_nodes.append(node)
if not isinstance(node, IgnoredNode):
cfg_statements.append(node)
if not first_node:
if isinstance(node, ControlFlowNode):
first_node = node.test
else:
first_node = get_first_node(
node,
node_not_to_step_past
)
self.prev_nodes_to_avoid.pop()
self.last_control_flow_nodes.pop()
connect_nodes(cfg_statements)
if cfg_statements:
if first_node:
first_statement = first_node
else:
first_statement = get_first_statement(cfg_statements[0])
last_statements = get_last_statements(cfg_statements)
return ConnectStatements(
first_statement=first_statement,
last_statements=last_statements,
break_statements=break_nodes
)
else: # When body of module only contains ignored nodes
return IgnoredNode() | Handle stmt* expressions in an AST node.
Links all statements together in a list of statements, accounting for statements with multiple last nodes. |
def add(self, parent, obj_type, **attributes):
""" IXN API add command
@param parent: object parent - object will be created under this parent.
@param object_type: object type.
@param attributes: additional attributes.
@return: IXN object reference.
"""
return self.ixn.add(parent.obj_ref(), obj_type, *self._get_args_list(**attributes)) | IXN API add command
@param parent: object parent - object will be created under this parent.
@param object_type: object type.
@param attributes: additional attributes.
@return: IXN object reference. |
def start_consuming(self, to_tuple=False, auto_decode=True):
"""Start consuming messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
while not self.is_closed:
self.process_data_events(
to_tuple=to_tuple,
auto_decode=auto_decode
)
if self.consumer_tags:
sleep(IDLE_WAIT)
continue
break | Start consuming messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return: |
def _height_is_big_enough(image, height):
"""Check that the image height is superior to `height`"""
if height > image.size[1]:
raise ImageSizeError(image.size[1], height) | Check that the image height is superior to `height` |
def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
"""
rotates data around axis by a given angle
Parameters
----------
data: ndarray
3d array
axis: tuple
axis to rotate by angle about
axis = (x,y,z)
angle: float
center: tuple or None
origin of rotation (cz,cy,cx) in pixels
if None, center is the middle of data
mode: string
boundary mode, one of the following:
'constant'
pads with zeros
'edge'
pads with edge values
'wrap'
pads with the repeated version of the input
interpolation, string
interpolation mode, one of the following
'linear'
'nearest'
Returns
-------
res: ndarray
rotated array (same shape as input)
"""
if center is None:
center = tuple([s // 2 for s in data.shape])
cx, cy, cz = center
m = np.dot(mat4_translate(cx, cy, cz),
np.dot(mat4_rotate(angle, *axis),
mat4_translate(-cx, -cy, -cz)))
m = np.linalg.inv(m)
return affine(data, m, mode=mode, interpolation=interpolation) | rotates data around axis by a given angle
Parameters
----------
data: ndarray
3d array
axis: tuple
axis to rotate by angle about
axis = (x,y,z)
angle: float
center: tuple or None
origin of rotation (cz,cy,cx) in pixels
if None, center is the middle of data
mode: string
boundary mode, one of the following:
'constant'
pads with zeros
'edge'
pads with edge values
'wrap'
pads with the repeated version of the input
interpolation, string
interpolation mode, one of the following
'linear'
'nearest'
Returns
-------
res: ndarray
rotated array (same shape as input) |
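A hedged usage sketch, assuming `rotate` and its helpers (`mat4_translate`, `mat4_rotate`, `affine`) are importable from the same module:

import numpy as np

data = np.zeros((32, 32, 32), dtype=np.float32)
data[10:22, 10:22, 10:22] = 1.0  # a solid cube in the middle of the volume

# rotate 45 degrees about the z axis, padding with zeros outside the volume
out = rotate(data, axis=(0, 0, 1.0), angle=np.pi / 4, mode="constant")
print(out.shape)  # (32, 32, 32) -- same shape as the input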
def get_deviations(self):
"""get the deviations of the ensemble value from the mean vector
Returns
-------
en : pyemu.Ensemble
Ensemble of deviations from the mean
"""
mean_vec = self.mean()
df = self.loc[:,:].copy()
for col in df.columns:
df.loc[:,col] -= mean_vec[col]
return type(self).from_dataframe(pst=self.pst,df=df) | get the deviations of the ensemble value from the mean vector
Returns
-------
en : pyemu.Ensemble
Ensemble of deviations from the mean |
def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
"""
Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted to the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted to the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID.
"""
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u('Cannot start archive, {0} is not a valid output mode').format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u('Invalid parameters: Resolution cannot be supplied for individual output mode.'))
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
"""
The HTTP response has a 400 status code in the following cases:
You do not pass in a session ID or you pass in an invalid session ID.
No clients are actively connected to the OpenTok session.
You specify an invalid resolution value.
The outputMode property is set to "individual" and you set the resolution property (which is not supported in individual stream archives).
"""
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code) | Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted to the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted to the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID. |
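A usage sketch for the method above; the import path, API key, and session ID below are placeholders and assumed exports, not values from the original project:

from opentok import OpenTok, OutputModes  # assumed public exports of the SDK

opentok = OpenTok(api_key="12345", api_secret="SECRET")
archive = opentok.start_archive(
    "SESSION_ID",
    name="weekly standup",
    output_mode=OutputModes.composed,
    resolution="1280x720",  # only valid for composed archives, as documented above
)
print(archive.id)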
def _get_float_remainder(fvalue, signs=9):
"""
Get remainder of float, i.e. 2.05 -> '05'
@param fvalue: input value
@type fvalue: C{integer types}, C{float} or C{Decimal}
@param signs: maximum number of signs
@type signs: C{integer types}
@return: remainder
@rtype: C{str}
@raise ValueError: fvalue is negative
@raise ValueError: signs overflow
"""
check_positive(fvalue)
if isinstance(fvalue, six.integer_types):
return "0"
if isinstance(fvalue, Decimal) and fvalue.as_tuple()[2] == 0:
# Decimal.as_tuple() -> (sign, digit_tuple, exponent)
# if the exponent is "0", there is no fractional part
return "0"
signs = min(signs, len(FRACTIONS))
# the remainder is taken from the string form, because fractional parts
# like X.0Y would otherwise collapse to X.Y
remainder = str(fvalue).split('.')[1]
iremainder = int(remainder)
orig_remainder = remainder
factor = len(str(remainder)) - signs
if factor > 0:
# more digits after the decimal point than signs, round off
iremainder = int(round(iremainder / (10.0**factor)))
format = "%%0%dd" % min(len(remainder), signs)
remainder = format % iremainder
if len(remainder) > signs:
# complain if rounding a value like 0.998 overflows the sign limit
raise ValueError("Signs overflow: I can't round only fractional part \
of %s to fit %s in %d signs" % \
(str(fvalue), orig_remainder, signs))
return remainder | Get remainder of float, i.e. 2.05 -> '05'
@param fvalue: input value
@type fvalue: C{integer types}, C{float} or C{Decimal}
@param signs: maximum number of signs
@type signs: C{integer types}
@return: remainder
@rtype: C{str}
@raise ValueError: fvalue is negative
@raise ValueError: signs overflow |
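Worked examples of the behaviour described in the docstring, assuming `check_positive` and `FRACTIONS` come from the same module and `FRACTIONS` has at least a few entries:

print(_get_float_remainder(2.05))       # '05'
print(_get_float_remainder(7))          # '0'  -- integers have no fractional part
print(_get_float_remainder(1.2345, 2))  # '23' -- rounded to two signs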
def get_encrypted_field(base_class):
"""
A get or create method for encrypted fields, we cache the field in
the module to avoid recreation. This also allows us to always return
the same class reference for a field.
:type base_class: models.Field[T]
:rtype: models.Field[EncryptedMixin, T]
"""
assert not isinstance(base_class, models.Field)
field_name = 'Encrypted' + base_class.__name__
if base_class not in FIELD_CACHE:
FIELD_CACHE[base_class] = type(field_name,
(EncryptedMixin, base_class), {
'base_class': base_class,
})
return FIELD_CACHE[base_class] | A get or create method for encrypted fields, we cache the field in
the module to avoid recreation. This also allows us to always return
the same class reference for a field.
:type base_class: models.Field[T]
:rtype: models.Field[EncryptedMixin, T] |
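A hedged usage sketch; the model below is hypothetical and assumes a configured Django project with `EncryptedMixin` and `FIELD_CACHE` defined in the same module:

from django.db import models

EncryptedCharField = get_encrypted_field(models.CharField)

class Profile(models.Model):
    # behaves like a CharField, but values go through EncryptedMixin on save/load
    ssn = EncryptedCharField(max_length=64)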
def parse_data_line(self, sline):
"""
Parses the data line and builds the dictionary.
:param sline: a split data line to parse
:returns: the number of rows to jump and parse the next data line or return the code error -1
"""
# if the number of values found differs from the number of headers, it's an error
if len(sline) != len(self._columns):
self.err("One data line has the wrong number of items")
return -1
values = {}
remark = ''
date = ''
resid = ''
for idx, result in enumerate(sline):
if self._columns[idx] == 'Date':
date = self.csvDate2BikaDate(result)
elif self._columns[idx] == 'Patient no.':
resid = result
elif self._columns[idx] == 'Customer no.':
remark = result
elif self._columns[idx] != '':
values[self._columns[idx]] = {
'result': result,
'DefaultResult': 'result',
'Remarks': remark,
'DateTime': date,
}
self._addRawResult(resid, values, False)
return 0 | Parses the data line and builds the dictionary.
:param sline: a split data line to parse
:returns: the number of rows to jump and parse the next data line or return the code error -1 |
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True | Check whether pid exists in the current process table. |
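A quick sanity check (POSIX semantics, since the probe relies on os.kill with signal 0):

import os

print(pid_exists(os.getpid()))  # True  -- the current process certainly exists
print(pid_exists(-1))           # False -- negative PIDs are rejected up front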
def setup_mturk_connection(self):
''' Connect to turk '''
if ((self.aws_access_key_id == 'YourAccessKeyId') or
(self.aws_secret_access_key == 'YourSecretAccessKey')):
print "AWS access key not set in ~/.psiturkconfig; please enter a valid access key."
assert False
if self.is_sandbox:
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
else:
endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
self.mtc = boto3.client('mturk',
region_name='us-east-1',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
endpoint_url=endpoint_url)
return True | Connect to turk |
def has_axon(neuron, treefun=_read_neurite_type):
'''Check if a neuron has an axon
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result
'''
return CheckResult(NeuriteType.axon in (treefun(n) for n in neuron.neurites)) | Check if a neuron has an axon
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result |
def forget(self, obj):
'''
Forgets about an entity (automatically called when an entity is
deleted). Call this to ensure that an entity that you've modified is
not automatically saved on ``session.commit()`` .
'''
self._init()
self.known.pop(obj._pk, None)
self.wknown.pop(obj._pk, None) | Forgets about an entity (automatically called when an entity is
deleted). Call this to ensure that an entity that you've modified is
not automatically saved on ``session.commit()`` . |
def IntegerLike(msg=None):
'''
Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of digits
'''
def fn(value):
if not any([
isinstance(value, numbers.Integral),
(isinstance(value, float) and value.is_integer()),
(isinstance(value, basestring) and value.isdigit())
]):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of digits |
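A usage sketch with voluptuous, which this validator style matches; note the validator as written targets Python 2 (`basestring`), so under Python 3 define `basestring = str` before using it:

from voluptuous import Schema, Invalid

schema = Schema({'page': IntegerLike(msg='page must be an integer')})
print(schema({'page': 42}))   # {'page': 42} -- plain ints pass through unchanged
try:
    schema({'page': 3.5})
except Invalid as exc:
    print(exc)                # message mentions 'page must be an integer'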
def add_middleware(middleware: EFBMiddleware):
"""
Register a middleware with the coordinator.
Args:
middleware (EFBMiddleware): Middleware to register
"""
global middlewares
if isinstance(middleware, EFBMiddleware):
middlewares.append(middleware)
else:
raise TypeError("Middleware instance is expected") | Register a middleware with the coordinator.
Args:
middleware (EFBMiddleware): Middleware to register |
def attach(self, media: typing.Union[InputMedia, typing.Dict]):
"""
Attach media
:param media:
"""
if isinstance(media, dict):
if 'type' not in media:
raise ValueError(f"Invalid media!")
media_type = media['type']
if media_type == 'photo':
media = InputMediaPhoto(**media)
elif media_type == 'video':
media = InputMediaVideo(**media)
# elif media_type == 'document':
# media = InputMediaDocument(**media)
# elif media_type == 'audio':
# media = InputMediaAudio(**media)
# elif media_type == 'animation':
# media = InputMediaAnimation(**media)
else:
raise TypeError(f"Invalid media type '{media_type}'!")
elif not isinstance(media, InputMedia):
raise TypeError(f"Media must be an instance of InputMedia or dict, not {type(media).__name__}")
elif media.type in ['document', 'audio', 'animation']:
raise ValueError(f"This type of media is not supported by media groups!")
self.media.append(media) | Attach media
:param media: |
def server_powerstatus(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
return the power status for the passed module
CLI Example:
.. code-block:: bash
salt dell drac.server_powerstatus
'''
ret = __execute_ret('serveraction powerstatus',
host=host, admin_username=admin_username,
admin_password=admin_password,
module=module)
result = {'retcode': 0}
if ret['stdout'] == 'ON':
result['status'] = True
result['comment'] = 'Power is on'
if ret['stdout'] == 'OFF':
result['status'] = False
result['comment'] = 'Power is off'
if ret['stdout'].startswith('ERROR'):
result['status'] = False
result['comment'] = ret['stdout']
return result | return the power status for the passed module
CLI Example:
.. code-block:: bash
salt dell drac.server_powerstatus |
def _append_params(oauth_params, params):
"""Append OAuth params to an existing set of parameters.
Both params and oauth_params is must be lists of 2-tuples.
Per `section 3.5.2`_ and `3.5.3`_ of the spec.
.. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
.. _`3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3
"""
merged = list(params)
merged.extend(oauth_params)
# The request URI / entity-body MAY include other request-specific
# parameters, in which case, the protocol parameters SHOULD be appended
# following the request-specific parameters, properly separated by an "&"
# character (ASCII code 38)
merged.sort(key=lambda i: i[0].startswith('oauth_'))
return merged | Append OAuth params to an existing set of parameters.
Both params and oauth_params is must be lists of 2-tuples.
Per `section 3.5.2`_ and `3.5.3`_ of the spec.
.. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
.. _`3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3 |
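A worked example of the ordering rule: the stable sort keeps request-specific parameters first and pushes the oauth_* ones to the end:

params = [('b', '2'), ('a', '1')]
oauth_params = [('oauth_nonce', 'xyz'), ('oauth_timestamp', '123')]
print(_append_params(oauth_params, params))
# [('b', '2'), ('a', '1'), ('oauth_nonce', 'xyz'), ('oauth_timestamp', '123')]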
def get_and_write(self, iso_path, local_path, blocksize=8192):
# type: (str, str, int) -> None
'''
(deprecated) Fetch a single file from the ISO and write it out to the
specified file. Note that this will overwrite the contents of the local
file if it already exists. Also note that 'iso_path' must be an
absolute path to the file. Finally, the 'iso_path' can be an ISO9660
path, a Rock Ridge path, or a Joliet path. In the case of ambiguity,
the Joliet path is tried first, followed by the ISO9660 path, followed
by the Rock Ridge path. It is recommended to use the get_file_from_iso
API instead to resolve this ambiguity.
Parameters:
iso_path - The absolute path to the file to get data from.
local_path - The local filename to write the contents to.
blocksize - The blocksize to use when copying data; the default is 8192.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
with open(local_path, 'wb') as fp:
self._get_and_write_fp(utils.normpath(iso_path), fp, blocksize) | (deprecated) Fetch a single file from the ISO and write it out to the
specified file. Note that this will overwrite the contents of the local
file if it already exists. Also note that 'iso_path' must be an
absolute path to the file. Finally, the 'iso_path' can be an ISO9660
path, a Rock Ridge path, or a Joliet path. In the case of ambiguity,
the Joliet path is tried first, followed by the ISO9660 path, followed
by the Rock Ridge path. It is recommended to use the get_file_from_iso
API instead to resolve this ambiguity.
Parameters:
iso_path - The absolute path to the file to get data from.
local_path - The local filename to write the contents to.
blocksize - The blocksize to use when copying data; the default is 8192.
Returns:
Nothing. |
def readmegen():
"""
Build documentation.
"""
hitchpylibrarytoolkit.readmegen(
_storybook(), DIR.project, DIR.key, DIR.gen, "commandlib"
) | Build documentation. |
def comparison(self, lhs, rhs):
"""
(2.6, 2.7) comparison: expr (comp_op expr)*
(3.0, 3.1) comparison: star_expr (comp_op star_expr)*
(3.2-) comparison: expr (comp_op expr)*
"""
if len(rhs) > 0:
return ast.Compare(left=lhs, ops=list(map(lambda x: x[0], rhs)),
comparators=list(map(lambda x: x[1], rhs)),
loc=lhs.loc.join(rhs[-1][1].loc))
else:
return lhs | (2.6, 2.7) comparison: expr (comp_op expr)*
(3.0, 3.1) comparison: star_expr (comp_op star_expr)*
(3.2-) comparison: expr (comp_op expr)* |
def get_bug_report():
"""Generate information for a bug report
:return: information for bug report
"""
platform_info = BugReporter.get_platform_info()
module_info = {
'version': hal_version.__version__,
'build': hal_version.__build__
}
return {
'platform': platform_info,
'pyhal': module_info
} | Generate information for a bug report
:return: information for bug report |
def close_document(self, path):
"""
Closes a text document.
:param path: Path of the document to close.
"""
to_close = []
for widget in self.widgets(include_clones=True):
p = os.path.normpath(os.path.normcase(widget.file.path))
path = os.path.normpath(os.path.normcase(path))
if p == path:
to_close.append(widget)
for widget in to_close:
tw = widget.parent_tab_widget
tw.remove_tab(tw.indexOf(widget)) | Closes a text document.
:param path: Path of the document to close. |
def sense_dep(self, target):
"""Search for a DEP Target in active or passive communication mode.
"""
# Set timeout for PSL_RES and ATR_RES
self.chipset.rf_configuration(0x02, b"\x0B\x0B\x0A")
return super(Device, self).sense_dep(target) | Search for a DEP Target in active or passive communication mode. |
def _prepare_uimodules(self):
"""Prepare the UI Modules from a list of namespaced paths."""
for key, value in self._config.get(config.UI_MODULES, {}).iteritems():
self._config[config.UI_MODULES][key] = self._import_class(value)
self._config[config.UI_MODULES] = dict(self._config[config.UI_MODULES] or {}) | Prepare the UI Modules from a list of namespaced paths. |
async def load_message_field(obj, msg, field, field_archiver=None):
"""
Loads message field from the object. Field is defined by the message field specification.
Returns loaded value, supports field reference.
:param obj:
:param msg:
:param field:
:param field_archiver:
:return:
"""
fname, ftype, params = field[0], field[1], field[2:]
field_archiver = field_archiver if field_archiver else load_field
await field_archiver(obj[fname], ftype, params, eref(msg, fname)) | Loads message field from the object. Field is defined by the message field specification.
Returns loaded value, supports field reference.
:param obj:
:param msg:
:param field:
:param field_archiver:
:return: |
def clear(self):
""" clear all tree data
"""
self._delete_child_storage(self.root_node)
self._delete_node_storage(self.root_node)
self.root_node = BLANK_NODE | clear all tree data |
def sevenths_inv(reference_labels, estimated_labels):
"""Compare chords along MIREX 'sevenths' rules. Chords with qualities
outside [maj, maj7, 7, min, min7, N] are ignored.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.sevenths_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut.
"""
validate(reference_labels, estimated_labels)
seventh_qualities = ['maj', 'min', 'maj7', '7', 'min7', '']
valid_semitones = np.array([QUALITIES[name] for name in seventh_qualities])
ref_roots, ref_semitones, ref_basses = encode_many(reference_labels, False)
est_roots, est_semitones, est_basses = encode_many(estimated_labels, False)
eq_roots_basses = (ref_roots == est_roots) * (ref_basses == est_basses)
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
comparison_scores = (eq_roots_basses * eq_semitones).astype(float)
# Test for Major / Minor / No-chord
is_valid = np.array([np.all(np.equal(ref_semitones, semitones), axis=1)
for semitones in valid_semitones])
comparison_scores[np.sum(is_valid, axis=0) == 0] = -1
# Disable inversions that are not part of the quality
valid_inversion = np.ones(ref_basses.shape, dtype=bool)
bass_idx = ref_basses >= 0
valid_inversion[bass_idx] = ref_semitones[bass_idx, ref_basses[bass_idx]]
comparison_scores[valid_inversion == 0] = -1
return comparison_scores | Compare chords along MIREX 'sevenths' rules. Chords with qualities
outside [maj, maj7, 7, min, min7, N] are ignored.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.sevenths_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut. |
def create_folder(self, folder_name, parent_kind_str, parent_uuid):
"""
Send POST to /folders to create a new folder with specified name and parent.
:param folder_name: str name of the new folder
:param parent_kind_str: str type of parent folder has(dds-folder,dds-project)
:param parent_uuid: str uuid of the parent object
:return: requests.Response containing the successful result
"""
data = {
'name': folder_name,
'parent': {
'kind': parent_kind_str,
'id': parent_uuid
}
}
return self._post("/folders", data) | Send POST to /folders to create a new folder with specified name and parent.
:param folder_name: str name of the new folder
:param parent_kind_str: str type of parent folder has(dds-folder,dds-project)
:param parent_uuid: str uuid of the parent object
:return: requests.Response containing the successful result |
def _one_q_sic_prep(index, qubit):
"""Prepare the index-th SIC basis state."""
if index == 0:
return Program()
theta = 2 * np.arccos(1 / np.sqrt(3))
zx_plane_rotation = Program([
RX(-pi / 2, qubit),
RZ(theta - pi, qubit),
RX(-pi / 2, qubit),
])
if index == 1:
return zx_plane_rotation
elif index == 2:
return zx_plane_rotation + RZ(-2 * pi / 3, qubit)
elif index == 3:
return zx_plane_rotation + RZ(2 * pi / 3, qubit)
raise ValueError(f'Bad SIC index: {index}') | Prepare the index-th SIC basis state. |
def volume_show(name, profile=None, **kwargs):
'''
Create a block storage volume
name
Name of the volume
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_show myblock profile=openstack
'''
conn = _auth(profile, **kwargs)
return conn.volume_show(name) | Show a block storage volume
name
Name of the volume
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_show myblock profile=openstack |
def libvlc_vlm_del_media(p_instance, psz_name):
'''Delete a media (VOD or broadcast).
@param p_instance: the instance.
@param psz_name: the media to delete.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_del_media', None) or \
_Cfunction('libvlc_vlm_del_media', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, psz_name) | Delete a media (VOD or broadcast).
@param p_instance: the instance.
@param psz_name: the media to delete.
@return: 0 on success, -1 on error. |
def script(experiment, projects):
"""
Prepare a slurm script that executes the experiment for a given project.
Args:
experiment: The experiment we want to execute
projects: All projects we generate an array job for.
"""
benchbuild_c = local[local.path(sys.argv[0])]
slurm_script = local.cwd / experiment.name + "-" + str(
CFG['slurm']['script'])
srun = local["srun"]
srun_args = []
if not CFG["slurm"]["multithread"]:
srun_args.append("--hint=nomultithread")
if not CFG["slurm"]["turbo"]:
srun_args.append("--pstate-turbo=off")
srun = srun[srun_args]
srun = srun[benchbuild_c["run"]]
return __save__(slurm_script, srun, experiment, projects) | Prepare a slurm script that executes the experiment for a given project.
Args:
experiment: The experiment we want to execute
projects: All projects we generate an array job for. |
def grab_literal(template, l_del):
"""Parse a literal from the template"""
global _CURRENT_LINE
try:
# Look for the next tag and move the template to it
literal, template = template.split(l_del, 1)
_CURRENT_LINE += literal.count('\n')
return (literal, template)
# There are no more tags in the template?
except ValueError:
# Then the rest of the template is a literal
return (template, '') | Parse a literal from the template |
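A small demonstration with the default mustache-style left delimiter, assuming the module-level `_CURRENT_LINE` counter has been initialised:

literal, rest = grab_literal("Hello {{name}}, welcome!", "{{")
print(repr(literal))  # 'Hello '
print(repr(rest))     # 'name}}, welcome!'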
def append_ISO19115_keywords(keywords):
"""Append ISO19115 from setting to keywords.
:param keywords: The keywords destination.
:type keywords: dict
"""
# Map setting's key and metadata key
ISO19115_mapping = {
'ISO19115_ORGANIZATION': 'organisation',
'ISO19115_URL': 'url',
'ISO19115_EMAIL': 'email',
'ISO19115_LICENSE': 'license'
}
ISO19115_keywords = {}
# Getting value from setting.
for key, value in list(ISO19115_mapping.items()):
ISO19115_keywords[value] = setting(key, expected_type=str)
keywords.update(ISO19115_keywords) | Append ISO19115 from setting to keywords.
:param keywords: The keywords destination.
:type keywords: dict |
def _parseLine(cls, line):
"""Parsers a single line of text and returns an AudioClipSpec
Line format:
<number> <number> [<text>]
Returns: list(AudioClipSpec) or None
"""
r = cls._PROG.match(line)
if not r:
raise ValueError("Error: parsing '%s'. Correct: \"<number> <number> [<text>]\"" % line)
d = r.groupdict()
if len(d['begin']) == 0 or len(d['end']) == 0:
raise ValueError("Error: parsing '%s'. Correct: \"<number> <number> [<text>]\"" % line)
return AudioClipSpec(d['begin'], d['end'], d['text'].strip()) | Parses a single line of text and returns an AudioClipSpec
Line format:
<number> <number> [<text>]
Returns: list(AudioClipSpec) or None |
def directory_files(path):
"""Yield directory file names."""
for entry in os.scandir(path):
if not entry.name.startswith('.') and entry.is_file():
yield entry.name | Yield directory file names. |
def permute_point(p, permutation=None):
"""
Permutes the point according to the permutation keyword argument. The
default permutation is "012" which does not change the order of the
coordinate. To rotate counterclockwise, use "120" and to rotate clockwise
use "201"."""
if not permutation:
return p
return [p[int(permutation[i])] for i in range(len(p))] | Permutes the point according to the permutation keyword argument. The
default permutation is "012" which does not change the order of the
coordinate. To rotate counterclockwise, use "120" and to rotate clockwise
use "201". |
def poll(self, poll_rate=None, timeout=None):
"""Return the status of a task or timeout.
There are several API calls that trigger asynchronous tasks, such as
synchronizing a repository, or publishing or promoting a content view.
It is possible to check on the status of a task if you know its UUID.
This method polls a task once every ``poll_rate`` seconds and, upon
task completion, returns information about that task.
:param poll_rate: Delay between the end of one task check-up and
the start of the next check-up. Defaults to
``nailgun.entity_mixins.TASK_POLL_RATE``.
:param timeout: Maximum number of seconds to wait until timing out.
Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``.
:returns: Information about the asynchronous task.
:raises: ``nailgun.entity_mixins.TaskTimedOutError`` if the task
    is still incomplete when ``timeout`` seconds have elapsed.
:raises: ``nailgun.entity_mixins.TaskFailedError`` if the task finishes
with any result other than "success".
:raises: ``requests.exceptions.HTTPError`` If the API returns a message
with an HTTP 4XX or 5XX status code.
"""
# See nailgun.entity_mixins._poll_task for an explanation of why a
# private method is called.
return _poll_task(
self.id, # pylint:disable=no-member
self._server_config,
poll_rate,
timeout
) | Return the status of a task or timeout.
There are several API calls that trigger asynchronous tasks, such as
synchronizing a repository, or publishing or promoting a content view.
It is possible to check on the status of a task if you know its UUID.
This method polls a task once every ``poll_rate`` seconds and, upon
task completion, returns information about that task.
:param poll_rate: Delay between the end of one task check-up and
the start of the next check-up. Defaults to
``nailgun.entity_mixins.TASK_POLL_RATE``.
:param timeout: Maximum number of seconds to wait until timing out.
Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``.
:returns: Information about the asynchronous task.
:raises: ``nailgun.entity_mixins.TaskTimedOutError`` if the task
    is still incomplete when ``timeout`` seconds have elapsed.
:raises: ``nailgun.entity_mixins.TaskFailedError`` if the task finishes
with any result other than "success".
:raises: ``requests.exceptions.HTTPError`` If the API returns a message
with an HTTP 4XX or 5XX status code. |
def _vcap_from_service_definition(service_def):
"""Turn a service definition into a vcap services
containing a single service.
"""
if 'credentials' in service_def:
credentials = service_def['credentials']
else:
credentials = service_def
service = {}
service['credentials'] = credentials
service['name'] = _name_from_service_definition(service_def)
vcap = {'streaming-analytics': [service]}
return vcap | Turn a service definition into a vcap services
containing a single service. |