Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k)
---|---|---|
2,700 | def get_connection(self, host, port):
log.debug("
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.settimeout(self.sock_connect_timeout / 1000)
self.sock.connect((host, port))
self.sock.settimeout(self.sock_request_timeout / 1000)
self.write(self.RPC_HEADER)
self.write(struct.pack('B', self.version))
self.write(struct.pack('B', self.RPC_SERVICE_CLASS))
if self.use_sasl:
self.write(struct.pack('B', self.AUTH_PROTOCOL_SASL))
else:
self.write(struct.pack('B', self.AUTH_PROTOCOL_NONE))
if self.use_sasl:
sasl = SaslRpcClient(self, hdfs_namenode_principal=self.hdfs_namenode_principal)
sasl_connected = sasl.connect()
if not sasl_connected:
raise TransientException("SASL is configured, but cannot get connected")
rpc_header = self.create_rpc_request_header()
context = self.create_connection_context()
header_length = len(rpc_header) + encoder._VarintSize(len(rpc_header)) +len(context) + encoder._VarintSize(len(context))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Header length: %s (%s)" % (header_length, format_bytes(struct.pack(, header_length))))
self.write(struct.pack(, header_length))
self.write_delimited(rpc_header)
self.write_delimited(context) | Open a socket connection to a given host and port and write the Hadoop header
The Hadoop RPC protocol looks like this when creating a connection:
+---------------------------------------------------------------------+
| Header, 4 bytes ("hrpc") |
+---------------------------------------------------------------------+
| Version, 1 byte (default version 9) |
+---------------------------------------------------------------------+
| RPC service class, 1 byte (0x00) |
+---------------------------------------------------------------------+
| Auth protocol, 1 byte (Auth method None = 0) |
+---------------------------------------------------------------------+
| Length of the RpcRequestHeaderProto + length of the |
| of the IpcConnectionContextProto (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Serialized delimited RpcRequestHeaderProto |
+---------------------------------------------------------------------+
| Serialized delimited IpcConnectionContextProto |
+---------------------------------------------------------------------+ |
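Aside (not part of the dataset row above): a minimal sketch of how the connection preamble described in the diagram could be packed with Python's standard struct module. The field values (magic "hrpc", version 9, service class 0, auth protocol 0) and the 4-byte big-endian length prefix come straight from the docstring; the header_length value is a placeholder.

```python
import struct

RPC_HEADER = b"hrpc"        # 4-byte magic, per the diagram
VERSION = 9                 # 1 byte (default version 9)
RPC_SERVICE_CLASS = 0       # 1 byte (0x00)
AUTH_PROTOCOL_NONE = 0      # 1 byte (auth method None = 0)

# First seven bytes of the connection preamble.
preamble = (RPC_HEADER
            + struct.pack("B", VERSION)
            + struct.pack("B", RPC_SERVICE_CLASS)
            + struct.pack("B", AUTH_PROTOCOL_NONE))
assert len(preamble) == 7

# The combined length of the two delimited protobuf messages is then sent
# as a 4-byte big-endian integer, followed by the messages themselves.
header_length = 42  # placeholder for len(rpc_header) + len(context) + varint sizes
length_prefix = struct.pack("!I", header_length)
assert len(length_prefix) == 4
```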
2,701 | def get_name_DID_info(self, name):
db = get_db_state(self.working_dir)
did_info = db.get_name_DID_info(name)
if did_info is None:
return {: , : 404}
return did_info | Get a name's DID info
Returns None if not found |
2,702 | def ColorWithLightness(self, lightness):
h, s, l = self.__hsl
return Color((h, s, lightness), 'hsl', self.__a, self.__wref) | Create a new instance based on this one with a new lightness value.
Parameters:
:lightness:
The lightness of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25)
(0.5, 0.25, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25).hsl
(30, 1, 0.25) |
2,703 | def _cast_float(temp_dt):
if type(temp_dt) == str:
fmt =
base_dt = temp_dt[0:19]
tz_offset = eval(temp_dt[19:22])
temp_dt = datetime.datetime.strptime(base_dt, fmt) - \
datetime.timedelta(hours=tz_offset)
return (temp_dt - datetime.datetime(1970, 1, 1)).total_seconds() | returns utc timestamp |
2,704 | def unique_str(self):
unique_str = "_".join(["%.3f" % f for f in self.geotransform] +
["%d" % d for d in self.x_size, self.y_size]
)
if self.date is not None:
unique_str += + str(self.date)
if self.time is not None:
unique_str += + str(self.time)
return unique_str.replace(, ) | A string that (ideally) uniquely represents this GC object. This
helps with naming files for caching. 'Unique' is defined as 'If
GC1 != GC2, then GC1.unique_str() != GC2.unique_str()'; conversely,
'If GC1 == GC2, then GC1.unique_str() == GC2.unique_str()'.
The string should be filename-safe (no \/:*?"<>|).
..note::Because of length/readability restrictions, this fxn ignores
wkt.
Example output:
"-180.000_0.250_0.000_90.000_0.000_-0.251_512_612_2013-05-21_12_32_52.945000" |
2,705 | def idf(self, term, transform=None):
docs_with_term = 0
docs = self.docs_of_words()
for doc in docs:
doc_set = set(doc)
if transform:
transformed_doc = []
for word in doc_set:
transformed_doc.append(transform(word))
doc_set = set(transformed_doc)
if term in doc_set:
docs_with_term += 1
if docs_with_term == 0:
return float('inf')
return log10(len(docs) / docs_with_term) | r"""Calculate the Inverse Document Frequency of a term in the corpus.
Parameters
----------
term : str
The term to calculate the IDF of
transform : function
A function to apply to each document term before checking for the
presence of term
Returns
-------
float
The IDF
Examples
--------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n\n'
>>> tqbf += 'And then it slept.\n\n And the dog ran off.'
>>> corp = Corpus(tqbf)
>>> print(corp.docs())
[[['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',
'dog.']],
[['And', 'then', 'it', 'slept.']],
[['And', 'the', 'dog', 'ran', 'off.']]]
>>> round(corp.idf('dog'), 10)
0.4771212547
>>> round(corp.idf('the'), 10)
0.1760912591 |
2,706 | def ev_to_s(offset_us, source_to_detector_m, array):
time_s = np.sqrt(81.787 / (array * 1000.)) * source_to_detector_m / 3956.
time_record_s = time_s - offset_us * 1e-6
return time_record_s | convert energy (eV) to time (s)
Parameters:
===========
array: array (in eV)
offset_us: float. Delay of detector in us
source_to_detector_m: float. Distance source to detector in m
Returns:
========
time: array in s |
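A hedged usage sketch of the row above, assuming ev_to_s is importable as defined there; the offset and flight-path values are made up for illustration.

```python
import numpy as np

energies_ev = np.array([1.0, 5.0, 25.0])      # neutron energies in eV
tof_s = ev_to_s(offset_us=2.7, source_to_detector_m=16.45, array=energies_ev)

# Same relation written out inline, for comparison.
expected = np.sqrt(81.787 / (energies_ev * 1000.)) * 16.45 / 3956. - 2.7e-6
assert np.allclose(tof_s, expected)
```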
2,707 | def step1(self, username, password):
self._check_initialized()
context = AtvSRPContext(
str(username), str(password),
prime=constants.PRIME_2048,
generator=constants.PRIME_2048_GEN)
self.session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode()) | First authentication step. |
2,708 | def mass_tot(self, rho0, Rs):
m_tot = 2*np.pi*rho0*Rs**3
return m_tot | total mass within the profile
:param rho0:
:param Rs:
:return: |
2,709 | def list_commands(self, ctx):
rv = []
files = [_ for _ in next(os.walk(self.folder))[2] if not _.startswith("_") and _.endswith(".py")]
for filename in files:
rv.append(filename[:-3])
rv.sort()
return rv | List commands from folder. |
2,710 | def query_string(cls,
query,
default_field=None,
default_operator=None,
analyzer=None,
allow_leading_wildcard=None,
lowercase_expanded_terms=None,
enable_position_increments=None,
fuzzy_prefix_length=None,
fuzzy_min_sim=None,
phrase_slop=None,
boost=None,
analyze_wildcard=None,
auto_generate_phrase_queries=None,
minimum_should_match=None):
instance = cls(query_string={'query': query})
if default_field is not None:
instance['query_string']['default_field'] = default_field
if default_operator is not None:
instance['query_string']['default_operator'] = default_operator
if analyzer is not None:
instance['query_string']['analyzer'] = analyzer
if allow_leading_wildcard is not None:
instance['query_string']['allow_leading_wildcard'] = allow_leading_wildcard
if lowercase_expanded_terms is not None:
instance['query_string']['lowercase_expanded_terms'] = lowercase_expanded_terms
if enable_position_increments is not None:
instance['query_string']['enable_position_increments'] = enable_position_increments
if fuzzy_prefix_length is not None:
instance['query_string']['fuzzy_prefix_length'] = fuzzy_prefix_length
if fuzzy_min_sim is not None:
instance['query_string']['fuzzy_min_sim'] = fuzzy_min_sim
if phrase_slop is not None:
instance['query_string']['phrase_slop'] = phrase_slop
if boost is not None:
instance['query_string']['boost'] = boost
if analyze_wildcard is not None:
instance['query_string']['analyze_wildcard'] = analyze_wildcard
if auto_generate_phrase_queries is not None:
instance['query_string']['auto_generate_phrase_queries'] = auto_generate_phrase_queries
if minimum_should_match is not None:
instance['query_string']['minimum_should_match'] = minimum_should_match
return instance | http://www.elasticsearch.org/guide/reference/query-dsl/query-string-query.html
A query that uses a query parser in order to parse its content.
> query = ElasticQuery().query_string('this AND that OR thus', default_field='content') |
2,711 | def get(self, fragment_info):
if not self.is_cached(fragment_info):
raise KeyError(u"Attempt to get text not cached")
return self.cache[fragment_info] | Get the value associated with the given key.
:param fragment_info: the text key
:type fragment_info: tuple of str ``(language, text)``
:raises: KeyError if the key is not present in the cache |
2,712 | def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
logging.info()
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, ),
,
tasks_dir_splits[0],
tasks_dir_splits[1])
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == :
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, ))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, ))
shutil.make_archive(os.path.join(dist_dir, ),
,
os.path.join(os.path.dirname(os.path.realpath(__file__)), , ),
)
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError( % (dat, real_path)) | Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark |
2,713 | def parse_spec(spec, relative_to=None, subproject_roots=None):
def normalize_absolute_refs(ref):
return strip_prefix(ref, '//')
subproject = longest_dir_prefix(relative_to, subproject_roots) if subproject_roots else None
def prefix_subproject(spec_path):
if not subproject:
return spec_path
elif spec_path:
return os.path.join(subproject, spec_path)
else:
return os.path.normpath(subproject)
spec_parts = spec.rsplit(':', 1)
if len(spec_parts) == 1:
default_target_spec = spec_parts[0]
spec_path = prefix_subproject(normalize_absolute_refs(default_target_spec))
target_name = os.path.basename(spec_path)
else:
spec_path, target_name = spec_parts
if not spec_path and relative_to:
spec_path = fast_relpath(relative_to, subproject) if subproject else relative_to
spec_path = prefix_subproject(normalize_absolute_refs(spec_path))
return spec_path, target_name | Parses a target address spec and returns the path from the root of the repo to this Target
and Target name.
:API: public
:param string spec: Target address spec.
:param string relative_to: path to use for sibling specs, ie: ':another_in_same_build_family',
interprets the missing spec_path part as `relative_to`.
:param list subproject_roots: Paths that correspond with embedded build roots under
the current build root.
For Example::
some_target(name='mytarget',
dependencies=['path/to/buildfile:targetname']
)
Where ``path/to/buildfile:targetname`` is the dependent target address spec
In case the target name is empty it returns the last component of the path as target name, ie::
spec_path, target_name = parse_spec('path/to/buildfile/foo')
Will return spec_path as 'path/to/buildfile/foo' and target_name as 'foo'.
Optionally, specs can be prefixed with '//' to denote an absolute spec path. This is normally
not significant except when a spec referring to a root level target is needed from deeper in
the tree. For example, in ``path/to/buildfile/BUILD``::
some_target(name='mytarget',
dependencies=[':targetname']
)
The ``targetname`` spec refers to a target defined in ``path/to/buildfile/BUILD*``. If instead
you want to reference ``targetname`` in a root level BUILD file, use the absolute form.
For example::
some_target(name='mytarget',
dependencies=['//:targetname']
) |
2,714 | def validate_hier_intervals(intervals_hier):
label_top = util.generate_labels(intervals_hier[0])
boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))
for level, intervals in enumerate(intervals_hier[1:], 1):
label_current = util.generate_labels(intervals)
validate_structure(intervals_hier[0], label_top,
intervals, label_current)
new_bounds = set(util.intervals_to_boundaries(intervals))
if boundaries - new_bounds:
warnings.warn(
.format(level))
boundaries |= new_bounds | Validate a hierarchical segment annotation.
Parameters
----------
intervals_hier : ordered list of segmentations
Raises
------
ValueError
If any segmentation does not span the full duration of the top-level
segmentation.
If any segmentation does not start at 0. |
2,715 | def _parse_caption(self, node, state):
if node.tag not in ["caption", "figcaption"]:
return state
parent = state["parent"][node]
stable_id = (
f"{state[].name}"
f"::"
f"{}"
f":"
f"{state[][]}"
)
name = node.attrib["name"] if "name" in node.attrib else None
if isinstance(parent, Table):
state["context"][node] = Caption(
document=state["document"],
table=parent,
figure=None,
stable_id=stable_id,
name=name,
position=state["caption"]["idx"],
)
elif isinstance(parent, Figure):
state["context"][node] = Caption(
document=state["document"],
table=None,
figure=parent,
stable_id=stable_id,
name=name,
position=state["caption"]["idx"],
)
else:
raise NotImplementedError("Caption must be a child of Table or Figure.")
state["caption"]["idx"] += 1
return state | Parse a Caption of the node.
:param node: The lxml node to parse
:param state: The global state necessary to place the node in context
of the document as a whole. |
2,716 | def guest_inspect_stats(self, userid_list):
if not isinstance(userid_list, list):
userid_list = [userid_list]
action = "get the statistics of guest " % str(userid_list)
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._monitor.inspect_stats(userid_list) | Get the statistics including cpu and mem of the guests
:param userid_list: a single userid string or a list of guest userids
:returns: dictionary describing the cpu statistics of the vm
in the form {'UID1':
{
'guest_cpus': xx,
'used_cpu_time_us': xx,
'elapsed_cpu_time_us': xx,
'min_cpu_count': xx,
'max_cpu_limit': xx,
'samples_cpu_in_use': xx,
'samples_cpu_delay': xx,
'used_mem_kb': xx,
'max_mem_kb': xx,
'min_mem_kb': xx,
'shared_mem_kb': xx
},
'UID2':
{
'guest_cpus': xx,
'used_cpu_time_us': xx,
'elapsed_cpu_time_us': xx,
'min_cpu_count': xx,
'max_cpu_limit': xx,
'samples_cpu_in_use': xx,
'samples_cpu_delay': xx,
'used_mem_kb': xx,
'max_mem_kb': xx,
'min_mem_kb': xx,
'shared_mem_kb': xx
}
}
for the guests that are shutdown or not exist, no data
returned in the dictionary |
2,717 | def read (self, size = -1):
if size == 0:
return self._empty_buffer
if size < 0:
self.expect (self.delimiter)
return self.before
if self._buffer_type is bytes:
pat = (u'.{%d}' % size).encode()
else:
pat = u'.{%d}' % size
cre = re.compile(pat, re.DOTALL)
index = self.expect ([cre, self.delimiter])
if index == 0:
return self.after
return self.before | This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. |
2,718 | async def event_wait(event: asyncio.Event, timeout=None):
if timeout is None:
await event.wait()
return True
try:
await asyncio.wait_for(event.wait(), timeout)
except asyncio.TimeoutError:
return False
return True | Wait on an asyncio event with an optional timeout
Returns:
True if the event got set, False if timed out |
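A small usage sketch, assuming event_wait is importable as defined in the row above.

```python
import asyncio

async def main():
    loop = asyncio.get_running_loop()

    event = asyncio.Event()
    loop.call_later(0.1, event.set)                   # set after 100 ms
    print(await event_wait(event, timeout=1.0))       # True: set before the timeout

    never_set = asyncio.Event()
    print(await event_wait(never_set, timeout=0.05))  # False: timed out

asyncio.run(main())
```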
2,719 | def parse_peddy_csv(self, f, pattern):
parsed_data = dict()
headers = None
s_name_idx = None
for l in f['f'].splitlines():
s = l.split(",")
if headers is None:
headers = s
try:
s_name_idx = [headers.index("sample_id")]
except ValueError:
try:
s_name_idx = [headers.index("sample_a"), headers.index("sample_b")]
except ValueError:
log.warn("Could not find sample name in Peddy output: {}".format(f[]))
return None
else:
s_name = .join([s[idx] for idx in s_name_idx])
parsed_data[s_name] = dict()
for i, v in enumerate(s):
if i not in s_name_idx:
if headers[i] == "error" and pattern == "sex_check":
v = "True" if v == "False" else "False"
try:
parsed_data[s_name][headers[i] + "_" + pattern] = float(v)
except ValueError:
parsed_data[s_name][headers[i] + "_" + pattern] = v
if len(parsed_data) == 0:
return None
return parsed_data | Parse csv output from peddy |
2,720 | def __generate(self):
base = []
texted = []
for pat in ALL_PATTERNS:
data = pat.copy()
data[] = data[]
data[] = True
data[] = data[]
base.append(data)
data = pat.copy()
data[] = data[]
data[] +=
data[] = True
data[] = data[] + Optional(Literal(",")).suppress() + BASE_TIME_PATTERNS[]
data[] =
data[] = { : data[][] + 5, : data[][] + 8}
base.append(data)
data = pat.copy()
data[] = data[]
data[] = True
data[] +=
data[] = data[] + Optional(oneOf([, ])).suppress() + BASE_TIME_PATTERNS[]
data[] =
data[] = { : data[][] + 9, : data[][] + 9}
base.append(data)
for pat in base:
data = pat.copy()
data[] +=
data[] = lineStart + data[] + Optional(oneOf([, , , ])).suppress() + restOfLine.suppress()
data[] = { : data[][] + 1, : data[][] + 90}
texted.append(data)
base.extend(texted)
self.patterns = base | Generates dates patterns |
2,721 | def fmtval(value, colorstr=None, precision=None, spacing=True, trunc=True,
end=''):
colwidth = opts.colwidth
if precision is None:
precision = opts.precision
fmt = '%%.%df' % precision
result = locale.format(fmt, value, True)
if spacing:
result = ('%%%ds' % colwidth) % result
if trunc:
if len(result) > colwidth:
result = truncstr(result, colwidth)
if opts.incolor and colorstr:
return colorstr % result + end
else:
return result + end | Formats and returns a given number according to specifications. |
2,722 | def _combine_ranges_on_length(self, data_len, first, second):
first = get_true_slice(first, data_len)
second = get_true_slice(second, data_len)
final_start, final_step, final_stop = (None, None, None)
if first.start == None and second.start == None:
final_start = None
else:
final_start = (first.start if first.start else 0)+(second.start if second.start else 0)
if second.stop == None:
final_stop = first.stop
elif first.stop == None:
final_stop = (first.start if first.start else 0) + second.stop
else:
final_stop = min(first.stop, (first.start if first.start else 0) + second.stop)
if first.step == None and second.step == None:
final_step = None
else:
final_step = (first.step if first.step else 1)*(second.step if second.step else 1)
if final_start > final_stop:
final_start = final_stop
return slice(final_start, final_stop, final_step) | Combines a first range with a second range, where the second
range is considered within the scope of the first. |
2,723 | def handle(self, message):
opcode = message[]
if opcode == 10:
self.on_hello(message)
elif opcode == 11:
self.on_heartbeat(message)
elif opcode == 0:
self.on_message(message)
else:
logger.debug("Not a message we handle: OPCODE {}".format(opcode))
return | Dispatches messages to appropriate handler based on opcode
Args:
message (dict): Full message from Discord websocket connection |
2,724 | def usable_id(cls, id):
try:
qry_id = int(id)
except Exception:
qry_id = None
if not qry_id:
msg = % id
cls.error(msg)
return qry_id | Retrieve id from input which can be num or id. |
2,725 | def fit_tomography_data(tomo_data, method='wizard', options=None):
if isinstance(method, str) and method.lower() in ['wizard', 'leastsq']:
trace = __get_option('trace', options)
beta = __get_option('beta', options)
rho = __leastsq_fit(tomo_data, trace=trace, beta=beta)
if method == 'wizard':
epsilon = __get_option('epsilon', options)
rho = __wizard(rho, epsilon=epsilon)
return rho
else:
raise Exception('Invalid reconstruction method: %s' % method) | Reconstruct a density matrix or process-matrix from tomography data.
If the input data is state_tomography_data the returned operator will
be a density matrix. If the input data is process_tomography_data the
returned operator will be a Choi-matrix in the column-vectorization
convention.
Args:
tomo_data (dict): process tomography measurement data.
method (str): the fitting method to use.
Available methods:
- 'wizard' (default)
- 'leastsq'
options (dict or None): additional options for fitting method.
Returns:
numpy.array: The fitted operator.
Available methods:
- 'wizard' (Default): The returned operator will be constrained to be
positive-semidefinite.
Options:
- 'trace': the trace of the returned operator.
The default value is 1.
- 'beta': hedging parameter for computing frequencies from
zero-count data. The default value is 0.50922.
- 'epsilon: threshold for truncating small eigenvalues to zero.
The default value is 0
- 'leastsq': Fitting without positive-semidefinite constraint.
Options:
- 'trace': Same as for 'wizard' method.
- 'beta': Same as for 'wizard' method.
Raises:
Exception: if the `method` parameter is not valid. |
2,726 | def respond_via_request(self, task):
warn(f"Detected slow response into webhook. "
f"(Greater than {RESPONSE_TIMEOUT} seconds)\n"
f"Recommended to use decorator from Dispatcher for handler with long timeouts.",
TimeoutWarning)
dispatcher = self.get_dispatcher()
loop = dispatcher.loop
try:
results = task.result()
except Exception as e:
loop.create_task(
dispatcher.errors_handlers.notify(dispatcher, types.Update.get_current(), e))
else:
response = self.get_response(results)
if response is not None:
asyncio.ensure_future(response.execute_response(dispatcher.bot), loop=loop) | Handle response after 55 seconds.
:param task:
:return: |
2,727 | def to_python(self,value):
if value==self.emptyValue or value in EMPTY_VALUES:
return self.emptyValue
try:
value=self.coerce(value)
except(ValueError,TypeError,ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value | Validates that the value is in self.choices and can be coerced to the right type. |
2,728 | def default_exchange_proposed_fn(prob_exchange):
def default_exchange_proposed_fn_(num_replica, seed=None):
seed_stream = distributions.SeedStream(seed, )
zero_start = tf.random.uniform([], seed=seed_stream()) > 0.5
if num_replica % 2 == 0:
def _exchange():
flat_exchange = tf.range(num_replica)
if num_replica > 2:
start = tf.cast(~zero_start, dtype=tf.int32)
end = num_replica - start
flat_exchange = flat_exchange[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
else:
def _exchange():
start = tf.cast(zero_start, dtype=tf.int32)
end = num_replica - tf.cast(~zero_start, dtype=tf.int32)
flat_exchange = tf.range(num_replica)[start:end]
return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])
def _null_exchange():
return tf.reshape(tf.cast([], dtype=tf.int32), shape=[0, 2])
return tf.cond(
pred=tf.random.uniform([], seed=seed_stream()) < prob_exchange,
true_fn=_exchange,
false_fn=_null_exchange)
return default_exchange_proposed_fn_ | Default exchange proposal function, for replica exchange MC.
With probability `prob_exchange` propose combinations of replica for exchange.
When exchanging, create combinations of adjacent replicas in
[Replica Exchange Monte Carlo](
https://en.wikipedia.org/wiki/Parallel_tempering)
```
exchange_fn = default_exchange_proposed_fn(prob_exchange=0.5)
exchange_proposed = exchange_fn(num_replica=3)
exchange_proposed.eval()
==> [[0, 1]] # 1 exchange, 0 <--> 1
exchange_proposed.eval()
==> [] # 0 exchanges
```
Args:
prob_exchange: Scalar `Tensor` giving probability that any exchanges will
be generated.
Returns:
default_exchange_proposed_fn_: Python callable which take a number of
replicas (a Python integer), and return combinations of replicas for
exchange as an [n, 2] integer `Tensor`, `0 <= n <= num_replica // 2`,
with *unique* values in the set `{0, ..., num_replica}`. |
2,729 | def get_relationship_admin_session_for_family(self, family_id):
if not self.supports_relationship_admin():
raise errors.Unimplemented()
return sessions.RelationshipAdminSession(family_id, runtime=self._runtime) | Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``* |
2,730 | def topk(timestep: int,
batch_size: int,
beam_size: int,
inactive: mx.nd.NDArray,
scores: mx.nd.NDArray,
hypotheses: List[ConstrainedHypothesis],
best_ids: mx.nd.NDArray,
best_word_ids: mx.nd.NDArray,
seq_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array, List[ConstrainedHypothesis], mx.nd.NDArray]:
for sentno in range(batch_size):
rows = slice(sentno * beam_size, sentno * beam_size + beam_size)
if hypotheses[rows.start] is not None and hypotheses[rows.start].size() > 0:
best_ids[rows], best_word_ids[rows], seq_scores[rows], \
hypotheses[rows], inactive[rows] = _sequential_topk(timestep,
beam_size,
inactive[rows],
scores[rows],
hypotheses[rows],
best_ids[rows] - rows.start,
best_word_ids[rows],
seq_scores[rows])
best_ids[rows] += rows.start
else:
inactive[rows] = 0
return best_ids, best_word_ids, seq_scores, hypotheses, inactive | Builds a new topk list such that the beam contains hypotheses having completed different numbers of constraints.
These items are built from three different types: (1) the best items across the whole
scores matrix, (2) the set of words that must follow existing constraints, and (3) k-best items from each row.
:param timestep: The current decoder timestep.
:param batch_size: The number of segments in the batch.
:param beam_size: The length of the beam for each segment.
:param inactive: Array listing inactive rows (shape: (beam_size,)).
:param scores: The scores array (shape: (batch_size if t==1 else beam_size, target_vocab_size)).
:param hypotheses: The list of hypothesis objects.
:param best_ids: The current list of best hypotheses (shape: (beam_size,)).
:param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
:param seq_scores: (shape: (beam_size, 1)).
:return: A tuple containing the best hypothesis rows, the best hypothesis words, the scores,
the updated constrained hypotheses, and the updated set of inactive hypotheses. |
2,731 | def expool(name):
name = stypes.stringToCharP(name)
found = ctypes.c_int()
libspice.expool_c(name, ctypes.byref(found))
return bool(found.value) | Confirm the existence of a kernel variable in the kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/expool_c.html
:param name: Name of the variable whose value is to be returned.
:type name: str
:return: True when the variable is in the pool.
:rtype: bool |
2,732 | def parse_date(self, value):
if value is None:
raise Exception("Unable to parse date from %r" % value)
elif isinstance(value, string_types):
return self.str2date(value)
elif isinstance(value, int):
return date.fromordinal(value)
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
else:
raise Exception("Unable to parse date from %r" % value) | A lazy method to parse anything to date.
If input data type is:
- string: parse date from it
- integer: use from ordinal
- datetime: use date part
- date: just return it |
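A usage sketch of the four accepted input types, assuming the method lives on a parser instance named p (hypothetical); the accepted string format is whatever str2date expects, so that call is shown without an assertion.

```python
from datetime import date, datetime

d = date(2018, 6, 1)
assert p.parse_date(d) == d                             # date: returned as-is
assert p.parse_date(datetime(2018, 6, 1, 12, 30)) == d  # datetime: date part used
assert p.parse_date(d.toordinal()) == d                 # int: date.fromordinal
p.parse_date("2018-06-01")                              # str: delegated to self.str2date()
```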
2,733 | def _update_armed_status(self, message=None, status=None, status_stay=None):
arm_status = status
stay_status = status_stay
if isinstance(message, Message):
arm_status = message.armed_away
stay_status = message.armed_home
if arm_status is None or stay_status is None:
return
self._armed_status, old_status = arm_status, self._armed_status
self._armed_stay, old_stay = stay_status, self._armed_stay
if arm_status != old_status or stay_status != old_stay:
if old_status is not None or message is None:
if self._armed_status or self._armed_stay:
self.on_arm(stay=stay_status)
else:
self.on_disarm()
return self._armed_status or self._armed_stay | Uses the provided message to update the armed state.
:param message: message to use to update
:type message: :py:class:`~alarmdecoder.messages.Message`
:param status: armed status, overrides message bits
:type status: bool
:param status_stay: armed stay status, overrides message bits
:type status_stay: bool
:returns: bool indicating the new status |
2,734 | def play(quiet, session_file, shell, speed, prompt, commentecho):
run(
session_file.readlines(),
shell=shell,
speed=speed,
quiet=quiet,
test_mode=TESTING,
prompt_template=prompt,
commentecho=commentecho,
) | Play a session file. |
2,735 | def get_log(db, job_id):
logs = db(, job_id)
out = []
for log in logs:
time = str(log.timestamp)[:-4]
out.append( % (time, job_id, log.level, log.message))
return out | Extract the logs as a big string
:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_id: a job ID |
2,736 | def list(self, request, *args, **kwargs):
return super(CustomerPermissionViewSet, self).list(request, *args, **kwargs) | Each customer is associated with a group of users that represent customer owners. The link is maintained
through **api/customer-permissions/** endpoint.
To list all visible links, run a **GET** query against a list.
Response will contain a list of customer owners and their brief data.
To add a new user to the customer, **POST** a new relationship to **customer-permissions** endpoint:
.. code-block:: http
POST /api/customer-permissions/ HTTP/1.1
Accept: application/json
Authorization: Token 95a688962bf68678fd4c8cec4d138ddd9493c93b
Host: example.com
{
"customer": "http://example.com/api/customers/6c9b01c251c24174a6691a1f894fae31/",
"role": "owner",
"user": "http://example.com/api/users/82cec6c8e0484e0ab1429412fe4194b7/"
} |
2,737 | def authorization_middleware(get_response):
middleware_settings = settings()
logger = _create_logger(middleware_settings)
min_scope = middleware_settings[]
def get_token_subject(sub):
return sub
def always_ok(*args, **kwargs):
return True
def authorize_function(scopes, token_signature, x_unique_id=None):
log_msg_scopes =
def is_authorized(*needed_scopes):
granted_scopes = set(scopes)
needed_scopes = set(needed_scopes)
result = needed_scopes.issubset(granted_scopes)
if result:
msg = log_msg_scopes.format(needed_scopes, granted_scopes, token_signature)
if x_unique_id:
msg += .format(x_unique_id)
logger.info(msg)
return result
return is_authorized
def authorize_forced_anonymous(_):
raise Exception(
)
def insufficient_scope():
msg =
response = http.HttpResponse(, status=401)
response[] = msg
return response
def expired_token():
msg =
response = http.HttpResponse(, status=401)
response[] = msg
return response
def invalid_token():
msg =
response = http.HttpResponse(, status=401)
response[] = msg
return response
def invalid_request():
msg = (
"Bearer realm=\"datapunt\", error=\"invalid_request\", "
"error_description=\"Invalid Authorization header format; "
"should be: \"")
response = http.HttpResponse(, status=400)
response[] = msg
return response
def token_data(authorization):
try:
prefix, token = authorization.split()
except ValueError:
logger.warning(
.format(authorization))
raise _AuthorizationHeaderError(invalid_request())
if prefix.lower() != :
logger.warning(
.format(authorization))
raise _AuthorizationHeaderError(invalid_request())
try:
header = jwt.get_unverified_header(token)
except jwt.ExpiredSignatureError:
logger.info("Expired token")
raise _AuthorizationHeaderError(expired_token())
except (jwt.InvalidTokenError, jwt.DecodeError):
logger.exception("API authz problem: JWT decode error while reading header")
raise _AuthorizationHeaderError(invalid_token())
if 'kid' not in header:
logger.exception("Did not get a valid key identifier")
raise _AuthorizationHeaderError(invalid_token())
keys = middleware_settings[].verifiers
if header['kid'] not in keys:
logger.exception("Unknown key identifier: {}".format(header['kid']))
raise _AuthorizationHeaderError(invalid_token())
key = keys[header['kid']]
try:
decoded = jwt.decode(token, key=key.key, algorithms=(key.alg,))
except jwt.InvalidTokenError:
logger.exception(
.format(token))
raise _AuthorizationHeaderError(invalid_token())
if 'scopes' not in decoded:
logger.warning(
.format(token))
raise _AuthorizationHeaderError(invalid_token())
else:
scopes = decoded['scopes']
if 'sub' in decoded:
sub = decoded['sub']
else:
sub = None
token_signature = token.split('.')[2]
return scopes, token_signature, sub
def middleware(request):
request_path = request.path
forced_anonymous = any(
request_path.startswith(route)
for route in middleware_settings[])
if middleware_settings[]:
logger.warning()
request.is_authorized_for = always_ok
request.get_token_subject =
return get_response(request)
is_options = request.method == 'OPTIONS'
if forced_anonymous or is_options:
authz_func = authorize_forced_anonymous
subject = None
else:
authorization = request.META.get('HTTP_AUTHORIZATION')
token_signature = ''
sub = None
if authorization:
try:
scopes, token_signature, sub = token_data(authorization)
except _AuthorizationHeaderError as e:
return e.response
else:
scopes = []
x_unique_id = request.META.get('HTTP_X_UNIQUE_ID')
authz_func = authorize_function(scopes, token_signature, x_unique_id)
subject = get_token_subject(sub)
if len(min_scope) > 0 and not authz_func(min_scope):
return insufficient_scope()
request.is_authorized_for = authz_func
request.get_token_subject = subject
response = get_response(request)
return response
return middleware | Django middleware to parse incoming access tokens, validate them and
set an authorization function on the request.
The decision to use a generic middleware rather than an
AuthenticationMiddleware is explicitly made, because inctances of the
latter come with a number of assumptions (i.e. that user.is_authorized()
exists, or that request.user uses the User model).
Example usage:
::
request.is_authorized_for()
:param get_response: callable that creates the response object
:return: response
:todo:
Two things needs to be done when we can completely remove the Django
JWT plugin:
- Nested function 'middleware' allows both 'JWT' (not IANA-registered)
and 'Bearer' as Authorization header prefix; JWT should not be
accepted.
- The Django JWT middleware does not include the authz claim, so this
plugin does not fail if it is not present; this behavior is wrong
when we no longer use the Django JWT plugin. |
2,738 | def _parse(jsonOutput):
parsed={}
if not jsonOutput:
return parsed
parsed["status"] = jsonOutput[0]
if jsonOutput[1] == None or jsonOutput[1] == "":
return parsed
realJson = json.loads(jsonOutput[1])
content = ""
for js in realJson:
if "X-TIKA:content" in js:
content += js["X-TIKA:content"]
if content == "":
content = None
parsed["content"] = content
parsed["metadata"] = {}
for js in realJson:
for n in js:
if n != "X-TIKA:content":
if n in parsed["metadata"]:
if not isinstance(parsed["metadata"][n], list):
parsed["metadata"][n] = [parsed["metadata"][n]]
parsed["metadata"][n].append(js[n])
else:
parsed["metadata"][n] = js[n]
return parsed | Parses JSON response from Tika REST API server
:param jsonOutput: JSON output from Tika Server
:return: a dictionary having 'metadata' and 'content' values |
2,739 | def _set_lsp_reoptimize_timer(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), is_leaf=True, yang_name="lsp-reoptimize-timer", rest_name="reoptimize-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: u, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "uint32",
: ,
})
self.__lsp_reoptimize_timer = t
if hasattr(self, ):
self._set() | Setter method for lsp_reoptimize_timer, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_reoptimize_timer (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_reoptimize_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_reoptimize_timer() directly. |
2,740 | def is_Linear(self):
coeff_dict = self.expression.as_coefficients_dict()
for key in coeff_dict.keys():
if len(key.free_symbols) < 2 and (key.is_Add or key.is_Mul or key.is_Atom):
pass
else:
return False
if key.is_Pow and key.args[1] != 1:
return False
else:
return True | Returns True if expression is linear (a polynomial with degree 1 or 0) (read-only). |
2,741 | def send(message, request_context=None, binary=False):
if binary:
return uwsgi.websocket_send_binary(message, request_context)
return uwsgi.websocket_send(message, request_context) | Sends a message to websocket.
:param str message: data to send
:param request_context:
:raises IOError: If unable to send a message. |
2,742 | def interact_GxG(pheno,snps1,snps2=None,K=None,covs=None):
N=snps1.shape[0]
if K is None:
K=SP.eye(N)
if snps2 is None:
snps2 = snps1
return interact_GxE(snps=snps1,pheno=pheno,env=snps2,covs=covs,K=K) | Epistasis test between two sets of SNPs
Args:
pheno: [N x 1] SP.array of 1 phenotype for N individuals
snps1: [N x S1] SP.array of S1 SNPs for N individuals
snps2: [N x S2] SP.array of S2 SNPs for N individuals
K: [N x N] SP.array of LMM-covariance/kinship koefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
Returns:
pv: [S2 x S1] SP.array of P values for epistasis tests beten all SNPs in
snps1 and snps2 |
2,743 | def is_identity(self):
if not self.terms:
return True
return len(self.terms) == 1 and not self.terms[0].ops and self.terms[0].coeff == 1.0 | If `self` is I, returns True, otherwise False. |
2,744 | def send_data(self):
if not self.connection._sock:
raise err.InterfaceError("(0, )")
conn = self.connection
try:
with open(self.filename, 'rb') as open_file:
packet_size = min(conn.max_allowed_packet, 16*1024)
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename)) | Send data packets from the local file to the server |
2,745 | def doc_files(self):
doc_files = []
for doc_file_re in settings.DOC_FILES_RE:
doc_files.extend(
self.archive.get_files_re(doc_file_re, ignorecase=True))
return [.join(x.split()[1:]) for x in doc_files] | Returns list of doc files that should be used for %doc in specfile.
Returns:
List of doc files from the archive - only basenames, not full
paths. |
2,746 | def build_dir():
tag_arr = [, , , , ]
path_arr = [os.path.join(CRUD_PATH, x) for x in tag_arr]
for wpath in path_arr:
if os.path.exists(wpath):
continue
os.makedirs(wpath) | Build the directory used for templates. |
2,747 | def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind) | We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None |
2,748 | def _replaces(self):
return {concat(a, c, b[1:])
for a, b in self.slices[:-1]
for c in ALPHABET} | tge |
2,749 | def _parse_row(self, i):
row = self.data[i]
for j in range(len(row)):
self.data[i][j] = self._parse_value(self.data[i][j]) | Parses row
:param i: index of row to parse |
2,750 | def add_project(self, path):
if not foundations.common.path_exists(path):
return False
path = os.path.normpath(path)
if self.__model.get_project_nodes(path):
self.__engine.notifications_manager.warnify(
"{0} | project is already opened!".format(self.__class__.__name__, path))
return False
LOGGER.info("{0} | Adding project!".format(self.__class__.__name__, path))
project_node = self.__model.register_project(path)
if not project_node:
return False
self.__model.set_project_nodes(project_node)
return True | Adds a project.
:param path: Project path.
:type path: unicode
:return: Method success.
:rtype: bool |
2,751 | def check_for_lime(self, pattern):
check = self.commands.lime_check.value
lime_loaded = False
result = self.shell.execute(check)
stdout = self.shell.decode(result['stdout'])
connections = self.net_parser.parse(stdout)
for conn in connections:
local_addr, remote_addr = conn
if local_addr == pattern:
lime_loaded = True
break
return lime_loaded | Check to see if LiME has loaded on the remote system
:type pattern: str
:param pattern: pattern to check output against
:type listen_port: int
:param listen_port: port LiME is listening for connections on |
2,752 | def pick_peaks(nc, L=16):
offset = nc.mean() / 20.
nc = filters.gaussian_filter1d(nc, sigma=4)
th = filters.median_filter(nc, size=L) + offset
peaks = []
for i in range(1, nc.shape[0] - 1):
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
if nc[i] > th[i]:
peaks.append(i)
return peaks | Obtain peaks from a novelty curve using an adaptive threshold. |
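A usage sketch on a synthetic novelty curve, assuming pick_peaks is importable as defined above and that its module-level filters binding is scipy.ndimage (as the gaussian_filter1d and median_filter calls suggest).

```python
import numpy as np

t = np.linspace(0, 8 * np.pi, 512)
nc = np.abs(np.sin(t)) + 0.05 * np.random.rand(512)   # synthetic novelty curve

peaks = pick_peaks(nc, L=16)
print(peaks)  # indices of local maxima that clear the adaptive median threshold
```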
2,753 | def blank_dc(self, n_coarse_chan):
if n_coarse_chan < 1:
logger.warning()
return None
if not n_coarse_chan % int(n_coarse_chan) == 0:
logger.warning()
return None
n_coarse_chan = int(n_coarse_chan)
n_chan = self.data.shape[-1]
n_chan_per_coarse = int(n_chan / n_coarse_chan)
mid_chan = int(n_chan_per_coarse / 2)
for ii in range(n_coarse_chan):
ss = ii*n_chan_per_coarse
self.data[..., ss+mid_chan] = np.median(self.data[..., ss+mid_chan+5:ss+mid_chan+10]) | Blank DC bins in coarse channels.
Note: currently only works if entire file is read |
2,754 | def bam_needs_processing(data):
return ((data.get("work_bam") or data.get("align_bam")) and
(any(tz.get_in(["config", "algorithm", x], data) for x in
["variantcaller", "mark_duplicates", "recalibrate", "realign", "svcaller",
"jointcaller", "variant_regions"])
or any(k in data for k in ["cwl_keys", "output_cwl_keys"]))) | Check if a work input needs processing for parallelization. |
2,755 | def timing(name, delta, rate=1, tags=None):
return client().timing(name, delta, rate=rate, tags=tags) | Sends new timing information. `delta` is in milliseconds.
>>> import statsdecor
>>> statsdecor.timing('my.metric', 314159265359) |
2,756 | def get_subset(self, subset):
if len(subset) > 50:
raise ze.TooManyItems("You may only retrieve 50 items per call")
params = self.url_params
retr = []
for itm in subset:
retr.extend(self.item(itm))
self.url_params = params
self.url_params = None
return retr | Retrieve a subset of items
Accepts a single argument: a list of item IDs |
2,757 | def on_recv_rsp(self, rsp_pb):
ret_code, msg, _= SubAccPush.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_sub_acc_push(ret_code, msg)
return ret_code, msg | receive response callback function |
2,758 | def _keplerian_to_keplerian_mean(cls, coord, center):
a, e, i, Ω, ω, ν = coord
if e < 1:
cos_E = (e + cos(ν)) / (1 + e * cos(ν))
sin_E = (sin(ν) * sqrt(1 - e ** 2)) / (1 + e * cos(ν))
E = arctan2(sin_E, cos_E) % (2 * np.pi)
M = E - e * sin(E)
else:
H = arccosh((e + cos(ν)) / (1 + e * cos(ν)))
M = e * sinh(H) - H
return np.array([a, e, i, Ω, ω, M], dtype=float) | Conversion from Keplerian to Keplerian Mean
The difference is the use of Mean anomaly instead of True anomaly |
2,759 | def stmt_star_handler(
self,
stmts,
prev_node_to_avoid=None
):
break_nodes = list()
cfg_statements = list()
self.prev_nodes_to_avoid.append(prev_node_to_avoid)
self.last_control_flow_nodes.append(None)
first_node = None
node_not_to_step_past = self.nodes[-1]
for stmt in stmts:
node = self.visit(stmt)
if isinstance(node, ControlFlowNode) and not isinstance(node.test, TryNode):
self.last_control_flow_nodes.append(node.test)
else:
self.last_control_flow_nodes.append(None)
if isinstance(node, ControlFlowNode):
break_nodes.extend(node.break_statements)
elif isinstance(node, BreakNode):
break_nodes.append(node)
if not isinstance(node, IgnoredNode):
cfg_statements.append(node)
if not first_node:
if isinstance(node, ControlFlowNode):
first_node = node.test
else:
first_node = get_first_node(
node,
node_not_to_step_past
)
self.prev_nodes_to_avoid.pop()
self.last_control_flow_nodes.pop()
connect_nodes(cfg_statements)
if cfg_statements:
if first_node:
first_statement = first_node
else:
first_statement = get_first_statement(cfg_statements[0])
last_statements = get_last_statements(cfg_statements)
return ConnectStatements(
first_statement=first_statement,
last_statements=last_statements,
break_statements=break_nodes
)
else:
return IgnoredNode() | Handle stmt* expressions in an AST node.
Links all statements together in a list of statements, accounting for statements with multiple last nodes. |
2,760 | def add(self, parent, obj_type, **attributes):
return self.ixn.add(parent.obj_ref(), obj_type, *self._get_args_list(**attributes)) | IXN API add command
@param parent: object parent - object will be created under this parent.
@param object_type: object type.
@param attributes: additional attributes.
@return: IXN object reference. |
2,761 | def start_consuming(self, to_tuple=False, auto_decode=True):
while not self.is_closed:
self.process_data_events(
to_tuple=to_tuple,
auto_decode=auto_decode
)
if self.consumer_tags:
sleep(IDLE_WAIT)
continue
break | Start consuming messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return: |
2,762 | def _height_is_big_enough(image, height):
if height > image.size[1]:
raise ImageSizeError(image.size[1], height) | Check that the image height is superior to `height` |
2,763 | def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
if center is None:
center = tuple([s // 2 for s in data.shape])
cx, cy, cz = center
m = np.dot(mat4_translate(cx, cy, cz),
np.dot(mat4_rotate(angle, *axis),
mat4_translate(-cx, -cy, -cz)))
m = np.linalg.inv(m)
return affine(data, m, mode=mode, interpolation=interpolation) | rotates data around axis by a given angle
Parameters
----------
data: ndarray
3d array
axis: tuple
axis to rotate by angle about
axis = (x,y,z)
angle: float
center: tuple or None
origin of rotation (cz,cy,cx) in pixels
if None, center is the middle of data
mode: string
boundary mode, one of the following:
'constant'
pads with zeros
'edge'
pads with edge values
'wrap'
pads with the repeated version of the input
interpolation, string
interpolation mode, one of the following
'linear'
'nearest'
Returns
-------
res: ndarray
rotated array (same shape as input) |
2,764 | def get_deviations(self):
mean_vec = self.mean()
df = self.loc[:,:].copy()
for col in df.columns:
df.loc[:,col] -= mean_vec[col]
return type(self).from_dataframe(pst=self.pst,df=df) | get the deviations of the ensemble value from the mean vector
Returns
-------
en : pyemu.Ensemble
Ensemble of deviations from the mean |
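The operation underneath is plain column-wise mean-centering; a minimal pandas equivalent on a hypothetical parameter frame (not the Ensemble class itself):

```python
import pandas as pd

df = pd.DataFrame({"par1": [1.0, 2.0, 3.0], "par2": [10.0, 20.0, 30.0]})
deviations = df - df.mean()   # subtract each column's mean from its values
print(deviations)
#    par1  par2
# 0  -1.0 -10.0
# 1   0.0   0.0
# 2   1.0  10.0
```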
2,765 | def start_archive(self, session_id, has_audio=True, has_video=True, name=None, output_mode=OutputModes.composed, resolution=None):
if not isinstance(output_mode, OutputModes):
raise OpenTokException(u().format(output_mode))
if resolution and output_mode == OutputModes.individual:
raise OpenTokException(u())
payload = {'name': name,
'sessionId': session_id,
'hasAudio': has_audio,
'hasVideo': has_video,
'outputMode': output_mode.value,
'resolution': resolution,
}
response = requests.post(self.endpoints.archive_url(), data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
return Archive(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 400:
raise RequestError(response.json().get("message"))
elif response.status_code == 404:
raise NotFoundError("Session not found")
elif response.status_code == 409:
raise ArchiveError(response.json().get("message"))
else:
raise RequestError("An unexpected error occurred", response.status_code) | Starts archiving an OpenTok session.
Clients must be actively connected to the OpenTok session for you to successfully start
recording an archive.
You can only record one archive at a time for a given session. You can only record archives
of sessions that use the OpenTok Media Router (sessions with the media mode set to routed);
you cannot archive sessions with the media mode set to relayed.
For more information on archiving, see the
`OpenTok archiving <https://tokbox.com/opentok/tutorials/archiving/>`_ programming guide.
:param String session_id: The session ID of the OpenTok session to archive.
:param String name: This is the name of the archive. You can use this name
to identify the archive. It is a property of the Archive object, and it is a property
of archive-related events in the OpenTok.js library.
:param Boolean has_audio: if set to True, an audio track will be inserted to the archive.
has_audio is an optional parameter that is set to True by default. If you set both
has_audio and has_video to False, the call to the start_archive() method results in
an error.
:param Boolean has_video: if set to True, a video track will be inserted to the archive.
has_video is an optional parameter that is set to True by default.
:param OutputModes output_mode: Whether all streams in the archive are recorded
to a single file (OutputModes.composed, the default) or to individual files
(OutputModes.individual).
:param String resolution (Optional): The resolution of the archive, either "640x480" (the default)
or "1280x720". This parameter only applies to composed archives. If you set this
parameter and set the output_mode parameter to OutputModes.individual, the call to the
start_archive() method results in an error.
:rtype: The Archive object, which includes properties defining the archive,
including the archive ID. |
2,766 | def _get_float_remainder(fvalue, signs=9):
check_positive(fvalue)
if isinstance(fvalue, six.integer_types):
return "0"
if isinstance(fvalue, Decimal) and fvalue.as_tuple()[2] == 0:
return "0"
signs = min(signs, len(FRACTIONS))
remainder = str(fvalue).split('.')[1]
iremainder = int(remainder)
orig_remainder = remainder
factor = len(str(remainder)) - signs
if factor > 0:
iremainder = int(round(iremainder / (10.0**factor)))
format = "%%0%dd" % min(len(remainder), signs)
remainder = format % iremainder
if len(remainder) > signs:
raise ValueError("Signs overflow: I can't round only fractional part \
of %s to fit %s in %d signs" % \
(str(fvalue), orig_remainder, signs))
return remainder | Get remainder of float, i.e. 2.05 -> '05'
@param fvalue: input value
@type fvalue: C{integer types}, C{float} or C{Decimal}
@param signs: maximum number of signs
@type signs: C{integer types}
@return: remainder
@rtype: C{str}
@raise ValueError: fvalue is negative
@raise ValueError: signs overflow |
2,767 | def get_encrypted_field(base_class):
assert not isinstance(base_class, models.Field)
field_name = 'Encrypted' + base_class.__name__
if base_class not in FIELD_CACHE:
FIELD_CACHE[base_class] = type(field_name,
(EncryptedMixin, base_class), {
: base_class,
})
return FIELD_CACHE[base_class] | A get or create method for encrypted fields, we cache the field in
the module to avoid recreation. This also allows us to always return
the same class reference for a field.
:type base_class: models.Field[T]
:rtype: models.Field[EncryptedMixin, T] |
2,768 | def parse_data_line(self, sline):
}
self._addRawResult(resid, values, False)
return 0 | Parses the data line and builds the dictionary.
:param sline: a split data line to parse
:returns: the number of rows to jump and parse the next data line or return the code error -1 |
2,769 | def pid_exists(pid):
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True | Check whether pid exists in the current process table. |
2,770 | def setup_mturk_connection(self):
if ((self.aws_access_key_id == ) or
(self.aws_secret_access_key == )):
print "AWS access key not set in ~/.psiturkconfig; please enter a valid access key."
assert False
if self.is_sandbox:
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
else:
endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
self.mtc = boto3.client('mturk',
region_name='us-east-1',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
endpoint_url=endpoint_url)
return True | Connect to turk |
2,771 | def has_axon(neuron, treefun=_read_neurite_type):
return CheckResult(NeuriteType.axon in (treefun(n) for n in neuron.neurites)) | Check if a neuron has an axon
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result |
2,772 | def forget(self, obj):
self._init()
self.known.pop(obj._pk, None)
self.wknown.pop(obj._pk, None) | Forgets about an entity (automatically called when an entity is
deleted). Call this to ensure that an entity that you've modified is
not automatically saved on ``session.commit()`` . |
2,773 | def IntegerLike(msg=None):
def fn(value):
if not any([
isinstance(value, numbers.Integral),
(isinstance(value, float) and value.is_integer()),
(isinstance(value, basestring) and value.isdigit())
]):
raise Invalid(msg or (
.format(value))
)
else:
return value
return fn | Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of digits |
2,774 | def add_middleware(middleware: EFBMiddleware):
global middlewares
if isinstance(middleware, EFBMiddleware):
middlewares.append(middleware)
else:
raise TypeError("Middleware instance is expected") | Register a middleware with the coordinator.
Args:
middleware (EFBMiddleware): Middleware to register |
2,775 | def attach(self, media: typing.Union[InputMedia, typing.Dict]):
if isinstance(media, dict):
if 'type' not in media:
raise ValueError(f"Invalid media!")
media_type = media['type']
if media_type == 'photo':
media = InputMediaPhoto(**media)
elif media_type == 'video':
media = InputMediaVideo(**media)
else:
raise TypeError(f"Invalid media type '{media_type}'!")
elif not isinstance(media, InputMedia):
raise TypeError(f"Media must be an instance of InputMedia or dict, not {type(media).__name__}")
elif media.type in [, , ]:
raise ValueError(f"This type of media is not supported by media groups!")
self.media.append(media) | Attach media
:param media: |
2,776 | def server_powerstatus(host=None,
admin_username=None,
admin_password=None,
module=None):
ret = __execute_ret(,
host=host, admin_username=admin_username,
admin_password=admin_password,
module=module)
result = {: 0}
if ret[] == :
result[] = True
result[] =
if ret[] == :
result[] = False
result[] =
if ret[].startswith():
result[] = False
result[] = ret[]
return result | return the power status for the passed module
CLI Example:
.. code-block:: bash
salt dell drac.server_powerstatus |
2,777 | def _append_params(oauth_params, params):
merged = list(params)
merged.extend(oauth_params)
merged.sort(key=lambda i: i[0].startswith('oauth_'))
return merged | Append OAuth params to an existing set of parameters.
Both params and oauth_params is must be lists of 2-tuples.
Per `section 3.5.2`_ and `3.5.3`_ of the spec.
.. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
.. _`3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3 |
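A worked example of the merge-and-sort behaviour with made-up parameter values (the 'oauth_' prefix is the natural reading of the elided startswith argument): the sort key is False for ordinary parameters and True for 'oauth_'-prefixed ones, so the stable sort keeps each group's relative order and moves the OAuth parameters to the end.

```python
params = [("b", "2"), ("a", "1")]
oauth_params = [("oauth_nonce", "xyz"), ("oauth_consumer_key", "abc")]

merged = list(params)
merged.extend(oauth_params)
merged.sort(key=lambda i: i[0].startswith("oauth_"))
print(merged)
# [('b', '2'), ('a', '1'), ('oauth_nonce', 'xyz'), ('oauth_consumer_key', 'abc')]
```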
2,778 | def get_and_write(self, iso_path, local_path, blocksize=8192):
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput()
with open(local_path, 'wb') as fp:
self._get_and_write_fp(utils.normpath(iso_path), fp, blocksize) | (deprecated) Fetch a single file from the ISO and write it out to the
specified file. Note that this will overwrite the contents of the local
file if it already exists. Also note that 'iso_path' must be an
absolute path to the file. Finally, the 'iso_path' can be an ISO9660
path, a Rock Ridge path, or a Joliet path. In the case of ambiguity,
the Joliet path is tried first, followed by the ISO9660 path, followed
by the Rock Ridge path. It is recommended to use the get_file_from_iso
API instead to resolve this ambiguity.
Parameters:
iso_path - The absolute path to the file to get data from.
local_path - The local filename to write the contents to.
blocksize - The blocksize to use when copying data; the default is 8192.
Returns:
Nothing. |
2,779 | def readmegen():
hitchpylibrarytoolkit.readmegen(
_storybook(), DIR.project, DIR.key, DIR.gen, "commandlib"
) | Build documentation. |
2,780 | def comparison(self, lhs, rhs):
if len(rhs) > 0:
return ast.Compare(left=lhs, ops=list(map(lambda x: x[0], rhs)),
comparators=list(map(lambda x: x[1], rhs)),
loc=lhs.loc.join(rhs[-1][1].loc))
else:
return lhs | (2.6, 2.7) comparison: expr (comp_op expr)*
(3.0, 3.1) comparison: star_expr (comp_op star_expr)*
(3.2-) comparison: expr (comp_op expr)* |
2,781 | def get_bug_report():
platform_info = BugReporter.get_platform_info()
module_info = {
'version': hal_version.__version__,
'build': hal_version.__build__
}
return {
'platform': platform_info,
'module': module_info
} | Generate information for a bug report
:return: information for bug report |
2,782 | def close_document(self, path):
to_close = []
for widget in self.widgets(include_clones=True):
p = os.path.normpath(os.path.normcase(widget.file.path))
path = os.path.normpath(os.path.normcase(path))
if p == path:
to_close.append(widget)
for widget in to_close:
tw = widget.parent_tab_widget
tw.remove_tab(tw.indexOf(widget)) | Closes a text document.
:param path: Path of the document to close. |
2,783 | def sense_dep(self, target):
self.chipset.rf_configuration(0x02, b"\x0B\x0B\x0A")
return super(Device, self).sense_dep(target) | Search for a DEP Target in active or passive communication mode. |
2,784 | def _prepare_uimodules(self):
for key, value in self._config.get(config.UI_MODULES, {}).iteritems():
self._config[config.UI_MODULES][key] = self._import_class(value)
self._config[config.UI_MODULES] = dict(self._config[config.UI_MODULES] or {}) | Prepare the UI Modules from a list of namespaced paths. |
2,785 | async def load_message_field(obj, msg, field, field_archiver=None):
fname, ftype, params = field[0], field[1], field[2:]
field_archiver = field_archiver if field_archiver else load_field
await field_archiver(obj[fname], ftype, params, eref(msg, fname)) | Loads message field from the object. Field is defined by the message field specification.
Returns loaded value, supports field reference.
:param obj:
:param msg:
:param field:
:param field_archiver:
:return: |
2,786 | def clear(self):
self._delete_child_storage(self.root_node)
self._delete_node_storage(self.root_node)
self.root_node = BLANK_NODE | clear all tree data |
2,787 | def sevenths_inv(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
seventh_qualities = ['maj', 'min', 'maj7', 'min7', '7', '']
valid_semitones = np.array([QUALITIES[name] for name in seventh_qualities])
ref_roots, ref_semitones, ref_basses = encode_many(reference_labels, False)
est_roots, est_semitones, est_basses = encode_many(estimated_labels, False)
eq_roots_basses = (ref_roots == est_roots) * (ref_basses == est_basses)
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
comparison_scores = (eq_roots_basses * eq_semitones).astype(np.float)
is_valid = np.array([np.all(np.equal(ref_semitones, semitones), axis=1)
for semitones in valid_semitones])
comparison_scores[np.sum(is_valid, axis=0) == 0] = -1
valid_inversion = np.ones(ref_basses.shape, dtype=bool)
bass_idx = ref_basses >= 0
valid_inversion[bass_idx] = ref_semitones[bass_idx, ref_basses[bass_idx]]
comparison_scores[valid_inversion == 0] = -1
return comparison_scores | Compare chords along MIREX 'sevenths' rules. Chords with qualities
outside [maj, maj7, 7, min, min7, N] are ignored.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.sevenths_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut. |
2,788 | def create_folder(self, folder_name, parent_kind_str, parent_uuid):
data = {
'name': folder_name,
'parent': {
'kind': parent_kind_str,
'id': parent_uuid
}
}
return self._post("/folders", data) | Send POST to /folders to create a new folder with specified name and parent.
:param folder_name: str name of the new folder
:param parent_kind_str: str type of parent folder has(dds-folder,dds-project)
:param parent_uuid: str uuid of the parent object
:return: requests.Response containing the successful result |
2,789 | def _one_q_sic_prep(index, qubit):
if index == 0:
return Program()
theta = 2 * np.arccos(1 / np.sqrt(3))
zx_plane_rotation = Program([
RX(-pi / 2, qubit),
RZ(theta - pi, qubit),
RX(-pi / 2, qubit),
])
if index == 1:
return zx_plane_rotation
elif index == 2:
return zx_plane_rotation + RZ(-2 * pi / 3, qubit)
elif index == 3:
return zx_plane_rotation + RZ(2 * pi / 3, qubit)
raise ValueError(f"Bad SIC index: {index}") | Prepare the index-th SIC basis state. |
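A hedged sketch that prints the four single-qubit SIC preparation programs on qubit 0 (assumes pyQuil is installed and the helper above is importable; indices 1-3 differ from each other only by a trailing RZ of 0 or +/-2*pi/3 after the shared ZX-plane rotation):
for idx in range(4):
    prog = _one_q_sic_prep(idx, qubit=0)
    print(f"--- SIC state {idx} ---")
    print(prog)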
2,790 | def volume_show(name, profile=None, **kwargs):
conn = _auth(profile, **kwargs)
return conn.volume_show(name) | Show a block storage volume
name
Name of the volume
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_show myblock profile=openstack |
2,791 | def libvlc_vlm_del_media(p_instance, psz_name):
f = _Cfunctions.get('libvlc_vlm_del_media', None) or \
_Cfunction('libvlc_vlm_del_media', ((1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p)
return f(p_instance, psz_name) | Delete a media (VOD or broadcast).
@param p_instance: the instance.
@param psz_name: the media to delete.
@return: 0 on success, -1 on error. |
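A hedged usage sketch of the raw binding via the python-vlc module (assumes a VLM media named ``b"demo"`` was registered earlier, e.g. with ``libvlc_vlm_add_broadcast``; otherwise the call simply returns -1):
import vlc

instance = vlc.Instance()
rc = vlc.libvlc_vlm_del_media(instance, b"demo")
print(rc)  # 0 on success, -1 on error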
2,792 | def script(experiment, projects):
benchbuild_c = local[local.path(sys.argv[0])]
slurm_script = local.cwd / experiment.name + "-" + str(
CFG["slurm"]["script"])
srun = local["srun"]
srun_args = []
if not CFG["slurm"]["multithread"]:
srun_args.append("--hint=nomultithread")
if not CFG["slurm"]["turbo"]:
srun_args.append("--pstate-turbo=off")
srun = srun[srun_args]
srun = srun[benchbuild_c["run"]]
return __save__(slurm_script, srun, experiment, projects) | Prepare a slurm script that executes the experiment for a given project.
Args:
experiment: The experiment we want to execute
projects: All projects we generate an array job for. |
2,793 | def grab_literal(template, l_del):
global _CURRENT_LINE
try:
literal, template = template.split(l_del, 1)
_CURRENT_LINE += literal.count('\n')
return (literal, template)
except ValueError:
return (template, '') | Parse a literal from the template |
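A plain-Python sketch of the splitting behaviour (assumes the function and its module-level ``_CURRENT_LINE`` counter live in the same file; the delimiter here is the ordinary mustache ``{{``):
_CURRENT_LINE = 0  # module-level counter used by grab_literal

literal, rest = grab_literal("Hello, {{ name }}!", "{{")
print(repr(literal))  # 'Hello, '    -- everything before the left delimiter
print(repr(rest))     # ' name }}!'  -- remainder, with the delimiter consumed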
2,794 | def append_ISO19115_keywords(keywords):
}
ISO19115_keywords = {}
for key, value in list(ISO19115_mapping.items()):
ISO19115_keywords[value] = setting(key, expected_type=str)
keywords.update(ISO19115_keywords) | Append ISO19115 from setting to keywords.
:param keywords: The keywords destination.
:type keywords: dict |
2,795 | def _parseLine(cls, line):
r = cls._PROG.match(line)
if not r:
raise ValueError("Error: parsing . Correct: \"<number> <number> [<text>]\"" % line)
d = r.groupdict()
if len(d['start']) == 0 or len(d['end']) == 0:
raise ValueError("Error: parsing '%s'. Correct: \"<number> <number> [<text>]\"" % line)
return AudioClipSpec(d['start'], d['end'], d['text'].strip()) | Parses a single line of text and returns an AudioClipSpec
Line format:
<number> <number> [<text>]
Returns: list(AudioClipSpec) or None |
2,796 | def directory_files(path):
for entry in os.scandir(path):
if not entry.name.startswith('.') and entry.is_file():
yield entry.name | Yield directory file names. |
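A quick usage sketch (Python 3.5+, where ``os.scandir`` is available); dot-prefixed files and subdirectories are skipped:
import os
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, "data.txt"), "w").close()
open(os.path.join(tmp, ".hidden"), "w").close()
os.mkdir(os.path.join(tmp, "subdir"))
print(sorted(directory_files(tmp)))  # -> ['data.txt']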
2,797 | def permute_point(p, permutation=None):
if not permutation:
return p
return [p[int(permutation[i])] for i in range(len(p))] | Permutes the point according to the permutation keyword argument. The
default permutation is "012" which does not change the order of the
coordinates. To rotate counterclockwise, use "120" and to rotate clockwise
use "201". |
2,798 | def poll(self, poll_rate=None, timeout=None):
return _poll_task(
self.id,
self._server_config,
poll_rate,
timeout
) | Return the status of a task or timeout.
There are several API calls that trigger asynchronous tasks, such as
synchronizing a repository, or publishing or promoting a content view.
It is possible to check on the status of a task if you know its UUID.
This method polls a task once every ``poll_rate`` seconds and, upon
task completion, returns information about that task.
:param poll_rate: Delay between the end of one task check-up and
the start of the next check-up. Defaults to
``nailgun.entity_mixins.TASK_POLL_RATE``.
:param timeout: Maximum number of seconds to wait until timing out.
Defaults to ``nailgun.entity_mixins.TASK_TIMEOUT``.
:returns: Information about the asynchronous task.
:raises: ``nailgun.entity_mixins.TaskTimedOutError`` if the task
is not finished before ``timeout`` expires.
:raises: ``nailgun.entity_mixins.TaskFailedError`` if the task finishes
with any result other than "success".
:raises: ``requests.exceptions.HTTPError`` If the API returns a message
with an HTTP 4XX or 5XX status code. |
2,799 | def _vcap_from_service_definition(service_def):
if 'credentials' in service_def:
credentials = service_def['credentials']
else:
credentials = service_def
service = {}
service['credentials'] = credentials
service['name'] = _name_from_service_definition(service_def)
vcap = {'streaming-analytics': [service]}
return vcap | Turn a service definition into a vcap services
containing a single service. |
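A hedged sketch of the resulting structure (the service definition below is invented; the top-level ``'streaming-analytics'`` key reflects the reconstruction above, and the companion helper ``_name_from_service_definition`` is assumed to return the definition's name field):
service_def = {
    "name": "my-streaming-service",  # hypothetical service definition
    "credentials": {"apikey": "***", "v2_rest_url": "https://example.com/v2"},
}
vcap = _vcap_from_service_definition(service_def)
print(vcap)
# {'streaming-analytics': [{'credentials': {...}, 'name': 'my-streaming-service'}]}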