Unnamed: 0 (int64) | code (string) | docstring (string)
---|---|---|
1,700 | def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report) | Parses protein data for a certain protein into tsv output
dictionary |
1,701 | def truncate(self, length):
if length > len(self.digest):
raise ValueError("cannot enlarge the original digest by %d bytes"
% (length - len(self.digest)))
return self.__class__(self.func, self.digest[:length]) | Return a new `Multihash` with a shorter digest `length`.
If the given `length` is greater than the original, a `ValueError`
is raised.
>>> mh1 = Multihash(0x01, b'FOOBAR')
>>> mh2 = mh1.truncate(3)
>>> mh2 == (0x01, b'FOO')
True
>>> mh3 = mh1.truncate(10)
Traceback (most recent call last):
...
ValueError: cannot enlarge the original digest by 4 bytes |
1,702 | def _process_state(cls, unprocessed, processed, state):
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens | Preprocess a single state definition. |
1,703 | def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tabs = self.get_active_tabs()
context.update({
: tabs,
: tabs[0].code if tabs else ,
: self.get_app_label(),
: self.get_model_name(),
: self.get_model_alias(),
: self.object._meta.verbose_name.title(),
: self.get_back_url(),
: self.get_edit_url(),
: self.get_delete_url(),
: self.title,
})
return context | Add context data to view |
1,704 | def lx4num(string, first):
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4num_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value | Scan a string from a specified starting position for the
end of a number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4num_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple |
1,705 | def fmap(self, f: Callable[[T], B]) -> 'List[B]':
return List([f(x) for x in self.unbox()]) | doufo.List.fmap: map `List`
Args:
`self`:
`f` (`Callable[[T], B]`): any callable function
Returns:
return (`List[B]`): A `List` of the objects produced by `f`.
Raises: |
1,706 | def is_valid_mac(addr):
addrs = addr.split(':')
if len(addrs) != 6:
return False
for m in addrs:
try:
if int(m, 16) > 255:
return False
except ValueError:
return False
return True | Check the syntax of a given mac address.
The acceptable format is xx:xx:xx:xx:xx:xx |
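A quick standalone sketch of the validator in the row above; the sample addresses are invented for illustration:

```python
def is_valid_mac(addr):
    parts = addr.split(":")            # expected form: xx:xx:xx:xx:xx:xx
    if len(parts) != 6:
        return False
    for part in parts:
        try:
            if int(part, 16) > 255:    # each group must fit in one byte
                return False
        except ValueError:             # non-hex group
            return False
    return True

assert is_valid_mac("00:1a:2b:3c:4d:5e") is True
assert is_valid_mac("00:1a:2b:3c:4d") is False      # too few groups
assert is_valid_mac("00:1a:2b:3c:4d:zz") is False   # non-hex group
```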
1,707 | def analyze(problem, Y, calc_second_order=True, num_resamples=100,
conf_level=0.95, print_to_console=False, parallel=False,
n_processors=None, seed=None):
if seed:
np.random.seed(seed)
if not problem.get('groups'):
D = problem['num_vars']
else:
D = len(set(problem['groups']))
if calc_second_order and Y.size % (2 * D + 2) == 0:
N = int(Y.size / (2 * D + 2))
elif not calc_second_order and Y.size % (D + 2) == 0:
N = int(Y.size / (D + 2))
else:
raise RuntimeError()
if conf_level < 0 or conf_level > 1:
raise RuntimeError("Confidence level must be between 0-1.")
Y = (Y - Y.mean()) / Y.std()
A, B, AB, BA = separate_output_values(Y, D, N, calc_second_order)
r = np.random.randint(N, size=(N, num_resamples))
Z = norm.ppf(0.5 + conf_level / 2)
if not parallel:
S = create_Si_dict(D, calc_second_order)
for j in range(D):
S['S1'][j] = first_order(A, AB[:, j], B)
S['S1_conf'][j] = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
S['ST'][j] = total_order(A, AB[:, j], B)
S['ST_conf'][j] = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
if calc_second_order:
for j in range(D):
for k in range(j + 1, D):
S['S2'][j, k] = second_order(
A, AB[:, j], AB[:, k], BA[:, j], B)
S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j],
AB[r, k], BA[r, j], B[r]).std(ddof=1)
else:
tasks, n_processors = create_task_list(
D, calc_second_order, n_processors)
func = partial(sobol_parallel, Z, A, AB, BA, B, r)
pool = Pool(n_processors)
S_list = pool.map_async(func, tasks)
pool.close()
pool.join()
S = Si_list_to_dict(S_list.get(), D, calc_second_order)
if print_to_console:
print_indices(S, problem, calc_second_order)
S.problem = problem
S.to_df = MethodType(to_df, S)
return S | Perform Sobol Analysis on model outputs.
Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf', where
each entry is a list of size D (the number of parameters) containing the
indices in the same order as the parameter file. If calc_second_order is
True, the dictionary also contains keys 'S2' and 'S2_conf'.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
calc_second_order : bool
Calculate second-order sensitivities (default True)
num_resamples : int
The number of resamples (default 100)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
.. [2] Saltelli, A. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
.. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True) |
1,708 | def renumber(args):
from jcvi.algorithms.lis import longest_increasing_subsequence
from jcvi.utils.grouper import Grouper
p = OptionParser(renumber.__doc__)
p.set_annot_reformat_opts()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
abedfile = pf + ".a.bed"
bbedfile = pf + ".b.bed"
if need_update(bedfile, (abedfile, bbedfile)):
prepare(bedfile)
mbed = Bed(bbedfile)
g = Grouper()
for s in mbed:
accn = s.accn
g.join(*accn.split(";"))
bed = Bed(abedfile)
for chr, sbed in bed.sub_beds():
current_chr = chr_number(chr)
if not current_chr:
continue
ranks = []
gg = set()
for s in sbed:
accn = s.accn
achr, arank = atg_name(accn)
if achr != current_chr:
continue
ranks.append(arank)
gg.add(accn)
lranks = longest_increasing_subsequence(ranks)
print(current_chr, len(sbed), "==>", len(ranks), \
"==>", len(lranks), file=sys.stderr)
granks = set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, uc=opts.uc) for x in lranks) | \
set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks)
tagstore = {}
for s in sbed:
achr, arank = atg_name(s.accn)
accn = s.accn
if accn in granks:
tag = (accn, FRAME)
elif accn in gg:
tag = (accn, RETAIN)
else:
tag = (".", NEW)
tagstore[accn] = tag
for s in sbed:
accn = s.accn
gaccn = g[accn]
tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn]
group = [(PRIORITY.index(tag), x) for tag, x in tags]
best = min(group)[-1]
if accn != best:
tag = (best, OVERLAP)
else:
tag = tagstore[accn]
print("\t".join((str(s), "|".join(tag)))) | %prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates. |
1,709 | def getNextService(self, discover):
manager = self.getManager()
if manager is not None and not manager:
self.destroyManager()
if not manager:
yadis_url, services = discover(self.url)
manager = self.createManager(services, yadis_url)
if manager:
service = manager.next()
manager.store(self.session)
else:
service = None
return service | Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: str -> [service]
@return: the next available service |
1,710 | def awake(self, procid):
logger.debug(f"Remove procid:{procid} from waitlists and reestablish it in the running list")
for wait_list in self.rwait:
if procid in wait_list:
wait_list.remove(procid)
for wait_list in self.twait:
if procid in wait_list:
wait_list.remove(procid)
self.timers[procid] = None
self.running.append(procid)
if self._current is None:
self._current = procid | Remove procid from waitlists and reestablish it in the running list |
1,711 | def encode_username_password(
username: Union[str, bytes], password: Union[str, bytes]
) -> bytes:
if isinstance(username, unicode_type):
username = unicodedata.normalize("NFC", username)
if isinstance(password, unicode_type):
password = unicodedata.normalize("NFC", password)
return utf8(username) + b":" + utf8(password) | Encodes a username/password pair in the format used by HTTP auth.
The return value is a byte string in the form ``username:password``.
.. versionadded:: 5.1 |
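A minimal sketch of the same idea with a hypothetical helper name (not Tornado's API): NFC-normalize both parts so visually identical credentials encode to identical bytes:

```python
import unicodedata

def encode_basic_credentials(username: str, password: str) -> bytes:
    # NFC-normalize text, then join as UTF-8 bytes, as in the row above.
    username = unicodedata.normalize("NFC", username)
    password = unicodedata.normalize("NFC", password)
    return username.encode("utf-8") + b":" + password.encode("utf-8")

# 'e' + combining acute collapses to the precomposed 'é' under NFC, so the
# two spellings of the same name yield the same byte string.
assert encode_basic_credentials("re\u0301my", "pw") == "rémy:pw".encode("utf-8")
```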
1,712 | def set_translation(lang):
global translation
langs = list()
if lang:
if '.' in lang:
lang = lang.split('.')[0]
langs += [lang, lang[:2]]
path = pkg_resources.resource_filename(, )
codeset = locale.getpreferredencoding()
if codeset == :
codeset =
try:
translation = gettext.translation(
, path, languages=langs, codeset=codeset)
translation.ugettext = translation.gettext
except IOError:
return False
return True | Set the translation used by (some) pywws modules.
This sets the translation object ``pywws.localisation.translation``
to use a particular language.
The ``lang`` parameter can be any string of the form ``en``,
``en_GB`` or ``en_GB.UTF-8``. Anything after a ``.`` character is
ignored. In the case of a string such as ``en_GB``, the routine
will search for an ``en_GB`` language file before searching for an
``en`` one.
:param lang: language code.
:type lang: string
:return: success status.
:rtype: bool |
1,713 | def accept(self):
client, addr = self._socket.accept()
conn = Connection(self._context, client)
conn.set_accept_state()
return (conn, addr) | Call the :meth:`accept` method of the underlying socket and set up SSL
on the returned socket, using the Context object supplied to this
:class:`Connection` object at creation.
:return: A *(conn, addr)* pair where *conn* is the new
:class:`Connection` object created, and *address* is as returned by
the socket's :meth:`accept`. |
1,714 | def attach_related_file(self, path, mimetype=None):
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach_related(filename, content, mimetype) | Attaches a file from the filesystem. |
1,715 | def convertPrice(variant, regex=None, short_regex=None, none_regex=none_price_regex):
if isinstance(variant, int) and not isinstance(variant, bool):
return variant
elif isinstance(variant, float):
return round(variant * 100)
elif isinstance(variant, str):
match = (regex or default_price_regex).search(variant) \
or (short_regex or short_price_regex).match(variant)
if not match:
if none_regex and none_regex.match(variant):
return None
raise ValueError(.format(variant))
return int(match.group('euro')) * 100 + \
int(match.groupdict().get('cent', '').ljust(2, '0'))
else:
raise TypeError(.format(variant)) | Helper function to convert the given input price into integers (cents
count). :obj:`int`, :obj:`float` and :obj:`str` are supported
:param variant: Price
:param re.compile regex: Regex to convert str into price. The re should
contain two named groups `euro` and `cent`
:param re.compile short_regex: Short regex version (no cent part)
group `euro` should contain a valid integer.
:param re.compile none_regex: Regex to detect that no value is provided
if the input data is str, the normal regex do not match and this
regex matches `None` is returned.
:rtype: int/None |
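A minimal sketch of the euro/cent parsing idea from the row above; the regexes here are illustrative stand-ins, not the module's own patterns:

```python
import re

# Hypothetical patterns: "3,50" / "2.5" style prices, or a bare euro amount.
default_price_regex = re.compile(r"(?P<euro>\d+)[,.](?P<cent>\d{1,2})")
short_price_regex = re.compile(r"(?P<euro>\d+)\s*€?$")

def to_cents(text):
    match = default_price_regex.search(text) or short_price_regex.match(text)
    if not match:
        raise ValueError("could not parse price: {!r}".format(text))
    # Pad a single cent digit ("5" -> "50"); a missing cent group means ",00".
    cents = (match.groupdict().get("cent") or "").ljust(2, "0")
    return int(match.group("euro")) * 100 + int(cents)

assert to_cents("3,50 €") == 350
assert to_cents("2.5") == 250
assert to_cents("4") == 400
```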
1,716 | def margin(
self,
axis=None,
weighted=True,
include_missing=False,
include_transforms_for_dims=None,
prune=False,
include_mr_cat=False,
):
axis = self._calculate_correct_axis_for_cube(axis)
hs_dims = self._hs_dims_for_cube(include_transforms_for_dims)
margin = self._cube.margin(
axis=axis,
weighted=weighted,
include_missing=include_missing,
include_transforms_for_dims=hs_dims,
prune=prune,
include_mr_cat=include_mr_cat,
)
return self._extract_slice_result_from_cube(margin) | Return ndarray representing slice margin across selected axis.
A margin (or basis) can be calculated for a contingency table, provided
that the dimensions of the desired directions are marginable. The
dimensions are marginable if they represent mutually exclusive data,
such as true categorical data. For array types the items dimensions are
not marginable. Requesting a margin across these dimensions
(e.g. slice.margin(axis=0) for a categorical array cube slice) will
produce an error. For multiple response slices, the implicit convention
is that the provided direction scales to the selections dimension of the
slice. These cases produce meaningful data, but of a slightly different
shape (e.g. slice.margin(0) for a MR x CAT slice will produce 2D ndarray
(variable dimensions are never collapsed!)).
:param axis: Axis across which to sum. Can be 0 (columns margin),
1 (rows margin) and None (table margin). If requested across
variables dimension (e.g. requesting 0 margin for CA array) it will
produce an error.
:param weighted: Weighted or unweighted counts.
:param include_missing: Include missing categories or not.
:param include_transforms_for_dims: Indices of dimensions for which to
include transformations
:param prune: Perform pruning based on unweighted counts.
:returns: (weighted or unweighted counts) summed across provided axis.
For multiple response types, items dimensions are not collapsed. |
1,717 | def get_entry_categories(self, category_nodes):
categories = []
for category_node in category_nodes:
domain = category_node.attrib.get('domain')
if domain == 'category':
categories.append(self.categories[category_node.text])
return categories | Return a list of entry's categories
based on imported categories. |
1,718 | def get_input(problem):
input_data = load_input()
pbsplit = problem.split(":")
problem_input = input_data[][pbsplit[0]]
if isinstance(problem_input, dict) and "filename" in problem_input and "value" in problem_input:
if len(pbsplit) > 1 and pbsplit[1] == :
return problem_input["filename"]
else:
return open(problem_input["value"], ).read()
else:
return problem_input | Returns the specified problem answer in the form
problem: problem id
Returns string, or bytes if a file is loaded |
1,719 | def solubility_parameter(self):
return solubility_parameter(T=self.T, Hvapm=self.Hvapm, Vml=self.Vml,
Method=self.solubility_parameter_method,
CASRN=self.CAS) | r'''Solubility parameter of the chemical at its
current temperature and pressure, in units of [Pa^0.5].
.. math::
\delta = \sqrt{\frac{\Delta H_{vap} - RT}{V_m}}
Calculated based on enthalpy of vaporization and molar volume.
Normally calculated at STP. For uses of this property, see
:obj:`thermo.solubility.solubility_parameter`.
Examples
--------
>>> Chemical('NH3').solubility_parameter
24766.329043856073 |
1,720 | def withAttribute(*args,**attrDict):
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute has value , must be " %
(attrName, tokens[attrName], attrValue))
return pa | Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
values. Specify the list of filter attributes names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1 |
1,721 | def register_blueprints(app, application_package_name=None, blueprint_directory=None):
if not application_package_name:
application_package_name =
if not blueprint_directory:
blueprint_directory = os.path.join(os.getcwd(), application_package_name)
blueprint_directories = get_child_directories(blueprint_directory)
for directory in blueprint_directories:
abs_package = .format(application_package_name, directory)
service = importlib.import_module(abs_package)
app.register_blueprint(service.blueprint_api, url_prefix=) | Register Flask blueprints on app object |
1,722 | def UpdateFlow(self,
client_id,
flow_id,
flow_obj=db.Database.unchanged,
flow_state=db.Database.unchanged,
client_crash_info=db.Database.unchanged,
pending_termination=db.Database.unchanged,
processing_on=db.Database.unchanged,
processing_since=db.Database.unchanged,
processing_deadline=db.Database.unchanged):
try:
flow = self.flows[(client_id, flow_id)]
except KeyError:
raise db.UnknownFlowError(client_id, flow_id)
if flow_obj != db.Database.unchanged:
self.flows[(client_id, flow_id)] = flow_obj
flow = flow_obj
if flow_state != db.Database.unchanged:
flow.flow_state = flow_state
if client_crash_info != db.Database.unchanged:
flow.client_crash_info = client_crash_info
if pending_termination != db.Database.unchanged:
flow.pending_termination = pending_termination
if processing_on != db.Database.unchanged:
flow.processing_on = processing_on
if processing_since != db.Database.unchanged:
flow.processing_since = processing_since
if processing_deadline != db.Database.unchanged:
flow.processing_deadline = processing_deadline
flow.last_update_time = rdfvalue.RDFDatetime.Now() | Updates flow objects in the database. |
1,723 | def remove_accounts_from_group(accounts_query, group):
query = accounts_query.filter(date_deleted__isnull=True)
for account in query:
remove_account_from_group(account, group) | Remove accounts from group. |
1,724 | def __read_device(self):
state = XinputState()
res = self.manager.xinput.XInputGetState(
self.__device_number, ctypes.byref(state))
if res == XINPUT_ERROR_SUCCESS:
return state
if res != XINPUT_ERROR_DEVICE_NOT_CONNECTED:
raise RuntimeError(
"Unknown error %d attempting to get state of device %d" % (
res, self.__device_number))
return None | Read the state of the gamepad. |
1,725 | def execute_catch(c, sql, vars=None):
try:
c.execute(sql, vars)
except Exception as err:
cmd = sql.split(, 1)[0]
log.error("Error executing %s: %s", cmd, err) | Run a query, but ignore any errors. For error recovery paths where the error handler should not raise another. |
1,726 | def create_intent(self,
parent,
intent,
language_code=None,
intent_view=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
if 'create_intent' not in self._inner_api_calls:
self._inner_api_calls[
'create_intent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_intent,
default_retry=self._method_configs['CreateIntent'].retry,
default_timeout=self._method_configs['CreateIntent']
.timeout,
client_info=self._client_info,
)
request = intent_pb2.CreateIntentRequest(
parent=parent,
intent=intent,
language_code=language_code,
intent_view=intent_view,
)
return self._inner_api_calls['create_intent'](
request, retry=retry, timeout=timeout, metadata=metadata) | Creates an intent in the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``intent``:
>>> intent = {}
>>>
>>> response = client.create_intent(parent, intent)
Args:
parent (str): Required. The agent to create an intent for.
Format: ``projects/<Project ID>/agent``.
intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]): Required. The intent to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
language_code (str): Optional. The language of training phrases, parameters and rich messages
defined in ``intent``. If not specified, the agent's default language is
used. [More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
1,727 | def from_bytes(OverwinterTx, byte_string):
header = byte_string[0:4]
group_id = byte_string[4:8]
if header != b or group_id != b:
raise ValueError(
.format(b.hex(),
b.hex(),
header.hex(),
group_id.hex()))
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
current = 8 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
expiry_height = byte_string[current:current + 4]
current += 4
if current == len(byte_string):
tx_joinsplits = tuple()
joinsplit_pubkey = None
joinsplit_sig = None
else:
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_joinsplits_num.number):
tx_joinsplit = z.SproutJoinsplit.from_bytes(
byte_string[current:])
current += len(tx_joinsplit)
tx_joinsplits.append(tx_joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return OverwinterTx(
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
expiry_height=expiry_height,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | byte-like -> OverwinterTx |
1,728 | def add_to_stage(self, paths):
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError("Can't add paths to VCS. Process exited with code %d and message: %s" % (
code, stderr + stdout)) | Stage given files
:param paths:
:return: |
1,729 | def convert_runsummary_to_json(
df, comment=, prefix=
):
data_field = []
comment += ", by {}".format(getpass.getuser())
for det_id, det_data in df.groupby():
runs_field = []
data_field.append({"DetectorId": det_id, "Runs": runs_field})
for run, run_data in det_data.groupby():
parameters_field = []
runs_field.append({
"Run": int(run),
"Parameters": parameters_field
})
parameter_dict = {}
for row in run_data.itertuples():
for parameter_name in run_data.columns:
if parameter_name in REQUIRED_COLUMNS:
continue
if parameter_name not in parameter_dict:
entry = {: prefix + parameter_name, : []}
parameter_dict[parameter_name] = entry
data_value = getattr(row, parameter_name)
try:
data_value = float(data_value)
except ValueError as e:
log.critical("Data values has to be floats!")
raise ValueError(e)
value = {: str(getattr(row, )), : data_value}
parameter_dict[parameter_name][].append(value)
for parameter_data in parameter_dict.values():
parameters_field.append(parameter_data)
data_to_upload = {"Comment": comment, "Data": data_field}
file_data_to_upload = json.dumps(data_to_upload)
return file_data_to_upload | Convert a Pandas DataFrame with runsummary to JSON for DB upload |
1,730 | def add_method(obj, func, name=None):
if name is None:
name = func.__name__
if sys.version_info < (3,):
method = types.MethodType(func, obj, obj.__class__)
else:
method = types.MethodType(func, obj)
setattr(obj, name, method) | Adds an instance method to an object. |
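A short usage sketch of the same runtime-binding trick on Python 3 (class and function names are made up):

```python
import types

class Greeter:
    def __init__(self, name):
        self.name = name

def say_hello(self):
    return "hello, " + self.name

g = Greeter("world")
# Bind say_hello to this single instance (the Python 3 branch above).
g.hello = types.MethodType(say_hello, g)
assert g.hello() == "hello, world"
assert not hasattr(Greeter("other"), "hello")   # other instances unaffected
```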
1,731 | def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
ds = ds.batch(batch_size)
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
return h | Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter |
1,732 | def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
if high is None and low is None and limit is None:
return X, set()
dfs = X.map(_document_frequency).sum()
tfs = X.map(lambda x: np.asarray(x.sum(axis=0))).sum().ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return kept_indices, removed_terms | Remove too rare or too common features.
Prune features that are non-zero in more documents than high or in fewer
documents than low, modifying the vocabulary and restricting it to
at most the limit most frequent.
This does not prune samples with zero features. |
1,733 | def _parse_args(self,freqsAngles=True,_firstFlip=False,*args):
from galpy.orbit import Orbit
RasOrbit= False
integrated= True
if len(args) == 5 or len(args) == 3:
raise IOError("Must specify phi for actionAngleIsochroneApprox")
if len(args) == 6 or len(args) == 4:
if len(args) == 6:
R,vR,vT, z, vz, phi= args
else:
R,vR,vT, phi= args
z, vz= 0., 0.
if isinstance(R,float):
os= [Orbit([R,vR,vT,z,vz,phi])]
RasOrbit= True
integrated= False
elif len(R.shape) == 1:
os= [Orbit([R[ii],vR[ii],vT[ii],z[ii],vz[ii],phi[ii]]) for ii in range(R.shape[0])]
RasOrbit= True
integrated= False
if isinstance(args[0],Orbit) \
or (isinstance(args[0],list) and isinstance(args[0][0],Orbit)) \
or RasOrbit:
if RasOrbit:
pass
elif not isinstance(args[0],list):
os= [args[0]]
if len(os[0]._orb.vxvv) == 3 or len(os[0]._orb.vxvv) == 5:
raise IOError("Must specify phi for actionAngleIsochroneApprox")
else:
os= args[0]
if len(os[0]._orb.vxvv) == 3 or len(os[0]._orb.vxvv) == 5:
raise IOError("Must specify phi for actionAngleIsochroneApprox")
self._check_consistent_units_orbitInput(os[0])
if not hasattr(os[0]._orb,'orbit'):
if _firstFlip:
for o in os:
o._orb.vxvv[1]= -o._orb.vxvv[1]
o._orb.vxvv[2]= -o._orb.vxvv[2]
o._orb.vxvv[4]= -o._orb.vxvv[4]
[o.integrate(self._tsJ,pot=self._pot,
method=self._integrate_method,
dt=self._integrate_dt) for o in os]
if _firstFlip:
for o in os:
o._orb.vxvv[1]= -o._orb.vxvv[1]
o._orb.vxvv[2]= -o._orb.vxvv[2]
o._orb.vxvv[4]= -o._orb.vxvv[4]
o._orb.orbit[:,1]= -o._orb.orbit[:,1]
o._orb.orbit[:,2]= -o._orb.orbit[:,2]
o._orb.orbit[:,4]= -o._orb.orbit[:,4]
integrated= False
ntJ= os[0].getOrbit().shape[0]
no= len(os)
R= nu.empty((no,ntJ))
vR= nu.empty((no,ntJ))
vT= nu.empty((no,ntJ))
z= nu.zeros((no,ntJ))+10.**-7.
vz= nu.zeros((no,ntJ))+10.**-7.
phi= nu.empty((no,ntJ))
for ii in range(len(os)):
this_orbit= os[ii].getOrbit()
R[ii,:]= this_orbit[:,0]
vR[ii,:]= this_orbit[:,1]
vT[ii,:]= this_orbit[:,2]
if this_orbit.shape[1] == 6:
z[ii,:]= this_orbit[:,3]
vz[ii,:]= this_orbit[:,4]
phi[ii,:]= this_orbit[:,5]
else:
phi[ii,:]= this_orbit[:,3]
if freqsAngles and not integrated:
no= R.shape[0]
nt= R.shape[1]
oR= nu.empty((no,2*nt-1))
ovR= nu.empty((no,2*nt-1))
ovT= nu.empty((no,2*nt-1))
oz= nu.zeros((no,2*nt-1))+10.**-7.
ovz= nu.zeros((no,2*nt-1))+10.**-7.
ophi= nu.empty((no,2*nt-1))
if _firstFlip:
oR[:,:nt]= R[:,::-1]
ovR[:,:nt]= vR[:,::-1]
ovT[:,:nt]= vT[:,::-1]
oz[:,:nt]= z[:,::-1]
ovz[:,:nt]= vz[:,::-1]
ophi[:,:nt]= phi[:,::-1]
else:
oR[:,nt-1:]= R
ovR[:,nt-1:]= vR
ovT[:,nt-1:]= vT
oz[:,nt-1:]= z
ovz[:,nt-1:]= vz
ophi[:,nt-1:]= phi
if _firstFlip:
os= [Orbit([R[ii,0],vR[ii,0],vT[ii,0],z[ii,0],vz[ii,0],phi[ii,0]]) for ii in range(R.shape[0])]
else:
os= [Orbit([R[ii,0],-vR[ii,0],-vT[ii,0],z[ii,0],-vz[ii,0],phi[ii,0]]) for ii in range(R.shape[0])]
[o.integrate(self._tsJ,pot=self._pot,
method=self._integrate_method,
dt=self._integrate_dt) for o in os]
ts= self._tsJ
if _firstFlip:
for ii in range(no):
oR[ii,nt:]= os[ii].R(ts[1:])
ovR[ii,nt:]= os[ii].vR(ts[1:])
ovT[ii,nt:]= os[ii].vT(ts[1:])
if os[ii].getOrbit().shape[1] == 6:
oz[ii,nt:]= os[ii].z(ts[1:])
ovz[ii,nt:]= os[ii].vz(ts[1:])
ophi[ii,nt:]= os[ii].phi(ts[1:])
else:
for ii in range(no):
oR[ii,:nt-1]= os[ii].R(ts[1:])[::-1]
ovR[ii,:nt-1]= -os[ii].vR(ts[1:])[::-1]
ovT[ii,:nt-1]= -os[ii].vT(ts[1:])[::-1]
if os[ii].getOrbit().shape[1] == 6:
oz[ii,:nt-1]= os[ii].z(ts[1:])[::-1]
ovz[ii,:nt-1]= -os[ii].vz(ts[1:])[::-1]
ophi[ii,:nt-1]= os[ii].phi(ts[1:])[::-1]
return (oR,ovR,ovT,oz,ovz,ophi)
else:
return (R,vR,vT,z,vz,phi) | Helper function to parse the arguments to the __call__ and actionsFreqsAngles functions |
1,734 | def get_user_presence(self, userid):
response, status_code = self.__pod__.Presence.get_v2_user_uid_presence(
sessionToken=self.__session__,
uid=userid
).result()
self.logger.debug( % (status_code, response))
return status_code, response | check on presence of a user |
1,735 | def get_child_by_name(parent, name):
def iterate_children(widget, name):
if widget.get_name() == name:
return widget
try:
for w in widget.get_children():
result = iterate_children(w, name)
if result is not None:
return result
else:
continue
except AttributeError:
pass
return iterate_children(parent, name) | Iterate through a gtk container, `parent`,
and return the widget with the name `name`. |
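A GTK-free sketch of the same depth-first lookup, using a hypothetical FakeWidget stand-in for real gtk widgets:

```python
class FakeWidget:
    def __init__(self, name, children=()):
        self._name = name
        self._children = list(children)
    def get_name(self):
        return self._name
    def get_children(self):
        return self._children

def find_by_name(widget, name):
    # Depth-first search over get_children(), same shape as the row above.
    if widget.get_name() == name:
        return widget
    for child in getattr(widget, "get_children", lambda: [])():
        found = find_by_name(child, name)
        if found is not None:
            return found
    return None

tree = FakeWidget("root", [FakeWidget("box", [FakeWidget("ok_button")])])
assert find_by_name(tree, "ok_button").get_name() == "ok_button"
assert find_by_name(tree, "missing") is None
```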
1,736 | def add_item_metadata(self, handle, key, value):
_mkdir_if_missing(self._metadata_fragments_abspath)
prefix = self._handle_to_fragment_absprefixpath(handle)
fpath = prefix + .format(key)
_put_obj(fpath, value) | Store the given key:value pair for the item associated with handle.
:param handle: handle for accessing an item before the dataset is
frozen
:param key: metadata key
:param value: metadata value |
1,737 | def create_embeded_pkcs7_signature(data, cert, key):
assert isinstance(data, bytes)
assert isinstance(cert, str)
try:
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
signcert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
except crypto.Error as e:
raise exceptions.CorruptCertificate from e
bio_in = crypto._new_mem_buf(data)
pkcs7 = crypto._lib.PKCS7_sign(
signcert._x509, pkey._pkey, crypto._ffi.NULL, bio_in, PKCS7_NOSIGS
)
bio_out = crypto._new_mem_buf()
crypto._lib.i2d_PKCS7_bio(bio_out, pkcs7)
signed_data = crypto._bio_to_string(bio_out)
return signed_data | Creates an embedded ("nodetached") pkcs7 signature.
This is equivalent to the output of::
openssl smime -sign -signer cert -inkey key -outform DER -nodetach < data
:type data: bytes
:type cert: str
:type key: str |
1,738 | def convert_to_consumable_types (self, project, name, prop_set, sources, only_one=False):
if __debug__:
from .targets import ProjectTarget
assert isinstance(name, basestring) or name is None
assert isinstance(project, ProjectTarget)
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(only_one, bool)
consumed = []
missing_types = []
if len (sources) > 1:
for t in transformed[1]:
if t.type() in missing_types:
consumed.append(t)
consumed = unique(consumed)
return consumed | Attempts to convert 'source' to the types that this generator can
handle. The intention is to produce the set of targets that should be
used when generator is run.
only_one: convert 'source' to only one of source types
if there's more than one possibility, report an
error.
Returns a pair:
consumed: all targets that can be consumed. |
1,739 | def set_host_finished(self, scan_id, target, host):
finished_hosts = self.scans_table[scan_id]['finished_hosts']
finished_hosts[target].extend(host)
self.scans_table[scan_id]['finished_hosts'] = finished_hosts | Add the host in a list of finished hosts
1,740 | def dist(src, tar, method=sim_levenshtein):
if callable(method):
return 1 - method(src, tar)
else:
raise AttributeError('Unknown distance function: ' + str(method)) | Return a distance between two strings.
This is a generalized function for calling other distance functions.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
method : function
Specifies the similarity metric (:py:func:`sim_levenshtein` by default)
-- Note that this takes a similarity metric function, not a distance
metric function.
Returns
-------
float
Distance according to the specified function
Raises
------
AttributeError
Unknown distance function
Examples
--------
>>> round(dist('cat', 'hat'), 12)
0.333333333333
>>> round(dist('Niall', 'Neil'), 12)
0.6
>>> dist('aluminum', 'Catalan')
0.875
>>> dist('ATCG', 'TAGC')
0.75 |
1,741 | def _select_index(self, row, col):
nr, nc = self._size
nr = nr-1
nc = nc-1
if (row > nr and col >= nc) or (row >= nr and col > nc):
self._select_index(0, 0)
elif (row <= 0 and col < 0) or (row < 0 and col <= 0):
self._select_index(nr, nc)
elif row > nr :
self._select_index(0, col+1)
elif row < 0 :
self._select_index(nr, col-1)
elif col > nc :
self._select_index(row+1, 0)
elif col < 0 :
self._select_index(row-1, nc)
elif 0 <= row and row <= nr and 0 <= col and col <= nc :
self._index = (row, col)
else :
raise NotImplementedError("you'r trying to go where no completion\
have gone before : %d:%d (%d:%d)"%(row, col, nr, nc) ) | Change the selection index, and make sure it stays in the right range
A little more complicated than just doing a modulo on the number of rows and columns,
to be sure to cycle through all elements.
Horizontally, the elements are mapped like this :
to r <-- a b c d e f --> to g
to f <-- g h i j k l --> to m
to l <-- m n o p q r --> to a
and vertically
a d g j m p
b e h k n q
c f i l o r |
1,742 | def addFilter(self, filterMethod=FILTER_METHOD_AND, **kwargs):
filterMethod = filterMethod.upper()
if filterMethod not in FILTER_METHODS:
raise ValueError('Unknown filterMethod %s, must be one of: %s' %(str(filterMethod), repr(FILTER_METHODS)))
self.filters.append((filterMethod, kwargs)) | addFilter - Add a filter to this query.
@param filterMethod <str> - The filter method to use (AND or OR), default: 'AND'
@param additional args - Filter arguments. @see QueryableListBase.filter
@raises ValueError if filterMethod is not one of known methods. |
1,743 | def lbd_to_XYZ_jac(*args,**kwargs):
out= sc.zeros((6,6))
if len(args) == 3:
l,b,D= args
vlos, pmll, pmbb= 0., 0., 0.
elif len(args) == 6:
l,b,D,vlos,pmll,pmbb= args
if kwargs.get('degree',False):
l*= _DEGTORAD
b*= _DEGTORAD
cl= sc.cos(l)
sl= sc.sin(l)
cb= sc.cos(b)
sb= sc.sin(b)
out[0,0]= -D*cb*sl
out[0,1]= -D*sb*cl
out[0,2]= cb*cl
out[1,0]= D*cb*cl
out[1,1]= -D*sb*sl
out[1,2]= cb*sl
out[2,1]= D*cb
out[2,2]= sb
if len(args) == 3:
if kwargs.get('degree',False):
out[:,0]*= _DEGTORAD
out[:,1]*= _DEGTORAD
return out[:3,:3]
out[3,0]= -sl*cb*vlos-cl*_K*D*pmll+sb*sl*_K*D*pmbb
out[3,1]= -cl*sb*vlos-cb*cl*_K*D*pmbb
out[3,2]= -sl*_K*pmll-sb*cl*_K*pmbb
out[3,3]= cl*cb
out[3,4]= -sl*_K*D
out[3,5]= -cl*sb*_K*D
out[4,0]= cl*cb*vlos-sl*_K*D*pmll-cl*sb*_K*D*pmbb
out[4,1]= -sl*sb*vlos-sl*cb*_K*D*pmbb
out[4,2]= cl*_K*pmll-sl*sb*_K*pmbb
out[4,3]= sl*cb
out[4,4]= cl*_K*D
out[4,5]= -sl*sb*_K*D
out[5,1]= cb*vlos-sb*_K*D*pmbb
out[5,2]= cb*_K*pmbb
out[5,3]= sb
out[5,5]= cb*_K*D
if kwargs.get('degree',False):
out[:,0]*= _DEGTORAD
out[:,1]*= _DEGTORAD
return out | NAME:
lbd_to_XYZ_jac
PURPOSE:
calculate the Jacobian of the Galactic spherical coordinates to Galactic rectangular coordinates transformation
INPUT:
l,b,D- Galactic spherical coordinates
vlos,pmll,pmbb- Galactic spherical velocities (some as proper motions)
if 6 inputs: l,b,D,vlos,pmll x cos(b),pmbb
if 3: l,b,D
degree= (False) if True, l and b are in degrees
OUTPUT:
jacobian
HISTORY:
2013-12-09 - Written - Bovy (IAS) |
1,744 | def get_alert(thing_name, key, session=None):
return _request(, .format(thing_name), params={: key}, session=session) | Set an alert on a thing with the given condition |
1,745 | def show_lbaas_healthmonitor(self, lbaas_healthmonitor, **_params):
return self.get(self.lbaas_healthmonitor_path % (lbaas_healthmonitor),
params=_params) | Fetches information for a lbaas_healthmonitor. |
1,746 | def handle_url_build_error(self, error: Exception, endpoint: str, values: dict) -> str:
for handler in self.url_build_error_handlers:
result = handler(error, endpoint, values)
if result is not None:
return result
raise error | Handle a build error.
Ideally this will return a valid url given the error endpoint
and values. |
1,747 | def tdSensorValue(self, protocol, model, sid, datatype):
value = create_string_buffer(20)
timestamp = c_int()
self._lib.tdSensorValue(protocol, model, sid, datatype,
value, sizeof(value), byref(timestamp))
return {'value': self._to_str(value), 'timestamp': timestamp.value} | Get the sensor value for a given sensor.
:return: a dict with the keys: value, timestamp. |
1,748 | def robust_outer_product(vec_1, vec_2):
mantissa_1, exponents_1 = np.frexp(vec_1)
mantissa_2, exponents_2 = np.frexp(vec_2)
new_mantissas = mantissa_1[None, :] * mantissa_2[:, None]
new_exponents = exponents_1[None, :] + exponents_2[:, None]
return new_mantissas * np.exp2(new_exponents) | Calculates a 'robust' outer product of two vectors that may or may not
contain very small values.
Parameters
----------
vec_1 : 1D ndarray
vec_2 : 1D ndarray
Returns
-------
outer_prod : 2D ndarray. The outer product of vec_1 and vec_2 |
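A small sketch of the mantissa/exponent decomposition used above, checked against numpy.outer on ordinary values (note that the second argument supplies the rows of the result):

```python
import numpy as np

def robust_outer(vec_1, vec_2):
    # Split each value into mantissa * 2**exponent, multiply the mantissas and
    # add the exponents separately, then recombine.
    m1, e1 = np.frexp(vec_1)
    m2, e2 = np.frexp(vec_2)
    return (m1[None, :] * m2[:, None]) * np.exp2(e1[None, :] + e2[:, None])

a = np.array([3.0, 5e-7])
b = np.array([2.0, 4e-9])
# vec_2 indexes the rows and vec_1 the columns, i.e. outer(vec_2, vec_1).
assert np.allclose(robust_outer(a, b), np.outer(b, a))
```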
1,749 | def sort_tiers(self, key=lambda x: x.name):
self.tiers.sort(key=key) | Sort the tiers given the key. Example key functions:
Sort according to the tiername in a list:
``lambda x: ['name1', 'name2' ... 'namen'].index(x.name)``.
Sort according to the number of annotations:
``lambda x: len(list(x.get_intervals()))``
:param func key: A key function. Default sorts alphabetically. |
1,750 | def business_rule_notification_is_blocked(self, hosts, services):
acknowledged = 0
for src_prob_id in self.source_problems:
if src_prob_id in hosts:
src_prob = hosts[src_prob_id]
else:
src_prob = services[src_prob_id]
if src_prob.last_hard_state_id != 0:
if src_prob.problem_has_been_acknowledged:
acknowledged += 1
elif self.business_rule_downtime_as_ack is True:
if src_prob.scheduled_downtime_depth > 0:
acknowledged += 1
elif hasattr(src_prob, "host") and \
hosts[src_prob.host].scheduled_downtime_depth > 0:
acknowledged += 1
return acknowledged == len(self.source_problems) | Process business rule notifications behaviour. If all problems have
been acknowledged, no notifications should be sent if state is not OK.
By default, downtimes are ignored, unless explicitly told to be treated
as acknowledgements via the business_rule_downtime_as_ack option.
:return: True if all source problem are acknowledged, otherwise False
:rtype: bool |
1,751 | def generate_single_simulation(self, x):
if x:
self.__rng = np.random.RandomState(x)
time_points, species_over_time = self._gssa(self.__initial_conditions, self.__t_max)
descriptors = []
for i, s in enumerate(self.__species):
row = [0] * len(self.__species)
row[i] = 1
descriptors.append(Moment(row, s))
trajectories = [Trajectory(time_points, spot, desc) for
spot, desc in zip(species_over_time, descriptors)]
return trajectories | Generate a single SSA simulation
:param x: an integer to reset the random seed. If None, the initial random number generator is used
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem
:rtype: list[:class:`~means.simulation.Trajectory`] |
1,752 | def create_untl_xml_subelement(parent, element, prefix=):
subelement = SubElement(parent, prefix + element.tag)
if element.content is not None:
subelement.text = element.content
if element.qualifier is not None:
subelement.attrib["qualifier"] = element.qualifier
if len(element.children) > 0:
for child in element.children:
SubElement(subelement, prefix + child.tag).text = child.content
else:
subelement.text = element.content
return subelement | Create a UNTL XML subelement. |
1,753 | def _bundle_generic(bfile, addhelper, fmt, reffmt, data_dir):
ext = converters.get_format_extension(fmt)
refext = refconverters.get_format_extension(reffmt)
subdir = + fmt + + reffmt
readme_path = os.path.join(subdir, )
addhelper(bfile, readme_path, _create_readme(fmt, reffmt))
for name, data, notes in _basis_data_iter(fmt, reffmt, data_dir):
for ver, verdata in data.items():
filename = misc.basis_name_to_filename(name)
basis_filepath = os.path.join(subdir, .format(filename, ver, ext))
ref_filename = os.path.join(subdir, .format(filename, ver, refext))
bsdata, refdata = verdata
addhelper(bfile, basis_filepath, bsdata)
addhelper(bfile, ref_filename, refdata)
if len(notes) > 0:
notes_filename = os.path.join(subdir, filename + )
addhelper(bfile, notes_filename, notes)
for fam in api.get_families(data_dir):
fam_notes = api.get_family_notes(fam, data_dir)
if len(fam_notes) > 0:
fam_notes_filename = os.path.join(subdir, fam + )
addhelper(bfile, fam_notes_filename, fam_notes) | Loop over all basis sets and add data to an archive
Parameters
----------
bfile : object
An object that gets passed through to the addhelper function
addhelper : function
A function that takes bfile and adds data to the bfile
fmt : str
Format of the basis set to create
reffmt : str
Format to use for the references
data_dir : str
Data directory with all the basis set information.
Returns
-------
None |
1,754 | def snapshot(self):
self._snapshot = {
'muted': self.muted,
'volume': self.volume,
'stream': self.stream
}
_LOGGER.info(, self.friendly_name) | Snapshot current state. |
1,755 | def transform_q(q, query):
for i, child in enumerate(q.children):
if isinstance(child, Q):
transform_q(child, query)
else:
where_node = query.build_filter(child)
q.children[i] = where_node | Replaces (lookup, value) children of Q with equivalent WhereNode objects.
This is a pre-prep of our Q object, ready for later rendering into SQL.
Modifies in place, no need to return.
(We could do this in render_q, but then we'd have to pass the Query object
from ConditionalAggregate down into SQLConditionalAggregate, which Django
avoids to do in their API so we try and follow their lead here) |
1,756 | def migrate_passwords_to_leader_storage(self, excludes=None):
if not is_leader():
log("Skipping password migration as not the lead unit",
level=DEBUG)
return
dirname = os.path.dirname(self.root_passwd_file_template)
path = os.path.join(dirname, )
for f in glob.glob(path):
if excludes and f in excludes:
log("Excluding %s from leader storage migration" % (f),
level=DEBUG)
continue
key = os.path.basename(f)
with open(f, 'r') as passwd:
_value = passwd.read().strip()
try:
leader_set(settings={key: _value})
if self.delete_ondisk_passwd_file:
os.unlink(f)
except ValueError:
pass | Migrate any passwords storage on disk to leader storage. |
1,757 | def main(sample_id, assembly_file, minsize):
logger.info("Starting assembly file processing")
warnings = []
fails = ""
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, 0, 0,
sample_id, minsize)
if 'spades' in assembly_file:
assembler = "SPAdes"
else:
assembler = "MEGAHIT"
with open(".warnings", "w") as warn_fh:
t_80 = int(minsize) * 0.8
t_150 = int(minsize) * 1.5
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_obj.nORFs < 1:
warn_msg = "No complete ORFs found."
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True},
{"header": "ORFs",
"value": assembly_obj.nORFs,
"table": "assembly",
"columnBar":False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass") | Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF |
1,758 | def _connect(self):
try:
if self.ca_cert is None or self.certfile is None or \
self.keyfile is None or self.crlfile is None:
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
logger.info()
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
ssl_ca_certs=self.ca_cert,
ssl_certfile=self.certfile,
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
return client
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info(.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc | Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is a OperationFailure due to
Authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database. |
1,759 | def exception_wrapper(f):
@wraps(f)
def wrapper(*args, **kwds):
try:
return f(*args, **kwds)
except dbus.exceptions.DBusException as err:
_args = err.args
raise PyMPRISException(*_args)
return wrapper | Decorator to convert dbus exception to pympris exception. |
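A minimal sketch of the same exception-translation pattern with made-up exception types:

```python
from functools import wraps

class UpstreamError(Exception):
    pass

class AppError(Exception):
    pass

def translate_errors(f):
    # Re-raise a library exception as our own type, preserving its args.
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UpstreamError as err:
            raise AppError(*err.args)
    return wrapper

@translate_errors
def flaky():
    raise UpstreamError("boom")

try:
    flaky()
except AppError as err:
    assert err.args == ("boom",)
```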
1,760 | def set_affinity_matrix(self, affinity_mat):
affinity_mat = check_array(affinity_mat, accept_sparse=sparse_formats)
if affinity_mat.shape[0] != affinity_mat.shape[1]:
raise ValueError("affinity matrix is not square")
self.affinity_matrix = affinity_mat | Parameters
----------
affinity_mat : sparse matrix (N_obs, N_obs).
The adjacency matrix to input. |
1,761 | def encrypt(self):
value = self.parameters.get("Plaintext")
if isinstance(value, six.text_type):
value = value.encode()
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), : }) | We perform no encryption, we just encode the value as base64 and then
decode it in decrypt(). |
1,762 | def find_additional_rels(self, all_models):
for model_name, model in iteritems(all_models):
if model_name != self.name:
for field_name in model.field_names:
field = model.fields[field_name]
if field.field_type == self.name and field.back_populates is not None and \
(isinstance(field, StatikForeignKeyField) or isinstance(field, StatikManyToManyField)):
self.additional_rels[field.back_populates] = {
: model_name,
: field_name,
: (model_name, field.field_type)
if isinstance(field, StatikManyToManyField) else None
}
logger.debug(
,
self.name,
field.back_populates,
model_name,
self.additional_rels[field.back_populates]
) | Attempts to scan for additional relationship fields for this model based on all of the other models'
structures and relationships. |
1,763 | def get_instance_property(instance, property_name):
name = get_name(instance)
while True:
try:
value = getattr(instance, property_name)
if value is not None:
break
print(f"retrieving {property_name} on {name} produced None, retrying")
time.sleep(RETRY_INTERVAL_SEC)
instance.reload()
continue
except Exception as e:
print(f"retrieving {property_name} on {name} failed with {e}, retrying")
time.sleep(RETRY_INTERVAL_SEC)
try:
instance.reload()
except Exception:
pass
continue
return value | Retrieves property of an instance, keeps retrying until getting a non-None |
1,764 | def memoizedmethod(method):
method_name = method.__name__
@wraps(method)
def patched(self, *args, **kwargs):
try:
return self._cache[method_name]
except KeyError:
result = self._cache[method_name] = method(
self, *args, **kwargs)
return result
return patched | Decorator that caches method result.
Args:
method (function): Method
Returns:
function: Memoized method.
Notes:
Target method class needs as "_cache" attribute (dict).
It is the case of "ObjectIOBase" and all its subclasses. |
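A small usage sketch of the caching pattern above; the class supplies the required _cache dict and the names are hypothetical:

```python
from functools import wraps

def memoized_method(method):
    name = method.__name__
    @wraps(method)
    def patched(self, *args, **kwargs):
        try:
            return self._cache[name]          # serve the cached result
        except KeyError:
            result = self._cache[name] = method(self, *args, **kwargs)
            return result
    return patched

class Report:
    def __init__(self):
        self._cache = {}      # required by the decorator
        self.calls = 0

    @memoized_method
    def total(self):
        self.calls += 1
        return 42

r = Report()
assert r.total() == 42 and r.total() == 42
assert r.calls == 1          # the body ran only once
```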
1,765 | def ReadTrigger(self, trigger_link, options=None):
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.Read(path, 'triggers', trigger_id, None, options) | Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict |
1,766 | def shadow_hash(crypt_salt=None, password=None, algorithm='sha512'):
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm) | Generates a salted hash suitable for /etc/shadow.
crypt_salt : None
Salt to be used in the generation of the hash. If one is not
provided, a random salt will be generated.
password : None
Value to be salted and hashed. If one is not provided, a random
password will be generated.
algorithm : sha512
Hash algorithm to use.
CLI Example:
.. code-block:: bash
salt '*' random.shadow_hash 'My5alT' 'MyP@asswd' md5 |
1,767 | def check_type_and_values_of_specification_dict(specification_dict,
unique_alternatives):
for key in specification_dict:
specification = specification_dict[key]
if isinstance(specification, str):
if specification not in ["all_same", "all_diff"]:
msg = "specification_dict[{}] not in [, ]"
raise ValueError(msg.format(key))
elif isinstance(specification, list):
for group in specification:
group_is_list = isinstance(group, list)
if group_is_list:
for group_item in group:
if isinstance(group_item, list):
msg = "Wrong structure for specification_dict[{}]"
msg_2 = " Values can be a list of lists of ints,"
msg_3 = " not lists of lists of lists of ints."
total_msg = msg.format(key) + msg_2 + msg_3
raise ValueError(total_msg)
elif group_item not in unique_alternatives:
msg_1 = "{} in {} in specification_dict[{}]"
msg_2 = " is not in long_format[alt_id_col]"
total_msg = (msg_1.format(group_item, group, key) +
msg_2)
raise ValueError(total_msg)
else:
if group not in unique_alternatives:
msg_1 = "{} in specification_dict[{}]"
msg_2 = " is not in long_format[alt_id_col]"
raise ValueError(msg_1.format(group, key) + msg_2)
else:
msg = "specification_dict[{}] must be , , or"
msg_2 = " a list."
raise TypeError(msg.format(key) + msg_2)
return None | Verifies that the values of specification_dict have the correct type, have
the correct structure, and have valid values (i.e. are actually in the set
of possible alternatives). Will raise various errors if / when appropriate.
Parameters
----------
specification_dict : OrderedDict.
Keys are a proper subset of the columns in `long_form_df`. Values are
either a list or a single string, `"all_diff"` or `"all_same"`. If a
list, the elements should be:
- single objects that are within the alternative ID column of
`long_form_df`
- lists of objects that are within the alternative ID column of
`long_form_df`. For each single object in the list, a unique
column will be created (i.e. there will be a unique coefficient
for that variable in the corresponding utility equation of the
corresponding alternative). For lists within the
`specification_dict` values, a single column will be created for
all the alternatives within iterable (i.e. there will be one
common coefficient for the variables in the iterable).
unique_alternatives : 1D ndarray.
Should contain the possible alternative id's for this dataset.
Returns
-------
None. |
1,768 | def fit(self, X, y=None):
self.opt_ = None
self.cputime_ = None
self.iters_ = None
self.duality_gap_ = None
self.sample_covariance_ = None
self.lam_scale_ = None
self.is_fitted_ = False
X = check_array(X, ensure_min_features=2, estimator=self)
X = as_float_array(X, copy=False, force_all_finite=False)
if self.cv is None:
cv = (3, 10)
elif isinstance(self.cv, int):
cv = (self.cv, 10)
elif isinstance(self.cv, tuple):
cv = self.cv
cv = RepeatedKFold(n_splits=cv[0], n_repeats=cv[1])
self.init_coefs(X)
if isinstance(self.lams, int):
n_refinements = self.n_refinements
lam_1 = self.lam_scale_
lam_0 = 1e-2 * lam_1
path = np.logspace(np.log10(lam_0), np.log10(lam_1), self.lams)[::-1]
else:
path = self.lams
n_refinements = 1
results = list()
t0 = time.time()
for rr in range(n_refinements):
if self.sc is None:
this_result = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose, backend=self.backend
)(
delayed(_quic_path)(
X[train],
path,
X_test=X[test],
lam=self.lam,
tol=self.tol,
max_iter=self.max_iter,
Theta0=self.Theta0,
Sigma0=self.Sigma0,
method=self.method,
verbose=self.verbose,
score_metric=self.score_metric,
init_method=self.init_method,
)
for train, test in cv.split(X)
)
else:
train_test_grid = [(train, test) for (train, test) in cv.split(X)]
indexed_param_grid = list(
zip(range(len(train_test_grid)), train_test_grid)
)
par_param_grid = self.sc.parallelize(indexed_param_grid)
X_bc = self.sc.broadcast(X)
quic_path = partial(
_quic_path,
path=path,
lam=self.lam,
tol=self.tol,
max_iter=self.max_iter,
Theta0=self.Theta0,
Sigma0=self.Sigma0,
method=self.method,
verbose=self.verbose,
score_metric=self.score_metric,
init_method=self.init_method,
)
indexed_results = dict(
par_param_grid.map(
partial(_quic_path_spark, quic_path=quic_path, X_bc=X_bc)
).collect()
)
this_result = [
indexed_results[idx] for idx in range(len(train_test_grid))
]
X_bc.unpersist()
covs, _, scores = zip(*this_result)
covs = zip(*covs)
scores = zip(*scores)
results.extend(zip(path, scores, covs))
results = sorted(results, key=operator.itemgetter(0), reverse=True)
best_score = -np.inf
last_finite_idx = 0
best_index = 0
for index, (lam, scores, _) in enumerate(results):
scores = [s for s in scores if not np.isinf(s)]
if len(scores) == 0:
this_score = -np.inf
else:
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
if best_index == 0:
lam_1 = results[0][0]
lam_0 = results[1][0]
elif best_index == last_finite_idx and not best_index == len(results) - 1:
lam_1 = results[best_index][0]
lam_0 = results[best_index + 1][0]
elif best_index == len(results) - 1:
lam_1 = results[best_index][0]
lam_0 = 0.01 * results[best_index][0]
else:
lam_1 = results[best_index - 1][0]
lam_0 = results[best_index + 1][0]
if isinstance(self.lams, int):
path = np.logspace(np.log10(lam_1), np.log10(lam_0), self.lams + 2)
path = path[1:-1]
if self.verbose and n_refinements > 1:
print(
"[GraphLassoCV] Done refinement % 2i out of %i: % 3is"
% (rr + 1, n_refinements, time.time() - t0)
)
results = list(zip(*results))
grid_scores_ = list(results[1])
lams = list(results[0])
lams.append(0)
grid_scores_.append(
cross_val_score(EmpiricalCovariance(), X, cv=cv, n_jobs=self.n_jobs)
)
self.grid_scores_ = np.array(grid_scores_)
self.lam_ = self.lam * lams[best_index]
self.cv_lams_ = [self.lam * l for l in lams]
if self.method == "quic":
(
self.precision_,
self.covariance_,
self.opt_,
self.cputime_,
self.iters_,
self.duality_gap_,
) = quic(
self.sample_covariance_,
self.lam_,
mode="default",
tol=self.tol,
max_iter=self.max_iter,
Theta0=self.Theta0,
Sigma0=self.Sigma0,
path=None,
msg=self.verbose,
)
else:
raise NotImplementedError("Only method='quic' has been implemented.")
self.is_fitted_ = True
return self | Fits the GraphLasso covariance model to X.
Closely follows sklearn.covariance.graph_lasso.GraphLassoCV.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate |
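A rough usage sketch for this cross-validated fit. The estimator class name, import path, and constructor arguments below are assumptions (the method only reads them from self), not details taken from the snippet.
import numpy as np
from inverse_covariance import QuicGraphicalLassoCV  # assumed package / class name

X = np.random.RandomState(0).randn(200, 10)
model = QuicGraphicalLassoCV(cv=(3, 2))  # 3 folds, 2 repeats per refinement
model.fit(X)
print(model.lam_)              # regularization level chosen by cross-validation
print(model.precision_.shape)  # (10, 10) estimated precision matrix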
1,769 | def com_google_fonts_check_fstype(ttFont):
value = ttFont['OS/2'].fsType
if value != 0:
FSTYPE_RESTRICTIONS = {
0x0002: ("* The font must not be modified, embedded or exchanged in"
" any manner without first obtaining permission of"
" the legal owner."),
0x0004: ("The font may be embedded, and temporarily loaded on the"
" remote system, but documents that use it must"
" not be editable."),
0x0008: ("The font may be embedded but must only be installed"
" temporarily on other systems."),
0x0100: ("The font may not be subsetted prior to embedding."),
0x0200: ("Only bitmaps contained in the font may be embedded."
" No outline data may be embedded.")
}
restrictions = ""
for bit_mask in FSTYPE_RESTRICTIONS.keys():
if value & bit_mask:
restrictions += FSTYPE_RESTRICTIONS[bit_mask]
if value & 0b1111110011110001:
restrictions += ("* There are reserved bits set,"
" which indicates an invalid setting.")
yield FAIL, ("OS/2 fsType is a legacy DRM-related field.\n"
"In this font it is set to {} meaning that:\n"
"{}\n"
"No such DRM restrictions can be enabled on the"
" Google Fonts collection, so the fsType field"
" must be set to zero (Installable Embedding) instead.\n"
"Fonts with this setting indicate that they may be embedded"
" and permanently installed on the remote system"
" by an application.\n\n"
" More detailed info is available at:\n"
" https://docs.microsoft.com/en-us"
"/typography/opentype/spec/os2
"").format(value, restrictions)
else:
yield PASS, ("OS/2 fsType is properly set to zero.") | Checking OS/2 fsType.
Fonts must have their fsType field set to zero.
This setting is known as Installable Embedding, meaning
that none of the DRM restrictions are enabled on the fonts.
More info available at:
https://docs.microsoft.com/en-us/typography/opentype/spec/os2#fstype |
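The check reads the fsType field from the OS/2 table; the same field can be inspected directly with fontTools (the font path below is hypothetical).
from fontTools.ttLib import TTFont

font = TTFont("MyFamily-Regular.ttf")  # hypothetical path
print(font["OS/2"].fsType)             # 0 means Installable Embedding (no DRM bits set)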
1,770 | def _parse_hparams(hparams):
prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
ret = []
for prefix in prefixes:
ret_dict = {}
for key in hparams.values():
if prefix in key:
par_name = key[len(prefix):]
ret_dict[par_name] = hparams.get(key)
ret.append(ret_dict)
return ret | Split hparams, based on key prefixes.
Args:
hparams: hyperparameters
Returns:
Tuple of hparams for, respectively: agent, optimizer, runner, replay_buffer. |
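The loop iterates hparams.values() as keys, which matches the tf.contrib HParams interface where values() returns a dict of name -> value. A stand-in object with the same two methods illustrates the split; the class and parameter names are invented for the example.
class FakeHParams(object):
    """Stand-in exposing the two methods _parse_hparams relies on."""
    def __init__(self, **kwargs):
        self._kwargs = kwargs
    def values(self):
        return self._kwargs
    def get(self, key):
        return self._kwargs[key]

hp = FakeHParams(agent_gamma=0.99, optimizer_lr=1e-4, runner_num_steps=1000)
print(_parse_hparams(hp))
# [{'gamma': 0.99}, {'lr': 0.0001}, {'num_steps': 1000}, {}]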
1,771 | def generate(env):
link.generate(env)
if env['PLATFORM'] == 'hpux':
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
| Add Builders and construction variables for gnulink to an Environment. |
1,772 | def get_data_length(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
if self.inode is not None:
return self.inode.get_data_length()
return self.data_length | A method to get the length of the data that this Directory Record
points to.
Parameters:
None.
Returns:
The length of the data that this Directory Record points to. |
1,773 | def ProbGreater(self, x):
t = [prob for (val, prob) in self.d.iteritems() if val > x]
return sum(t) | Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability |
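The same summation written against a plain dict, standing in for the Pmf's internal self.d mapping of value -> probability (Python 3 items() used here for brevity).
d = {1: 0.25, 2: 0.25, 3: 0.5}
x = 1
prob_greater = sum(prob for val, prob in d.items() if val > x)
print(prob_greater)  # 0.75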
1,774 | def pack(self, remaining_size):
arguments_count, payload = self.pack_data(remaining_size - self.header_size)
payload_length = len(payload)
if payload_length % 8 != 0:
payload += b"\x00" * (8 - payload_length % 8)
self.header = PartHeader(self.kind, self.attribute, arguments_count, self.bigargumentcount,
payload_length, remaining_size)
hdr = self.header_struct.pack(*self.header)
if pyhdb.tracing:
self.trace_header = humanhexlify(hdr, 30)
self.trace_payload = humanhexlify(payload, 30)
return hdr + payload | Pack data of part into binary format |
1,775 | def decrypt_subtitle(self, subtitle):
return self.decrypt(self._build_encryption_key(int(subtitle.id)),
subtitle['iv'][0].text.decode('base64'),
subtitle['data'][0].text.decode('base64')) | Decrypt encrypted subtitle data in high level model object
@param crunchyroll.models.Subtitle subtitle
@return str |
1,776 | def clinvar_submission_header(submission_objs, csv_type):
complete_header = {}
custom_header = {}
if csv_type == 'variant_data':
complete_header = CLINVAR_HEADER
else:
complete_header = CASEDATA_HEADER
for header_key, header_value in complete_header.items():
for clinvar_obj in submission_objs:
for key, value in clinvar_obj.items():
if not header_key in custom_header and header_key == key:
custom_header[header_key] = header_value
return custom_header | Determine which fields to include in csv header by checking a list of submission objects
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
csv_type(str) : 'variant_data' or 'case_data'
Returns:
custom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER |
1,777 | def https_connection(self):
endpoint = self.endpoint
host, remainder = endpoint.split(':', 1)
port = remainder
if '/' in remainder:
port, _ = remainder.split('/', 1)
conn = HTTPSConnection(
host, int(port),
context=self._get_ssl(self.cacert),
)
path = (
"/model/{}".format(self.uuid)
if self.uuid else ""
)
return conn, self._http_headers(), path | Return an https connection to this Connection's endpoint.
Returns a 3-tuple containing::
1. The :class:`HTTPSConnection` instance
2. Dictionary of auth headers to be used with the connection
3. The root url path (str) to be used for requests. |
1,778 | def add_number_widget(self, ref, x=1, value=1):
if ref not in self.widgets:
widget = widgets.NumberWidget(screen=self, ref=ref, x=x, value=value)
self.widgets[ref] = widget
return self.widgets[ref] | Add Number Widget |
1,779 | def _handle_ticker(self, dtype, data, ts):
self.log.debug("_handle_ticker: %s - %s - %s", dtype, data, ts)
channel_id, *data = data
channel_identifier = self.channel_directory[channel_id]
entry = (data, ts)
self.tickers[channel_identifier].put(entry) | Adds received ticker data to self.tickers dict, filed under its channel
id.
:param dtype:
:param data:
:param ts:
:return: |
1,780 | def singularity_build(script=None, src=None, dest=None, **kwargs):
singularity = SoS_SingularityClient()
singularity.build(script, src, dest, **kwargs)
return 0 | docker build command. By default a script is sent to the docker build command but
you can also specify different parameters defined in https://docker-py.readthedocs.org/en/stable/api/#build |
1,781 | def _normalized_keys(self, section, items):
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized | Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment. |
1,782 | def _make_publisher(catalog_or_dataset):
level = catalog_or_dataset
keys = [k for k in ["publisher_name", "publisher_mbox"] if k in level]
if keys:
level["publisher"] = {
key.replace("publisher_", ""): level.pop(key) for key in keys
}
return level | If the required keys are present, build the
"publisher" dictionary at the catalog or dataset level. |
1,783 | def transitions_for(self, roles=None, actor=None, anchors=[]):
proxy = self.obj.access_for(roles, actor, anchors)
return {name: transition for name, transition in self.transitions(current=False).items()
if name in proxy} | For use on :class:`~coaster.sqlalchemy.mixins.RoleMixin` classes:
returns currently available transitions for the specified
roles or actor as a dictionary of name: :class:`StateTransitionWrapper`. |
1,784 | def strftime(dt, fmt):
if _illegal_s.search(fmt):
raise TypeError("This strftime implementation does not handle %s")
if dt.year > 1900:
return dt.strftime(fmt)
# Assumption: substitute the 4-digit and 2-digit year tokens up front,
# since time.strftime below cannot render years before 1900.
fmt = fmt.replace('%Y', str(dt.year))\
.replace('%y', '{:04d}'.format(dt.year)[-2:])
year = dt.year
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
return time.strftime(fmt, (year,) + timetuple[1:]) | `strftime` implementation working before 1900 |
1,785 | def parse_options_header(value, multiple=False):
if not value:
return "", {}
result = []
value = "," + value.replace("\n", ",")
while value:
match = _option_header_start_mime_type.match(value)
if not match:
break
result.append(match.group(1))
options = {}
rest = match.group(2)
continued_encoding = None
while rest:
optmatch = _option_header_piece_re.match(rest)
if not optmatch:
break
option, count, encoding, language, option_value = optmatch.groups()
if not count:
continued_encoding = None
else:
if not encoding:
encoding = continued_encoding
continued_encoding = encoding
option = unquote_header_value(option)
if option_value is not None:
option_value = unquote_header_value(option_value, option == "filename")
if encoding is not None:
option_value = _unquote(option_value).decode(encoding)
if count:
options[option] = options.get(option, "") + option_value
else:
options[option] = option_value
rest = rest[optmatch.end() :]
result.append(options)
if multiple is False:
return tuple(result)
value = rest
return tuple(result) if result else ("", {}) | Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionchanged:: 0.15
:rfc:`2231` parameter continuations are handled.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True |
1,786 | def rename_ligand(self,ligand_name,mol_file):
self.universe.ligand = self.universe.select_atoms(ligand_name)
self.universe.ligand.residues.resnames = "LIG"
self.universe.ligand.resname = "LIG"
if mol_file is None:
self.universe.ligand.write("lig.pdb")
os.system("babel -ipdb lig.pdb -omol lig.mol ") | Get an atom selection for the selected from both topology and trajectory. Rename the ligand LIG
to help with ligand names that are not standard, e.g. contain numbers.
Takes:
* ligand_name * - MDAnalysis atom selection for the ligand selected by user
Output:
* self.ligand * - renamed ligand with resname LIG,
* self.ligand_noH * - renamed ligand with resname LIG and without H atoms (these are not
present in the final 2D representation and are therefore excluded from some analysis scripts.) |
1,787 | def _optimize_with_progs(format_module, filename, image_format):
filesize_in = os.stat(filename).st_size
report_stats = None
for func in format_module.PROGRAMS:
if not getattr(Settings, func.__name__):
continue
report_stats = _optimize_image_external(
filename, func, image_format, format_module.OUT_EXT)
filename = report_stats.final_filename
if format_module.BEST_ONLY:
break
if report_stats is not None:
report_stats.bytes_in = filesize_in
else:
report_stats = stats.skip(image_format, filename)
return report_stats | Use the correct optimizing functions in sequence.
And report back statistics. |
1,788 | def push(self, item):
self.server.lpush(self.key, self._encode_item(item)) | Push an item |
1,789 | def get_prep_value(self, value):
if isinstance(value, JSON.JsonDict):
return json.dumps(value, cls=JSON.Encoder)
if isinstance(value, JSON.JsonList):
return value.json_string
if isinstance(value, JSON.JsonString):
return json.dumps(value)
return value | The psycopg adaptor returns Python objects,
but we also have to handle conversion ourselves |
1,790 | def convert_radian(coord, *variables):
if any(v.attrs.get('units') == 'radian' for v in variables):
return coord * 180. / np.pi
return coord | Convert the given coordinate from radian to degree
Parameters
----------
coord: xr.Variable
The variable to transform
``*variables``
The variables that are on the same unit.
Returns
-------
xr.Variable
The transformed variable if one of the given `variables` has units in
radian |
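A short sketch of the conversion, assuming the function above is in scope; the coordinate and variable here are invented.
import numpy as np
import xarray as xr

coord = xr.Variable(('x',), np.array([0.0, np.pi / 2, np.pi]))
var = xr.Variable(('x',), np.zeros(3), attrs={'units': 'radian'})
print(convert_radian(coord, var).values)  # [  0.  90. 180.]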
1,791 | def _print_foreign_playlist_message(self):
self.operation_mode = self.window_mode = NORMAL_MODE
self.refreshBody()
txt=.format(self._cnf.foreign_filename_only_no_extension,
self._cnf.stations_filename_only_no_extension)
self._show_help(txt, FOREIGN_PLAYLIST_MESSAGE_MODE,
caption = ,
prompt = ,
is_message=True) | reset previous message |
1,792 | def observe(matcher):
@functools.wraps(matcher)
def observer(self, subject, *expected, **kw):
if hasattr(self, 'before'):
self.before(subject, *expected, **kw)
result = matcher(self, subject, *expected, **kw)
if result is not True and hasattr(self, 'after_error'):
self.after_error(result, subject, *expected, **kw)
if result is True and hasattr(self, 'after_success'):
self.after_success(subject, *expected, **kw)
if not hasattr(self, 'show_diff'):
self.show_diff = all([
isinstance(subject, six.string_types),
all([isinstance(x, six.string_types) for x in expected]),
])
return result
return observer | Internal decorator to trigger operator hooks before/after
matcher execution. |
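A sketch of how the hooks fire, assuming the observe decorator above is in scope; the matcher class and its hook bodies are invented for illustration.
class EqualOperator(object):
    @observe
    def match(self, subject, *expected, **kw):
        return subject == expected[0] or ['values differ']
    def before(self, subject, *expected, **kw):
        print('about to compare', subject, 'with', expected[0])
    def after_success(self, subject, *expected, **kw):
        print('matched!')
    def after_error(self, result, subject, *expected, **kw):
        print('failed with', result)

EqualOperator().match('foo', 'foo')  # before + after_success
EqualOperator().match('foo', 'bar')  # before + after_error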
1,793 | def trace(
data, name, format='png', datarange=(None, None), suffix='', path='./', rows=1, columns=1,
num=1, last=True, fontmap = None, verbose=1):
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Plotting', name)
figure()
subplot(rows, columns, num)
pyplot(data.tolist())
ylim(datarange)
title('\n\n   %s trace' % name, x=0., y=1., ha='left', va='top',
fontsize='small')
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
savefig("%s%s%s.%s" % (path, name, suffix, format)) | Generates trace plot from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the trace.
datarange: tuple or list
Preferred y-range of trace (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot. |
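A minimal call sketch, assuming the pylab-style helpers (figure, subplot, savefig, ...) imported by the original module are available.
import numpy as np

samples = np.random.normal(size=1000)
trace(samples, 'mu')  # standalone plot written to ./mu.png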
1,794 | def formatMessageForBuildResults(self, mode, buildername, buildset, build, master, previous_results, blamelist):
ss_list = buildset['sourcestamps']
results = build['results']
ctx = dict(results=build['results'],
mode=mode,
buildername=buildername,
workername=build['properties'].get(
'workername', ["<unknown>"])[0],
buildset=buildset,
build=build,
projects=self.getProjects(ss_list, master),
previous_results=previous_results,
status_detected=self.getDetectedStatus(
mode, results, previous_results),
build_url=utils.getURLForBuild(
master, build['builder']['builderid'], build['number']),
buildbot_url=master.config.buildbotURL,
blamelist=blamelist,
summary=self.messageSummary(build, results),
sourcestamps=self.messageSourceStamps(ss_list)
)
yield self.buildAdditionalContext(master, ctx)
msgdict = self.renderMessage(ctx)
return msgdict | Generate a buildbot mail message and return a dictionary
containing the message body, type and subject. |
1,795 | def update_user(resource_root, user):
return call(resource_root.put,
'%s/%s' % (USERS_PATH, user.name), ApiUser, data=user) | Update a user.
Replaces the user's details with those provided.
@param resource_root: The root Resource object
@param user: An ApiUser object
@return: An ApiUser object |
1,796 | def Clouds(name=None, deterministic=False, random_state=None):
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return meta.SomeOf((1, 2), children=[
CloudLayer(
intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,
alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),
sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)
),
CloudLayer(
intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,
alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),
sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)
)
], random_order=False, name=name, deterministic=deterministic, random_state=random_state) | Augmenter to draw clouds in images.
This is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities
and frequency patterns of clouds.
This augmenter seems to be fairly robust w.r.t. the image size. Tested with ``96x128``, ``192x256``
and ``960x1280``.
dtype support::
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range of uint8.
While other dtypes may be accepted, they will lead to images augmented in
ways inappropriate for the respective dtype.
Parameters
----------
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Clouds()
Creates an augmenter that adds clouds to images. |
1,797 | def points(self, points):
if not isinstance(points, np.ndarray):
raise TypeError()
x = np.unique(points[:,0])
y = np.unique(points[:,1])
z = np.unique(points[:,2])
nx, ny, nz = len(x), len(y), len(z)
dx, dy, dz = np.unique(np.diff(x)), np.unique(np.diff(y)), np.unique(np.diff(z))
ox, oy, oz = np.min(x), np.min(y), np.min(z)
self._from_specs((nx,ny,nz), (dx,dy,dz), (ox,oy,oz))
self.Modified() | set points without copying |
1,798 | def encode(in_bytes):
final_zero = True
out_bytes = []
idx = 0
search_start_idx = 0
for in_char in in_bytes:
if in_char == '\x00':
final_zero = True
out_bytes.append(chr(idx - search_start_idx + 1))
out_bytes.append(in_bytes[search_start_idx:idx])
search_start_idx = idx + 1
else:
if idx - search_start_idx == 0xFD:
final_zero = False
out_bytes.append('\xff')
out_bytes.append(in_bytes[search_start_idx:idx+1])
search_start_idx = idx + 1
idx += 1
if idx != search_start_idx or final_zero:
out_bytes.append(chr(idx - search_start_idx + 1))
out_bytes.append(in_bytes[search_start_idx:idx])
return ''.join(out_bytes) | Encode a string using Consistent Overhead Byte Stuffing (COBS).
Input is any byte string. Output is also a byte string.
Encoding guarantees no zero bytes in the output. The output
string will be expanded slightly, by a predictable amount.
An empty string is encoded to '\\x01'. |
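A quick check of the encoder. Note this implementation treats str as a byte string (Python 2 semantics), so the calls below assume that environment.
encoded = encode('Hello\x00World')
print(repr(encoded))     # '\x06Hello\x06World' -- no zero bytes in the output
print(repr(encode('')))  # '\x01', matching the docstring above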
1,799 | def run(self):
super().run()
if not isinstance(self._executable, Program):
raise ValueError("Please `load` an appropriate executable.")
quil_program = self._executable
trials = quil_program.num_shots
classical_addresses = get_classical_addresses_from_program(quil_program)
if self.noise_model is not None:
quil_program = apply_noise_model(quil_program, self.noise_model)
quil_program = self.augment_program_with_memory_values(quil_program)
try:
self._bitstrings = self.connection._qvm_run(quil_program=quil_program,
classical_addresses=classical_addresses,
trials=trials,
measurement_noise=self.measurement_noise,
gate_noise=self.gate_noise,
random_seed=self.random_seed)['ro']
except KeyError:
warnings.warn("You are running a QVM program with no MEASURE instructions. "
"The result of this program will always be an empty array. Are "
"you sure you didn't mean to measure some of your qubits?")
self._bitstrings = np.zeros((trials, 0), dtype=np.int64)
return self | Run a Quil program on the QVM multiple times and return the values stored in the
classical registers designated by the classical_addresses parameter.
:return: An array of bitstrings of shape ``(trials, len(classical_addresses))`` |